1 /* $KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $ */ 2 /* $DragonFly: src/sys/net/altq/altq_hfsc.c,v 1.1 2005/02/11 22:25:57 joerg Exp $ */ 3 4 /* 5 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved. 6 * 7 * Permission to use, copy, modify, and distribute this software and 8 * its documentation is hereby granted (including for commercial or 9 * for-profit use), provided that both the copyright notice and this 10 * permission notice appear in all copies of the software, derivative 11 * works, or modified versions, and any portions thereof. 12 * 13 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF 14 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS 15 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED 16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 25 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26 * DAMAGE. 27 * 28 * Carnegie Mellon encourages (but does not require) users of this 29 * software to return any improvements or extensions that they make, 30 * and to grant Carnegie Mellon the rights to redistribute these 31 * changes without encumbrance. 32 */ 33 /* 34 * H-FSC is described in Proceedings of SIGCOMM'97, 35 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing, 36 * Real-Time and Priority Service" 37 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng. 
38 * 39 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing. 40 * when a class has an upperlimit, the fit-time is computed from the 41 * upperlimit service curve. the link-sharing scheduler does not schedule 42 * a class whose fit-time exceeds the current time. 43 */ 44 45 #include "opt_altq.h" 46 #include "opt_inet.h" 47 #include "opt_inet6.h" 48 49 #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */ 50 51 #include <sys/param.h> 52 #include <sys/malloc.h> 53 #include <sys/mbuf.h> 54 #include <sys/socket.h> 55 #include <sys/systm.h> 56 #include <sys/errno.h> 57 #include <sys/queue.h> 58 59 #include <net/if.h> 60 #include <net/ifq_var.h> 61 #include <netinet/in.h> 62 63 #include <net/pf/pfvar.h> 64 #include <net/altq/altq.h> 65 #include <net/altq/altq_hfsc.h> 66 67 /* 68 * function prototypes 69 */ 70 static int hfsc_clear_interface(struct hfsc_if *); 71 static int hfsc_request(struct ifaltq *, int, void *); 72 static void hfsc_purge(struct hfsc_if *); 73 static struct hfsc_class *hfsc_class_create(struct hfsc_if *, 74 struct service_curve *, 75 struct service_curve *, 76 struct service_curve *, 77 struct hfsc_class *, int, int, int); 78 static int hfsc_class_destroy(struct hfsc_class *); 79 static struct hfsc_class *hfsc_nextclass(struct hfsc_class *); 80 static int hfsc_enqueue(struct ifaltq *, struct mbuf *, 81 struct altq_pktattr *); 82 static struct mbuf *hfsc_dequeue(struct ifaltq *, int); 83 84 static int hfsc_addq(struct hfsc_class *, struct mbuf *); 85 static struct mbuf *hfsc_getq(struct hfsc_class *); 86 static struct mbuf *hfsc_pollq(struct hfsc_class *); 87 static void hfsc_purgeq(struct hfsc_class *); 88 89 static void update_cfmin(struct hfsc_class *); 90 static void set_active(struct hfsc_class *, int); 91 static void set_passive(struct hfsc_class *); 92 93 static void init_ed(struct hfsc_class *, int); 94 static void update_ed(struct hfsc_class *, int); 95 static void update_d(struct hfsc_class *, int); 96 
static void init_vf(struct hfsc_class *, int); 97 static void update_vf(struct hfsc_class *, int, uint64_t); 98 static ellist_t *ellist_alloc(void); 99 static void ellist_destroy(ellist_t *); 100 static void ellist_insert(struct hfsc_class *); 101 static void ellist_remove(struct hfsc_class *); 102 static void ellist_update(struct hfsc_class *); 103 struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t); 104 static actlist_t *actlist_alloc(void); 105 static void actlist_destroy(actlist_t *); 106 static void actlist_insert(struct hfsc_class *); 107 static void actlist_remove(struct hfsc_class *); 108 static void actlist_update(struct hfsc_class *); 109 110 static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t); 111 112 static __inline uint64_t seg_x2y(uint64_t, uint64_t); 113 static __inline uint64_t seg_y2x(uint64_t, uint64_t); 114 static __inline uint64_t m2sm(u_int); 115 static __inline uint64_t m2ism(u_int); 116 static __inline uint64_t d2dx(u_int); 117 static u_int sm2m(uint64_t); 118 static u_int dx2d(uint64_t); 119 120 static void sc2isc(struct service_curve *, struct internal_sc *); 121 static void rtsc_init(struct runtime_sc *, struct internal_sc *, 122 uint64_t, uint64_t); 123 static uint64_t rtsc_y2x(struct runtime_sc *, uint64_t); 124 static uint64_t rtsc_x2y(struct runtime_sc *, uint64_t); 125 static void rtsc_min(struct runtime_sc *, struct internal_sc *, 126 uint64_t, uint64_t); 127 128 static void get_class_stats(struct hfsc_classstats *, struct hfsc_class *); 129 static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t); 130 131 /* 132 * macros 133 */ 134 #define is_a_parent_class(cl) ((cl)->cl_children != NULL) 135 136 #define HT_INFINITY 0xffffffffffffffffLL /* infinite time value */ 137 138 int 139 hfsc_pfattach(struct pf_altq *a) 140 { 141 struct ifnet *ifp; 142 int s, error; 143 144 if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL) 145 return (EINVAL); 146 s = splimp(); 147 error = 
altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc, 148 hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL); 149 splx(s); 150 return (error); 151 } 152 153 int 154 hfsc_add_altq(struct pf_altq *a) 155 { 156 struct hfsc_if *hif; 157 struct ifnet *ifp; 158 159 if ((ifp = ifunit(a->ifname)) == NULL) 160 return (EINVAL); 161 if (!ifq_is_ready(&ifp->if_snd)) 162 return (ENODEV); 163 164 hif = malloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO); 165 166 hif->hif_eligible = ellist_alloc(); 167 hif->hif_ifq = &ifp->if_snd; 168 169 /* keep the state in pf_altq */ 170 a->altq_disc = hif; 171 172 return (0); 173 } 174 175 int 176 hfsc_remove_altq(struct pf_altq *a) 177 { 178 struct hfsc_if *hif; 179 180 if ((hif = a->altq_disc) == NULL) 181 return (EINVAL); 182 a->altq_disc = NULL; 183 184 hfsc_clear_interface(hif); 185 hfsc_class_destroy(hif->hif_rootclass); 186 187 ellist_destroy(hif->hif_eligible); 188 189 free(hif, M_ALTQ); 190 191 return (0); 192 } 193 194 int 195 hfsc_add_queue(struct pf_altq *a) 196 { 197 struct hfsc_if *hif; 198 struct hfsc_class *cl, *parent; 199 struct hfsc_opts *opts; 200 struct service_curve rtsc, lssc, ulsc; 201 202 if ((hif = a->altq_disc) == NULL) 203 return (EINVAL); 204 205 opts = &a->pq_u.hfsc_opts; 206 207 if (a->parent_qid == HFSC_NULLCLASS_HANDLE && hif->hif_rootclass == NULL) 208 parent = NULL; 209 else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL) 210 return (EINVAL); 211 212 if (a->qid == 0) 213 return (EINVAL); 214 215 if (clh_to_clp(hif, a->qid) != NULL) 216 return (EBUSY); 217 218 rtsc.m1 = opts->rtsc_m1; 219 rtsc.d = opts->rtsc_d; 220 rtsc.m2 = opts->rtsc_m2; 221 lssc.m1 = opts->lssc_m1; 222 lssc.d = opts->lssc_d; 223 lssc.m2 = opts->lssc_m2; 224 ulsc.m1 = opts->ulsc_m1; 225 ulsc.d = opts->ulsc_d; 226 ulsc.m2 = opts->ulsc_m2; 227 228 cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit, 229 opts->flags, a->qid); 230 if (cl == NULL) 231 return (ENOMEM); 232 233 return (0); 234 } 235 236 int 237 
hfsc_remove_queue(struct pf_altq *a) 238 { 239 struct hfsc_if *hif; 240 struct hfsc_class *cl; 241 242 if ((hif = a->altq_disc) == NULL) 243 return (EINVAL); 244 245 if ((cl = clh_to_clp(hif, a->qid)) == NULL) 246 return (EINVAL); 247 248 return (hfsc_class_destroy(cl)); 249 } 250 251 int 252 hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) 253 { 254 struct hfsc_if *hif; 255 struct hfsc_class *cl; 256 struct hfsc_classstats stats; 257 int error = 0; 258 259 if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL) 260 return (EBADF); 261 262 if ((cl = clh_to_clp(hif, a->qid)) == NULL) 263 return (EINVAL); 264 265 if (*nbytes < sizeof(stats)) 266 return (EINVAL); 267 268 get_class_stats(&stats, cl); 269 270 if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0) 271 return (error); 272 *nbytes = sizeof(stats); 273 return (0); 274 } 275 276 /* 277 * bring the interface back to the initial state by discarding 278 * all the filters and classes except the root class. 279 */ 280 static int 281 hfsc_clear_interface(struct hfsc_if *hif) 282 { 283 struct hfsc_class *cl; 284 285 if (hif->hif_rootclass == NULL) 286 return (0); 287 288 289 /* clear out the classes */ 290 while ((cl = hif->hif_rootclass->cl_children) != NULL) { 291 /* 292 * remove the first leaf class found in the hierarchy 293 * then start over 294 */ 295 for (; cl != NULL; cl = hfsc_nextclass(cl)) { 296 if (!is_a_parent_class(cl)) { 297 hfsc_class_destroy(cl); 298 break; 299 } 300 } 301 } 302 303 return (0); 304 } 305 306 static int 307 hfsc_request(struct ifaltq *ifq, int req, void *arg) 308 { 309 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc; 310 311 switch (req) { 312 case ALTRQ_PURGE: 313 hfsc_purge(hif); 314 break; 315 } 316 return (0); 317 } 318 319 /* discard all the queued packets on the interface */ 320 static void 321 hfsc_purge(struct hfsc_if *hif) 322 { 323 struct hfsc_class *cl; 324 325 for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) { 326 if 
(!qempty(cl->cl_q)) 327 hfsc_purgeq(cl); 328 } 329 if (ifq_is_enabled(hif->hif_ifq)) 330 hif->hif_ifq->ifq_len = 0; 331 } 332 333 struct hfsc_class * 334 hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc, 335 struct service_curve *fsc, struct service_curve *usc, 336 struct hfsc_class *parent, int qlimit, int flags, int qid) 337 { 338 struct hfsc_class *cl, *p; 339 int i, s; 340 341 if (hif->hif_classes >= HFSC_MAX_CLASSES) 342 return (NULL); 343 344 #ifndef ALTQ_RED 345 if (flags & HFCF_RED) { 346 #ifdef ALTQ_DEBUG 347 printf("hfsc_class_create: RED not configured for HFSC!\n"); 348 #endif 349 return (NULL); 350 } 351 #endif 352 353 cl = malloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO); 354 cl->cl_q = malloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO); 355 cl->cl_actc = actlist_alloc(); 356 357 if (qlimit == 0) 358 qlimit = 50; /* use default */ 359 qlimit(cl->cl_q) = qlimit; 360 qtype(cl->cl_q) = Q_DROPTAIL; 361 qlen(cl->cl_q) = 0; 362 cl->cl_flags = flags; 363 #ifdef ALTQ_RED 364 if (flags & (HFCF_RED|HFCF_RIO)) { 365 int red_flags, red_pkttime; 366 u_int m2; 367 368 m2 = 0; 369 if (rsc != NULL && rsc->m2 > m2) 370 m2 = rsc->m2; 371 if (fsc != NULL && fsc->m2 > m2) 372 m2 = fsc->m2; 373 if (usc != NULL && usc->m2 > m2) 374 m2 = usc->m2; 375 376 red_flags = 0; 377 if (flags & HFCF_ECN) 378 red_flags |= REDF_ECN; 379 #ifdef ALTQ_RIO 380 if (flags & HFCF_CLEARDSCP) 381 red_flags |= RIOF_CLEARDSCP; 382 #endif 383 if (m2 < 8) 384 red_pkttime = 1000 * 1000 * 1000; /* 1 sec */ 385 else 386 red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu 387 * 1000 * 1000 * 1000 / (m2 / 8); 388 if (flags & HFCF_RED) { 389 cl->cl_red = red_alloc(0, 0, 390 qlimit(cl->cl_q) * 10/100, 391 qlimit(cl->cl_q) * 30/100, 392 red_flags, red_pkttime); 393 if (cl->cl_red != NULL) 394 qtype(cl->cl_q) = Q_RED; 395 } 396 #ifdef ALTQ_RIO 397 else { 398 cl->cl_red = (red_t *)rio_alloc(0, NULL, 399 red_flags, red_pkttime); 400 if (cl->cl_red != NULL) 401 qtype(cl->cl_q) = Q_RIO; 402 } 
403 #endif 404 } 405 #endif /* ALTQ_RED */ 406 407 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) { 408 cl->cl_rsc = malloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK); 409 sc2isc(rsc, cl->cl_rsc); 410 rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0); 411 rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0); 412 } 413 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) { 414 cl->cl_fsc = malloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK); 415 if (cl->cl_fsc == NULL) 416 goto err_ret; 417 sc2isc(fsc, cl->cl_fsc); 418 rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0); 419 } 420 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) { 421 cl->cl_usc = malloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK); 422 if (cl->cl_usc == NULL) 423 goto err_ret; 424 sc2isc(usc, cl->cl_usc); 425 rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0); 426 } 427 428 cl->cl_id = hif->hif_classid++; 429 cl->cl_handle = qid; 430 cl->cl_hif = hif; 431 cl->cl_parent = parent; 432 433 s = splimp(); 434 hif->hif_classes++; 435 436 /* 437 * find a free slot in the class table. if the slot matching 438 * the lower bits of qid is free, use this slot. otherwise, 439 * use the first free slot. 
440 */ 441 i = qid % HFSC_MAX_CLASSES; 442 if (hif->hif_class_tbl[i] == NULL) 443 hif->hif_class_tbl[i] = cl; 444 else { 445 for (i = 0; i < HFSC_MAX_CLASSES; i++) { 446 if (hif->hif_class_tbl[i] == NULL) { 447 hif->hif_class_tbl[i] = cl; 448 break; 449 } 450 } 451 if (i == HFSC_MAX_CLASSES) { 452 splx(s); 453 goto err_ret; 454 } 455 } 456 457 if (flags & HFCF_DEFAULTCLASS) 458 hif->hif_defaultclass = cl; 459 460 if (parent == NULL) { 461 /* this is root class */ 462 hif->hif_rootclass = cl; 463 } else if (parent->cl_children == NULL) { 464 /* add this class to the children list of the parent */ 465 parent->cl_children = cl; 466 } else { 467 p = parent->cl_children; 468 while (p->cl_siblings != NULL) 469 p = p->cl_siblings; 470 p->cl_siblings = cl; 471 } 472 splx(s); 473 474 return (cl); 475 476 err_ret: 477 if (cl->cl_actc != NULL) 478 actlist_destroy(cl->cl_actc); 479 if (cl->cl_red != NULL) { 480 #ifdef ALTQ_RIO 481 if (q_is_rio(cl->cl_q)) 482 rio_destroy((rio_t *)cl->cl_red); 483 #endif 484 #ifdef ALTQ_RED 485 if (q_is_red(cl->cl_q)) 486 red_destroy(cl->cl_red); 487 #endif 488 } 489 if (cl->cl_fsc != NULL) 490 free(cl->cl_fsc, M_ALTQ); 491 if (cl->cl_rsc != NULL) 492 free(cl->cl_rsc, M_ALTQ); 493 if (cl->cl_usc != NULL) 494 free(cl->cl_usc, M_ALTQ); 495 if (cl->cl_q != NULL) 496 free(cl->cl_q, M_ALTQ); 497 free(cl, M_ALTQ); 498 return (NULL); 499 } 500 501 static int 502 hfsc_class_destroy(struct hfsc_class *cl) 503 { 504 int i, s; 505 506 if (cl == NULL) 507 return (0); 508 509 if (is_a_parent_class(cl)) 510 return (EBUSY); 511 512 s = splimp(); 513 514 if (!qempty(cl->cl_q)) 515 hfsc_purgeq(cl); 516 517 if (cl->cl_parent == NULL) { 518 /* this is root class */ 519 } else { 520 struct hfsc_class *p = cl->cl_parent->cl_children; 521 522 if (p == cl) { 523 cl->cl_parent->cl_children = cl->cl_siblings; 524 } else { 525 do { 526 if (p->cl_siblings == cl) { 527 p->cl_siblings = cl->cl_siblings; 528 break; 529 } 530 } while ((p = p->cl_siblings) != NULL); 531 } 532 
KKASSERT(p != NULL); 533 } 534 535 for (i = 0; i < HFSC_MAX_CLASSES; i++) { 536 if (cl->cl_hif->hif_class_tbl[i] == cl) { 537 cl->cl_hif->hif_class_tbl[i] = NULL; 538 break; 539 } 540 } 541 542 cl->cl_hif->hif_classes--; 543 splx(s); 544 545 actlist_destroy(cl->cl_actc); 546 547 if (cl->cl_red != NULL) { 548 #ifdef ALTQ_RIO 549 if (q_is_rio(cl->cl_q)) 550 rio_destroy((rio_t *)cl->cl_red); 551 #endif 552 #ifdef ALTQ_RED 553 if (q_is_red(cl->cl_q)) 554 red_destroy(cl->cl_red); 555 #endif 556 } 557 558 if (cl == cl->cl_hif->hif_rootclass) 559 cl->cl_hif->hif_rootclass = NULL; 560 if (cl == cl->cl_hif->hif_defaultclass) 561 cl->cl_hif->hif_defaultclass = NULL; 562 563 if (cl->cl_usc != NULL) 564 free(cl->cl_usc, M_ALTQ); 565 if (cl->cl_fsc != NULL) 566 free(cl->cl_fsc, M_ALTQ); 567 if (cl->cl_rsc != NULL) 568 free(cl->cl_rsc, M_ALTQ); 569 free(cl->cl_q, M_ALTQ); 570 free(cl, M_ALTQ); 571 572 return (0); 573 } 574 575 /* 576 * hfsc_nextclass returns the next class in the tree. 577 * usage: 578 * for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) 579 * do_something; 580 */ 581 static struct hfsc_class * 582 hfsc_nextclass(struct hfsc_class *cl) 583 { 584 if (cl->cl_children != NULL) { 585 cl = cl->cl_children; 586 } else if (cl->cl_siblings != NULL) { 587 cl = cl->cl_siblings; 588 } else { 589 while ((cl = cl->cl_parent) != NULL) { 590 if (cl->cl_siblings != NULL) { 591 cl = cl->cl_siblings; 592 break; 593 } 594 } 595 } 596 597 return (cl); 598 } 599 600 /* 601 * hfsc_enqueue is an enqueue function to be registered to 602 * (*altq_enqueue) in struct ifaltq. 
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		return (ENOBUFS);
	}
	/* the classifier stored the target class handle in the pkthdr */
	if (m->m_pkthdr.fw_flags & ALTQ_MBUF_TAGGED)
		cl = clh_to_clp(hif, m->m_pkthdr.altq_qid);
	else
		cl = NULL;
	/*
	 * untagged packets, unknown handles, and interior (parent) classes
	 * all fall back to the default class; without one the packet is
	 * dropped.
	 */
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	/* remember the length now: hfsc_addq frees the mbuf on drop */
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	ifq->ifq_len++;
	cl->cl_hif->hif_packets++;

	/* successfully queued.  going from empty to 1 starts a backlog period */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		/* a preceding ALTDQ_POLL already selected the class; reuse it */
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time)) != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				/* descend to the fitting child with minimum vt */
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n",fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			/* remember the selection so ALTDQ_REMOVE returns it */
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ifq->ifq_len--;
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	/* charge the transmission to the class and its ancestors */
	update_vf(cl, len, cur_time);
	/* cl_cumul counts only bytes sent under the real-time criterion */
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

/*
 * enqueue an mbuf on the class queue, delegating to RED/RIO when the
 * queue is so configured.  returns 0 on success, -1 when the packet
 * was dropped (tail-drop on a full queue); the mbuf is freed on drop.
 */
static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

/* remove and return the head packet of the class queue */
static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

/* return the head packet without dequeueing it */
static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

/* discard every packet queued on the class, updating drop statistics */
static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		cl->cl_hif->hif_ifq->ifq_len--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

/*
 * the class becomes backlogged: initialize its real-time state (if it
 * has a real-time service curve) and its link-sharing state (if it has
 * a fair-share service curve).
 */
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

/* the class queue drained: take it off the eligible list */
static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

/*
 * compute the initial eligible time (cl_e) and deadline (cl_d) for a
 * class entering a new backlog period, and insert it into the eligible
 * list.
 */
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

/*
 * recompute eligible time and deadline after a real-time dequeue and
 * reposition the class in the eligible list.
 */
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

/* recompute only the deadline (after a link-sharing dequeue) */
static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

/*
 * initialize the virtual-time (link-sharing) state of a newly
 * backlogged class, walking up from the class to the root.  each
 * ancestor that becomes active gets a fresh vt, is inserted into its
 * parent's active-children list, and has its upper-limit state
 * initialized when an upper-limit curve is present.
 */
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		/* only the first activation at each level propagates upward */
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

/*
 * charge `len' transmitted bytes to the class and every ancestor,
 * updating virtual times and fit times.  called with len == 0 and
 * cur_time == 0 from hfsc_purgeq() purely to deactivate the class.
 */
static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		/* a level goes passive only when its last active child does */
		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

/*
 * recompute the cached minimum fit time (cl_cfmin) over a parent's
 * active children.  0 means at least one active child has no
 * upper-limit constraint (cl_f == 0), so the parent always fits.
 */
static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list
holds backlogged classes being sorted by their eligible times. 1088 * there is one eligible list per interface. 1089 */ 1090 1091 static ellist_t * 1092 ellist_alloc(void) 1093 { 1094 ellist_t *head; 1095 1096 head = malloc(sizeof(ellist_t *), M_ALTQ, M_WAITOK); 1097 TAILQ_INIT(head); 1098 return (head); 1099 } 1100 1101 static void 1102 ellist_destroy(ellist_t *head) 1103 { 1104 free(head, M_ALTQ); 1105 } 1106 1107 static void 1108 ellist_insert(struct hfsc_class *cl) 1109 { 1110 struct hfsc_if *hif = cl->cl_hif; 1111 struct hfsc_class *p; 1112 1113 /* check the last entry first */ 1114 if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL || 1115 p->cl_e <= cl->cl_e) { 1116 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist); 1117 return; 1118 } 1119 1120 TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) { 1121 if (cl->cl_e < p->cl_e) { 1122 TAILQ_INSERT_BEFORE(p, cl, cl_ellist); 1123 return; 1124 } 1125 } 1126 KKASSERT(0); /* should not reach here */ 1127 } 1128 1129 static void 1130 ellist_remove(struct hfsc_class *cl) 1131 { 1132 struct hfsc_if *hif = cl->cl_hif; 1133 1134 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1135 } 1136 1137 static void 1138 ellist_update(struct hfsc_class *cl) 1139 { 1140 struct hfsc_if *hif = cl->cl_hif; 1141 struct hfsc_class *p, *last; 1142 1143 /* 1144 * the eligible time of a class increases monotonically. 1145 * if the next entry has a larger eligible time, nothing to do. 
1146 */ 1147 p = TAILQ_NEXT(cl, cl_ellist); 1148 if (p == NULL || cl->cl_e <= p->cl_e) 1149 return; 1150 1151 /* check the last entry */ 1152 last = TAILQ_LAST(hif->hif_eligible, _eligible); 1153 KKASSERT(last != NULL); 1154 if (last->cl_e <= cl->cl_e) { 1155 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1156 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist); 1157 return; 1158 } 1159 1160 /* 1161 * the new position must be between the next entry 1162 * and the last entry 1163 */ 1164 while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) { 1165 if (cl->cl_e < p->cl_e) { 1166 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1167 TAILQ_INSERT_BEFORE(p, cl, cl_ellist); 1168 return; 1169 } 1170 } 1171 KKASSERT(0); /* should not reach here */ 1172 } 1173 1174 /* find the class with the minimum deadline among the eligible classes */ 1175 struct hfsc_class * 1176 ellist_get_mindl(ellist_t *head, uint64_t cur_time) 1177 { 1178 struct hfsc_class *p, *cl = NULL; 1179 1180 TAILQ_FOREACH(p, head, cl_ellist) { 1181 if (p->cl_e > cur_time) 1182 break; 1183 if (cl == NULL || p->cl_d < cl->cl_d) 1184 cl = p; 1185 } 1186 return (cl); 1187 } 1188 1189 /* 1190 * active children list holds backlogged child classes being sorted 1191 * by their virtual time. 1192 * each intermediate class has one active children list. 
1193 */ 1194 static actlist_t * 1195 actlist_alloc(void) 1196 { 1197 actlist_t *head; 1198 1199 head = malloc(sizeof(*head), M_ALTQ, M_WAITOK); 1200 TAILQ_INIT(head); 1201 return (head); 1202 } 1203 1204 static void 1205 actlist_destroy(actlist_t *head) 1206 { 1207 free(head, M_ALTQ); 1208 } 1209 static void 1210 actlist_insert(struct hfsc_class *cl) 1211 { 1212 struct hfsc_class *p; 1213 1214 /* check the last entry first */ 1215 if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL 1216 || p->cl_vt <= cl->cl_vt) { 1217 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist); 1218 return; 1219 } 1220 1221 TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) { 1222 if (cl->cl_vt < p->cl_vt) { 1223 TAILQ_INSERT_BEFORE(p, cl, cl_actlist); 1224 return; 1225 } 1226 } 1227 KKASSERT(0); /* should not reach here */ 1228 } 1229 1230 static void 1231 actlist_remove(struct hfsc_class *cl) 1232 { 1233 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist); 1234 } 1235 1236 static void 1237 actlist_update(struct hfsc_class *cl) 1238 { 1239 struct hfsc_class *p, *last; 1240 1241 /* 1242 * the virtual time of a class increases monotonically during its 1243 * backlogged period. 1244 * if the next entry has a larger virtual time, nothing to do. 
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt) {
		/* already in the right position */
		return;
	}

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/*
 * return the first class on cl's active-children list whose fit-time
 * has been reached (cl_f <= cur_time), or NULL when no child fits.
 * note: cl here is the parent; the scan walks cl->cl_actc.
 */
static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 * external service curve parameters
 *	m: bits/sec
 *	d: msec
 * internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps      10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)

/* x-projection (length) to y-projection (bytes) along slope sm */
static __inline uint64_t
seg_x2y(uint64_t x, uint64_t sm)
{
	uint64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

/* y-projection (bytes) to x-projection (length) along inverse slope ism */
static __inline uint64_t
seg_y2x(uint64_t y, uint64_t ism)
{
	uint64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;	/* zero-rate segment: never reached */
	else
		x = (y >> ISM_SHIFT) * ism + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

	return (x);
}

/* convert rate m [bits/sec] to internal slope sm */
static __inline uint64_t
m2sm(u_int m)
{
	uint64_t sm;

	sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

/* convert rate m [bits/sec] to internal inverse slope ism */
static __inline uint64_t
m2ism(u_int m)
{
	uint64_t ism;

	if (m == 0)
		ism = HT_INFINITY;	/* zero rate maps to infinity */
	else
		ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

/* convert delay d [msec] to machine-clock ticks */
static __inline uint64_t
d2dx(u_int d)
{
	uint64_t dx;

	dx = ((uint64_t)d * machclk_freq) / 1000;
	return (dx);
}

/* inverse of m2sm(): internal slope back to bits/sec */
static u_int
sm2m(uint64_t sm)
{
	uint64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

/* inverse of d2dx(): machine-clock ticks back to msec */
static u_int
dx2d(uint64_t dx)
{
	uint64_t d;

	d = dx *
1000 / machclk_freq;
	return ((u_int)d);
}

/* convert an external service curve to internal representation */
static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);	/* bytes served by 1st segment */
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 * (i.e. given a byte count y, return the time x at which the curve
 * reaches it; the curve's origin clamps values below it.)
 */
static uint64_t
rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
{
	uint64_t x;

	if (y < rtsc->y) {
		/* below the curve origin: clamp to the origin */
		x = rtsc->x;
	} else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;	/* zero-height segment: skip it */
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

/* inverse of rtsc_y2x(): given a time x, return the byte count y */
static uint64_t
rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
{
	uint64_t y;

	if (x <= rtsc->x) {
		/* before the curve origin: clamp to the origin */
		y = rtsc->y;
	} else if (x <= rtsc->x + rtsc->dx) {
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	} else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service
curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * (the divisor is nonzero: sm1 > sm2 in the concave case)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

/*
 * fill *sp with a snapshot of cl's service curves (converted back to
 * external units), scheduler state and queue statistics.
 * a missing service curve (NULL) is reported as all zeroes.
 */
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	/* real-time service curve */
	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	/* link-sharing (fair service) curve */
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	/* upper-limit service curve */
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	/* sample the clock so userland can relate the counters to time */
	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
red_getstats(cl->cl_red, &sp->red[0]); 1594 #endif 1595 #ifdef ALTQ_RIO 1596 if (q_is_rio(cl->cl_q)) 1597 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]); 1598 #endif 1599 } 1600 1601 /* convert a class handle to the corresponding class pointer */ 1602 static struct hfsc_class * 1603 clh_to_clp(struct hfsc_if *hif, uint32_t chandle) 1604 { 1605 int i; 1606 struct hfsc_class *cl; 1607 1608 if (chandle == 0) 1609 return (NULL); 1610 /* 1611 * first, try optimistically the slot matching the lower bits of 1612 * the handle. if it fails, do the linear table search. 1613 */ 1614 i = chandle % HFSC_MAX_CLASSES; 1615 if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle) 1616 return (cl); 1617 for (i = 0; i < HFSC_MAX_CLASSES; i++) 1618 if ((cl = hif->hif_class_tbl[i]) != NULL && 1619 cl->cl_handle == chandle) 1620 return (cl); 1621 return (NULL); 1622 } 1623 1624 #endif /* ALTQ_HFSC */ 1625