1 /* $KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $ */ 2 /* $DragonFly: src/sys/net/altq/altq_hfsc.c,v 1.4 2005/06/03 18:20:36 swildner Exp $ */ 3 4 /* 5 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved. 6 * 7 * Permission to use, copy, modify, and distribute this software and 8 * its documentation is hereby granted (including for commercial or 9 * for-profit use), provided that both the copyright notice and this 10 * permission notice appear in all copies of the software, derivative 11 * works, or modified versions, and any portions thereof. 12 * 13 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF 14 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS 15 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED 16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 25 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26 * DAMAGE. 27 * 28 * Carnegie Mellon encourages (but does not require) users of this 29 * software to return any improvements or extensions that they make, 30 * and to grant Carnegie Mellon the rights to redistribute these 31 * changes without encumbrance. 32 */ 33 /* 34 * H-FSC is described in Proceedings of SIGCOMM'97, 35 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing, 36 * Real-Time and Priority Service" 37 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng. 
38 * 39 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing. 40 * when a class has an upperlimit, the fit-time is computed from the 41 * upperlimit service curve. the link-sharing scheduler does not schedule 42 * a class whose fit-time exceeds the current time. 43 */ 44 45 #include "opt_altq.h" 46 #include "opt_inet.h" 47 #include "opt_inet6.h" 48 49 #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */ 50 51 #include <sys/param.h> 52 #include <sys/malloc.h> 53 #include <sys/mbuf.h> 54 #include <sys/socket.h> 55 #include <sys/systm.h> 56 #include <sys/errno.h> 57 #include <sys/queue.h> 58 #include <sys/thread.h> 59 60 #include <net/if.h> 61 #include <net/ifq_var.h> 62 #include <netinet/in.h> 63 64 #include <net/pf/pfvar.h> 65 #include <net/altq/altq.h> 66 #include <net/altq/altq_hfsc.h> 67 68 #include <sys/thread2.h> 69 70 /* 71 * function prototypes 72 */ 73 static int hfsc_clear_interface(struct hfsc_if *); 74 static int hfsc_request(struct ifaltq *, int, void *); 75 static void hfsc_purge(struct hfsc_if *); 76 static struct hfsc_class *hfsc_class_create(struct hfsc_if *, 77 struct service_curve *, 78 struct service_curve *, 79 struct service_curve *, 80 struct hfsc_class *, int, int, int); 81 static int hfsc_class_destroy(struct hfsc_class *); 82 static struct hfsc_class *hfsc_nextclass(struct hfsc_class *); 83 static int hfsc_enqueue(struct ifaltq *, struct mbuf *, 84 struct altq_pktattr *); 85 static struct mbuf *hfsc_dequeue(struct ifaltq *, int); 86 87 static int hfsc_addq(struct hfsc_class *, struct mbuf *); 88 static struct mbuf *hfsc_getq(struct hfsc_class *); 89 static struct mbuf *hfsc_pollq(struct hfsc_class *); 90 static void hfsc_purgeq(struct hfsc_class *); 91 92 static void update_cfmin(struct hfsc_class *); 93 static void set_active(struct hfsc_class *, int); 94 static void set_passive(struct hfsc_class *); 95 96 static void init_ed(struct hfsc_class *, int); 97 static void update_ed(struct hfsc_class *, 
int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, uint64_t);
static ellist_t *ellist_alloc(void);
static void	ellist_destroy(ellist_t *);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
static actlist_t *actlist_alloc(void);
static void	actlist_destroy(actlist_t *);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);

static __inline uint64_t	seg_x2y(uint64_t, uint64_t);
static __inline uint64_t	seg_y2x(uint64_t, uint64_t);
static __inline uint64_t	m2sm(u_int);
static __inline uint64_t	m2ism(u_int);
static __inline uint64_t	d2dx(u_int);
static u_int			sm2m(uint64_t);
static u_int			dx2d(uint64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
		    uint64_t, uint64_t);
static uint64_t	rtsc_y2x(struct runtime_sc *, uint64_t);
static uint64_t	rtsc_x2y(struct runtime_sc *, uint64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
		    uint64_t, uint64_t);

static void	get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);

/*
 * macros
 */
/* an "internal" class is one that has children; only leaves carry queues */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

/*
 * Attach the HFSC discipline (previously created via pf(4)) to the
 * interface's send queue by installing the enqueue/dequeue/request
 * handlers.  Runs inside a critical section because the handler
 * switch must be atomic with respect to concurrent queue activity.
 */
int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	crit_enter();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	crit_exit();
	return (error);
}

/*
 * Allocate the per-interface HFSC state (hfsc_if) and record it in the
 * pf_altq request.  The interface must exist and its ifq must be ready.
 */
int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);

	hif->hif_eligible = ellist_alloc();
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

/*
 * Tear down the HFSC state attached by hfsc_add_altq: destroy every
 * class (leaves first via hfsc_clear_interface, then the root), free
 * the eligible list and finally the hfsc_if itself.
 */
int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	hfsc_clear_interface(hif);
	hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	free(hif, M_ALTQ);

	return (0);
}

/*
 * Create one HFSC class from a pf_altq request.  The three service
 * curves (real-time, link-sharing, upper-limit) are taken from the
 * hfsc_opts embedded in the request.  Returns EINVAL on a bad
 * parent/qid, EBUSY if the qid already exists, ENOMEM if the class
 * could not be created.
 */
int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	/* only the very first class (the root) may be parentless */
	if (a->parent_qid == HFSC_NULLCLASS_HANDLE && hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
			       opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

/*
 * Destroy the class named by a->qid.  Fails with EBUSY (from
 * hfsc_class_destroy) if the class still has children.
 */
int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

/*
 * Copy one class's statistics out to userland.  On entry *nbytes must
 * be at least sizeof(struct hfsc_classstats); on success it is set to
 * the number of bytes copied.
 */
int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

/*
 * ifaltq request handler; the only request handled is ALTRQ_PURGE,
 * which drops every queued packet on the interface.
 */
static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	crit_exit();
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

/*
 * Allocate and initialize one HFSC class.
 *
 *  rsc/fsc/usc - real-time, link-sharing and upper-limit service
 *                curves; a curve with m1 == 0 && m2 == 0 is "unset"
 *  parent      - parent class, or NULL for the root class
 *  qlimit      - queue length limit (0 selects the default of 50)
 *  flags       - HFCF_* flags (RED/RIO/ECN/default class, ...)
 *  qid         - class handle chosen by the caller
 *
 * Returns the new class, or NULL on failure (table full, or RED
 * requested without ALTQ_RED compiled in).
 */
struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
		  struct service_curve *fsc, struct service_curve *usc,
		  struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = malloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		/* base RED's per-packet time on the fastest curve slope */
		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		/*
		 * NOTE(review): with M_WAITOK this malloc should not return
		 * NULL; this check (and the one for cl_usc below, but not
		 * for cl_rsc above) looks vestigial -- confirm against
		 * kmalloc(9) semantics.
		 */
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		/* append to the end of the parent's sibling chain */
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

 err_ret:
	/* undo every allocation made above; cl itself is freed last */
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_ALTQ);
	free(cl, M_ALTQ);
	return (NULL);
}

/*
 * Destroy a (leaf) class: drain its queue, unlink it from the parent's
 * sibling chain and the class table, and free all of its resources.
 * Returns EBUSY if the class still has children.
 */
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		/* unlink from the parent's children/sibling chain */
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	cl->cl_hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_ALTQ);
	free(cl->cl_q, M_ALTQ);
	free(cl, M_ALTQ);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 * (pre-order traversal: children first, then siblings, then the
 * nearest ancestor's sibling)
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		return (ENOBUFS);
	}
	crit_enter();
	if (m->m_pkthdr.fw_flags & ALTQ_MBUF_TAGGED)
		cl = clh_to_clp(hif, m->m_pkthdr.altq_qid);
	else
		cl = NULL;
	/* unclassified (or internal-class) packets fall back to the default */
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			crit_exit();
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	/* remember the length now; the mbuf may be freed by hfsc_addq */
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}
	ifq->ifq_len++;
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));
	crit_exit();
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	crit_enter();
	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		/* honor the class selected by the preceding ALTDQ_POLL */
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time)) != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n",fits);
#endif
					m = NULL;
					goto done;
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			goto done;
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ifq->ifq_len--;
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	/* cl_cumul tracks only bytes sent under real-time criteria */
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}
 done:
	crit_exit();
	return (m);
}

/*
 * Append an mbuf to the class queue, delegating to RED/RIO when the
 * queue is so configured.  Returns non-zero (and frees the mbuf) on a
 * tail drop.
 */
static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

/* remove and return the head packet of the class queue */
static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

/* peek at the head packet without removing it */
static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

/* drop every packet queued on the class and deactivate it */
static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		cl->cl_hif->hif_ifq->ifq_len--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

/*
 * The class has just become backlogged: initialize its real-time
 * (eligible/deadline) and link-sharing (virtual-time) state.
 */
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

/* the class queue drained: take it off the eligible list */
static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

/*
 * Initialize eligible time (e) and deadline (d) for a class entering
 * a new backlogged period, and insert it into the eligible list.
 */
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

/* recompute e and d for the next packet and reposition in the ellist */
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

/* recompute only the deadline (class was served by link-sharing) */
static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

/*
 * Initialize the virtual-time state of cl and of every ancestor that
 * becomes active because of it, walking up toward the root.
 */
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;	/* read lazily, only if an upper-limit curve needs it */
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		/* an ancestor goes active only when its child count was 0 */
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

/*
 * Propagate the effect of sending len bytes (or of going passive when
 * called as update_vf(cl, 0, 0)) up the hierarchy: update totals,
 * virtual times, fit times and active-child bookkeeping.
 */
static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

/*
 * Recompute cl_cfmin: the minimum fit time among the active children,
 * or 0 if there is no active child or any child has f == 0.
 */
static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
1098 * eligible list holds backlogged classes being sorted by their eligible times. 1099 * there is one eligible list per interface. 1100 */ 1101 1102 static ellist_t * 1103 ellist_alloc(void) 1104 { 1105 ellist_t *head; 1106 1107 head = malloc(sizeof(ellist_t *), M_ALTQ, M_WAITOK); 1108 TAILQ_INIT(head); 1109 return (head); 1110 } 1111 1112 static void 1113 ellist_destroy(ellist_t *head) 1114 { 1115 free(head, M_ALTQ); 1116 } 1117 1118 static void 1119 ellist_insert(struct hfsc_class *cl) 1120 { 1121 struct hfsc_if *hif = cl->cl_hif; 1122 struct hfsc_class *p; 1123 1124 /* check the last entry first */ 1125 if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL || 1126 p->cl_e <= cl->cl_e) { 1127 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist); 1128 return; 1129 } 1130 1131 TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) { 1132 if (cl->cl_e < p->cl_e) { 1133 TAILQ_INSERT_BEFORE(p, cl, cl_ellist); 1134 return; 1135 } 1136 } 1137 KKASSERT(0); /* should not reach here */ 1138 } 1139 1140 static void 1141 ellist_remove(struct hfsc_class *cl) 1142 { 1143 struct hfsc_if *hif = cl->cl_hif; 1144 1145 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1146 } 1147 1148 static void 1149 ellist_update(struct hfsc_class *cl) 1150 { 1151 struct hfsc_if *hif = cl->cl_hif; 1152 struct hfsc_class *p, *last; 1153 1154 /* 1155 * the eligible time of a class increases monotonically. 1156 * if the next entry has a larger eligible time, nothing to do. 
1157 */ 1158 p = TAILQ_NEXT(cl, cl_ellist); 1159 if (p == NULL || cl->cl_e <= p->cl_e) 1160 return; 1161 1162 /* check the last entry */ 1163 last = TAILQ_LAST(hif->hif_eligible, _eligible); 1164 KKASSERT(last != NULL); 1165 if (last->cl_e <= cl->cl_e) { 1166 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1167 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist); 1168 return; 1169 } 1170 1171 /* 1172 * the new position must be between the next entry 1173 * and the last entry 1174 */ 1175 while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) { 1176 if (cl->cl_e < p->cl_e) { 1177 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1178 TAILQ_INSERT_BEFORE(p, cl, cl_ellist); 1179 return; 1180 } 1181 } 1182 KKASSERT(0); /* should not reach here */ 1183 } 1184 1185 /* find the class with the minimum deadline among the eligible classes */ 1186 struct hfsc_class * 1187 ellist_get_mindl(ellist_t *head, uint64_t cur_time) 1188 { 1189 struct hfsc_class *p, *cl = NULL; 1190 1191 TAILQ_FOREACH(p, head, cl_ellist) { 1192 if (p->cl_e > cur_time) 1193 break; 1194 if (cl == NULL || p->cl_d < cl->cl_d) 1195 cl = p; 1196 } 1197 return (cl); 1198 } 1199 1200 /* 1201 * active children list holds backlogged child classes being sorted 1202 * by their virtual time. 1203 * each intermediate class has one active children list. 
1204 */ 1205 static actlist_t * 1206 actlist_alloc(void) 1207 { 1208 actlist_t *head; 1209 1210 head = malloc(sizeof(*head), M_ALTQ, M_WAITOK); 1211 TAILQ_INIT(head); 1212 return (head); 1213 } 1214 1215 static void 1216 actlist_destroy(actlist_t *head) 1217 { 1218 free(head, M_ALTQ); 1219 } 1220 static void 1221 actlist_insert(struct hfsc_class *cl) 1222 { 1223 struct hfsc_class *p; 1224 1225 /* check the last entry first */ 1226 if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL 1227 || p->cl_vt <= cl->cl_vt) { 1228 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist); 1229 return; 1230 } 1231 1232 TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) { 1233 if (cl->cl_vt < p->cl_vt) { 1234 TAILQ_INSERT_BEFORE(p, cl, cl_actlist); 1235 return; 1236 } 1237 } 1238 KKASSERT(0); /* should not reach here */ 1239 } 1240 1241 static void 1242 actlist_remove(struct hfsc_class *cl) 1243 { 1244 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist); 1245 } 1246 1247 static void 1248 actlist_update(struct hfsc_class *cl) 1249 { 1250 struct hfsc_class *p, *last; 1251 1252 /* 1253 * the virtual time of a class increases monotonically during its 1254 * backlogged period. 1255 * if the next entry has a larger virtual time, nothing to do. 
 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/*
	 * check the last entry first; if cl now has the largest virtual
	 * time, it can go straight to the tail.
	 */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry; scan forward for the first entry with a
	 * larger virtual time and re-insert cl in front of it.
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/*
 * return the first active child of cl whose fit time (cl_f) has been
 * reached at cur_time, or NULL if none fits yet.  since the list is
 * sorted by virtual time, this yields the fitting class with the
 * smallest vt.
 */
static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
1311 * 1312 * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps 1313 * ----------+------------------------------------------------------- 1314 * bytes/nsec 12.5e-6 125e-6 1250e-6 12500e-6 125000e-6 1315 * sm(500MHz) 25.0e-6 250e-6 2500e-6 25000e-6 250000e-6 1316 * sm(200MHz) 62.5e-6 625e-6 6250e-6 62500e-6 625000e-6 1317 * 1318 * nsec/byte 80000 8000 800 80 8 1319 * ism(500MHz) 40000 4000 400 40 4 1320 * ism(200MHz) 16000 1600 160 16 1.6 1321 */ 1322 #define SM_SHIFT 24 1323 #define ISM_SHIFT 10 1324 1325 #define SM_MASK ((1LL << SM_SHIFT) - 1) 1326 #define ISM_MASK ((1LL << ISM_SHIFT) - 1) 1327 1328 static __inline uint64_t 1329 seg_x2y(uint64_t x, uint64_t sm) 1330 { 1331 uint64_t y; 1332 1333 /* 1334 * compute 1335 * y = x * sm >> SM_SHIFT 1336 * but divide it for the upper and lower bits to avoid overflow 1337 */ 1338 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT); 1339 return (y); 1340 } 1341 1342 static __inline uint64_t 1343 seg_y2x(uint64_t y, uint64_t ism) 1344 { 1345 uint64_t x; 1346 1347 if (y == 0) 1348 x = 0; 1349 else if (ism == HT_INFINITY) 1350 x = HT_INFINITY; 1351 else 1352 x = (y >> ISM_SHIFT) * ism + (((y & ISM_MASK) * ism) >> ISM_SHIFT); 1353 1354 return (x); 1355 } 1356 1357 static __inline uint64_t 1358 m2sm(u_int m) 1359 { 1360 uint64_t sm; 1361 1362 sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq; 1363 return (sm); 1364 } 1365 1366 static __inline uint64_t 1367 m2ism(u_int m) 1368 { 1369 uint64_t ism; 1370 1371 if (m == 0) 1372 ism = HT_INFINITY; 1373 else 1374 ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m; 1375 return (ism); 1376 } 1377 1378 static __inline uint64_t 1379 d2dx(u_int d) 1380 { 1381 uint64_t dx; 1382 1383 dx = ((uint64_t)d * machclk_freq) / 1000; 1384 return (dx); 1385 } 1386 1387 static u_int 1388 sm2m(uint64_t sm) 1389 { 1390 uint64_t m; 1391 1392 m = (sm * 8 * machclk_freq) >> SM_SHIFT; 1393 return ((u_int)m); 1394 } 1395 1396 static u_int 1397 dx2d(uint64_t dx) 1398 { 1399 uint64_t d; 1400 1401 d = dx * 
1000 / machclk_freq; 1402 return ((u_int)d); 1403 } 1404 1405 static void 1406 sc2isc(struct service_curve *sc, struct internal_sc *isc) 1407 { 1408 isc->sm1 = m2sm(sc->m1); 1409 isc->ism1 = m2ism(sc->m1); 1410 isc->dx = d2dx(sc->d); 1411 isc->dy = seg_x2y(isc->dx, isc->sm1); 1412 isc->sm2 = m2sm(sc->m2); 1413 isc->ism2 = m2ism(sc->m2); 1414 } 1415 1416 /* 1417 * initialize the runtime service curve with the given internal 1418 * service curve starting at (x, y). 1419 */ 1420 static void 1421 rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y) 1422 { 1423 rtsc->x = x; 1424 rtsc->y = y; 1425 rtsc->sm1 = isc->sm1; 1426 rtsc->ism1 = isc->ism1; 1427 rtsc->dx = isc->dx; 1428 rtsc->dy = isc->dy; 1429 rtsc->sm2 = isc->sm2; 1430 rtsc->ism2 = isc->ism2; 1431 } 1432 1433 /* 1434 * calculate the y-projection of the runtime service curve by the 1435 * given x-projection value 1436 */ 1437 static uint64_t 1438 rtsc_y2x(struct runtime_sc *rtsc, uint64_t y) 1439 { 1440 uint64_t x; 1441 1442 if (y < rtsc->y) { 1443 x = rtsc->x; 1444 } else if (y <= rtsc->y + rtsc->dy) { 1445 /* x belongs to the 1st segment */ 1446 if (rtsc->dy == 0) 1447 x = rtsc->x + rtsc->dx; 1448 else 1449 x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1); 1450 } else { 1451 /* x belongs to the 2nd segment */ 1452 x = rtsc->x + rtsc->dx 1453 + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2); 1454 } 1455 return (x); 1456 } 1457 1458 static uint64_t 1459 rtsc_x2y(struct runtime_sc *rtsc, uint64_t x) 1460 { 1461 uint64_t y; 1462 1463 if (x <= rtsc->x) { 1464 y = rtsc->y; 1465 } else if (x <= rtsc->x + rtsc->dx) { 1466 /* y belongs to the 1st segment */ 1467 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1); 1468 } else 1469 /* y belongs to the 2nd segment */ 1470 y = rtsc->y + rtsc->dy 1471 + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2); 1472 return (y); 1473 } 1474 1475 /* 1476 * update the runtime service curve by taking the minimum of the current 1477 * runtime service curve and the service 
 * curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/*
		 * service curve is convex (sm1 <= sm2):
		 * the minimum is either the curve as-is or isc shifted
		 * to start at (x, y); just move the origin.
		 */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave (sm1 > sm2)
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * note: sm1 > sm2 in this branch, so the divisor is non-zero.
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	/* install the merged (minimum) curve */
	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

/*
 * export the state of class cl into the stats structure sp.
 * service curve parameters are converted back from the internal
 * machine-clock based units to external units (bits/sec, msec);
 * a curve the class does not have is reported as all zeroes.
 */
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	/* real-time service curve (rsc) */
	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	/* link-sharing (fair service) curve (fsc) */
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	/* upper-limit service curve (usc) */
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	/* current deadline, eligible, virtual and fit times */
	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	/* snapshot of the machine clock so the reader can convert times */
	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
red_getstats(cl->cl_red, &sp->red[0]); 1605 #endif 1606 #ifdef ALTQ_RIO 1607 if (q_is_rio(cl->cl_q)) 1608 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]); 1609 #endif 1610 } 1611 1612 /* convert a class handle to the corresponding class pointer */ 1613 static struct hfsc_class * 1614 clh_to_clp(struct hfsc_if *hif, uint32_t chandle) 1615 { 1616 int i; 1617 struct hfsc_class *cl; 1618 1619 if (chandle == 0) 1620 return (NULL); 1621 /* 1622 * first, try optimistically the slot matching the lower bits of 1623 * the handle. if it fails, do the linear table search. 1624 */ 1625 i = chandle % HFSC_MAX_CLASSES; 1626 if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle) 1627 return (cl); 1628 for (i = 0; i < HFSC_MAX_CLASSES; i++) 1629 if ((cl = hif->hif_class_tbl[i]) != NULL && 1630 cl->cl_handle == chandle) 1631 return (cl); 1632 return (NULL); 1633 } 1634 1635 #endif /* ALTQ_HFSC */ 1636