1 /* $KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $ */ 2 /* $DragonFly: src/sys/net/altq/altq_hfsc.c,v 1.8 2006/12/22 23:44:55 swildner Exp $ */ 3 4 /* 5 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved. 6 * 7 * Permission to use, copy, modify, and distribute this software and 8 * its documentation is hereby granted (including for commercial or 9 * for-profit use), provided that both the copyright notice and this 10 * permission notice appear in all copies of the software, derivative 11 * works, or modified versions, and any portions thereof. 12 * 13 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF 14 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS 15 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED 16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 25 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26 * DAMAGE. 27 * 28 * Carnegie Mellon encourages (but does not require) users of this 29 * software to return any improvements or extensions that they make, 30 * and to grant Carnegie Mellon the rights to redistribute these 31 * changes without encumbrance. 32 */ 33 /* 34 * H-FSC is described in Proceedings of SIGCOMM'97, 35 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing, 36 * Real-Time and Priority Service" 37 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng. 
38 * 39 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing. 40 * when a class has an upperlimit, the fit-time is computed from the 41 * upperlimit service curve. the link-sharing scheduler does not schedule 42 * a class whose fit-time exceeds the current time. 43 */ 44 45 #include "opt_altq.h" 46 #include "opt_inet.h" 47 #include "opt_inet6.h" 48 49 #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */ 50 51 #include <sys/param.h> 52 #include <sys/malloc.h> 53 #include <sys/mbuf.h> 54 #include <sys/socket.h> 55 #include <sys/systm.h> 56 #include <sys/errno.h> 57 #include <sys/queue.h> 58 #include <sys/thread.h> 59 60 #include <net/if.h> 61 #include <net/ifq_var.h> 62 #include <netinet/in.h> 63 64 #include <net/pf/pfvar.h> 65 #include <net/altq/altq.h> 66 #include <net/altq/altq_hfsc.h> 67 68 #include <sys/thread2.h> 69 70 /* 71 * function prototypes 72 */ 73 static int hfsc_clear_interface(struct hfsc_if *); 74 static int hfsc_request(struct ifaltq *, int, void *); 75 static void hfsc_purge(struct hfsc_if *); 76 static struct hfsc_class *hfsc_class_create(struct hfsc_if *, 77 struct service_curve *, 78 struct service_curve *, 79 struct service_curve *, 80 struct hfsc_class *, int, int, int); 81 static int hfsc_class_destroy(struct hfsc_class *); 82 static struct hfsc_class *hfsc_nextclass(struct hfsc_class *); 83 static int hfsc_enqueue(struct ifaltq *, struct mbuf *, 84 struct altq_pktattr *); 85 static struct mbuf *hfsc_dequeue(struct ifaltq *, struct mbuf *, int); 86 87 static int hfsc_addq(struct hfsc_class *, struct mbuf *); 88 static struct mbuf *hfsc_getq(struct hfsc_class *); 89 static struct mbuf *hfsc_pollq(struct hfsc_class *); 90 static void hfsc_purgeq(struct hfsc_class *); 91 92 static void update_cfmin(struct hfsc_class *); 93 static void set_active(struct hfsc_class *, int); 94 static void set_passive(struct hfsc_class *); 95 96 static void init_ed(struct hfsc_class *, int); 97 static void update_ed(struct 
hfsc_class *, int); 98 static void update_d(struct hfsc_class *, int); 99 static void init_vf(struct hfsc_class *, int); 100 static void update_vf(struct hfsc_class *, int, uint64_t); 101 static ellist_t *ellist_alloc(void); 102 static void ellist_destroy(ellist_t *); 103 static void ellist_insert(struct hfsc_class *); 104 static void ellist_remove(struct hfsc_class *); 105 static void ellist_update(struct hfsc_class *); 106 struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t); 107 static actlist_t *actlist_alloc(void); 108 static void actlist_destroy(actlist_t *); 109 static void actlist_insert(struct hfsc_class *); 110 static void actlist_remove(struct hfsc_class *); 111 static void actlist_update(struct hfsc_class *); 112 113 static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t); 114 115 static __inline uint64_t seg_x2y(uint64_t, uint64_t); 116 static __inline uint64_t seg_y2x(uint64_t, uint64_t); 117 static __inline uint64_t m2sm(u_int); 118 static __inline uint64_t m2ism(u_int); 119 static __inline uint64_t d2dx(u_int); 120 static u_int sm2m(uint64_t); 121 static u_int dx2d(uint64_t); 122 123 static void sc2isc(struct service_curve *, struct internal_sc *); 124 static void rtsc_init(struct runtime_sc *, struct internal_sc *, 125 uint64_t, uint64_t); 126 static uint64_t rtsc_y2x(struct runtime_sc *, uint64_t); 127 static uint64_t rtsc_x2y(struct runtime_sc *, uint64_t); 128 static void rtsc_min(struct runtime_sc *, struct internal_sc *, 129 uint64_t, uint64_t); 130 131 static void get_class_stats(struct hfsc_classstats *, struct hfsc_class *); 132 static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t); 133 134 /* 135 * macros 136 */ 137 #define is_a_parent_class(cl) ((cl)->cl_children != NULL) 138 139 #define HT_INFINITY 0xffffffffffffffffLL /* infinite time value */ 140 141 int 142 hfsc_pfattach(struct pf_altq *a) 143 { 144 struct ifnet *ifp; 145 int error; 146 147 if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == 
NULL) 148 return (EINVAL); 149 crit_enter(); 150 error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc, 151 hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL); 152 crit_exit(); 153 return (error); 154 } 155 156 int 157 hfsc_add_altq(struct pf_altq *a) 158 { 159 struct hfsc_if *hif; 160 struct ifnet *ifp; 161 162 if ((ifp = ifunit(a->ifname)) == NULL) 163 return (EINVAL); 164 if (!ifq_is_ready(&ifp->if_snd)) 165 return (ENODEV); 166 167 hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO); 168 169 hif->hif_eligible = ellist_alloc(); 170 hif->hif_ifq = &ifp->if_snd; 171 ifq_purge(&ifp->if_snd); 172 173 /* keep the state in pf_altq */ 174 a->altq_disc = hif; 175 176 return (0); 177 } 178 179 int 180 hfsc_remove_altq(struct pf_altq *a) 181 { 182 struct hfsc_if *hif; 183 184 if ((hif = a->altq_disc) == NULL) 185 return (EINVAL); 186 a->altq_disc = NULL; 187 188 hfsc_clear_interface(hif); 189 hfsc_class_destroy(hif->hif_rootclass); 190 191 ellist_destroy(hif->hif_eligible); 192 193 kfree(hif, M_ALTQ); 194 195 return (0); 196 } 197 198 int 199 hfsc_add_queue(struct pf_altq *a) 200 { 201 struct hfsc_if *hif; 202 struct hfsc_class *cl, *parent; 203 struct hfsc_opts *opts; 204 struct service_curve rtsc, lssc, ulsc; 205 206 if ((hif = a->altq_disc) == NULL) 207 return (EINVAL); 208 209 opts = &a->pq_u.hfsc_opts; 210 211 if (a->parent_qid == HFSC_NULLCLASS_HANDLE && hif->hif_rootclass == NULL) 212 parent = NULL; 213 else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL) 214 return (EINVAL); 215 216 if (a->qid == 0) 217 return (EINVAL); 218 219 if (clh_to_clp(hif, a->qid) != NULL) 220 return (EBUSY); 221 222 rtsc.m1 = opts->rtsc_m1; 223 rtsc.d = opts->rtsc_d; 224 rtsc.m2 = opts->rtsc_m2; 225 lssc.m1 = opts->lssc_m1; 226 lssc.d = opts->lssc_d; 227 lssc.m2 = opts->lssc_m2; 228 ulsc.m1 = opts->ulsc_m1; 229 ulsc.d = opts->ulsc_d; 230 ulsc.m2 = opts->ulsc_m2; 231 232 cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit, 233 opts->flags, 
a->qid); 234 if (cl == NULL) 235 return (ENOMEM); 236 237 return (0); 238 } 239 240 int 241 hfsc_remove_queue(struct pf_altq *a) 242 { 243 struct hfsc_if *hif; 244 struct hfsc_class *cl; 245 246 if ((hif = a->altq_disc) == NULL) 247 return (EINVAL); 248 249 if ((cl = clh_to_clp(hif, a->qid)) == NULL) 250 return (EINVAL); 251 252 return (hfsc_class_destroy(cl)); 253 } 254 255 int 256 hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) 257 { 258 struct hfsc_if *hif; 259 struct hfsc_class *cl; 260 struct hfsc_classstats stats; 261 int error = 0; 262 263 if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL) 264 return (EBADF); 265 266 if ((cl = clh_to_clp(hif, a->qid)) == NULL) 267 return (EINVAL); 268 269 if (*nbytes < sizeof(stats)) 270 return (EINVAL); 271 272 get_class_stats(&stats, cl); 273 274 if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0) 275 return (error); 276 *nbytes = sizeof(stats); 277 return (0); 278 } 279 280 /* 281 * bring the interface back to the initial state by discarding 282 * all the filters and classes except the root class. 
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	/* no hierarchy built yet: nothing to tear down */
	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

/*
 * ALTQ request dispatcher for this discipline.  Only ALTRQ_PURGE
 * (discard all queued packets) is handled; other requests succeed
 * silently.
 */
static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	crit_exit();
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	/* walk every class in the tree and flush any non-empty queue */
	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

/*
 * Create a new HFSC class.
 *  rsc: real-time service curve, fsc: link-sharing (fair) service
 *  curve, usc: upper-limit service curve.  Any curve that is NULL or
 *  all-zero disables the corresponding behavior for this class.
 *  parent is NULL only for the root class.  qlimit of 0 selects the
 *  default of 50 packets.  Returns the new class or NULL on failure.
 */
struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
		  struct service_curve *fsc, struct service_curve *usc,
		  struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	/* note: qlimit()/qtype()/qlen() are accessor macros on the queue */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		/* use the largest m2 rate of the three curves for RED */
		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		/*
		 * average packet transmission time in nsec:
		 * mtu / (m2/8 bytes per sec), scaled to nanoseconds.
		 */
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			/* RED thresholds: 10% and 30% of the queue limit */
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		/*
		 * NOTE(review): kmalloc with M_WAITOK should not return
		 * NULL, so this check (and the one for cl_usc below)
		 * looks unreachable; kept as-is.
		 */
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		/* append to the end of the parent's sibling chain */
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

 err_ret:
	/* undo all partial allocations made above */
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (NULL);
}

/*
 * Destroy a class.  Fails with EBUSY if the class still has children;
 * any queued packets are purged first.  The class is unlinked from its
 * parent's sibling chain and from the class table, then freed.
 */
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		/* unlink cl from its parent's children/sibling chain */
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	/* remove cl from the class table */
	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	cl->cl_hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;

	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 *
 * (pre-order traversal: children first, then siblings, then climb back
 * up to the nearest ancestor that has an unvisited sibling)
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		return (ENOBUFS);
	}
	crit_enter();
	if (m->m_pkthdr.fw_flags & ALTQ_MBUF_TAGGED)
		cl = clh_to_clp(hif, m->m_pkthdr.altq_qid);
	else
		cl = NULL;
	/* unknown qid or an interior class: fall back to the default class */
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			crit_exit();
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	/* remember the length before handing off ownership of m */
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}
	ifq->ifq_len++;
	cl->cl_hif->hif_packets++;

	/* successfully queued.  first packet makes the class active. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));
	crit_exit();
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	crit_enter();
	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		/* a REMOVE right after POLL must return the polled class */
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time)) != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						kprintf("%d fit but none found\n",fits);
#endif
					m = NULL;
					goto done;
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			/* remember the class for the matching REMOVE */
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			goto done;
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ifq->ifq_len--;
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	/* cl_cumul (cumulative real-time work) only grows on rt service */
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}
done:
	crit_exit();
	KKASSERT(mpolled == NULL || m == mpolled);
	return (m);
}

/*
 * Add an mbuf to a class queue.  Delegates to RED/RIO when configured,
 * otherwise tail-drops when the queue limit is reached.
 * Returns 0 on success; on failure the mbuf has been freed.
 */
static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

/* remove and return the head packet of a class queue (RED/RIO aware) */
static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

/* return the head packet without removing it */
static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

/* drop every packet queued on this class and make the class passive */
static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		cl->cl_hif->hif_ifq->ifq_len--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

/*
 * a class becomes active (backlogged): enter it into the eligible list
 * (real-time) and/or the virtual-time machinery (link-sharing).
 * len is the length of the packet that made the class active.
 */
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

/* a class becomes passive (queue drained) */
static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

/*
 * initialize the eligible time (e) and deadline (d) of a class that
 * just became active, and insert it into the eligible list.
 * next_len is the length of the head packet.
 */
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

/* recompute e and d after a real-time dequeue and re-sort the list */
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

/* recompute only the deadline (after a link-sharing dequeue) */
static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

/*
 * initialize the virtual time (vt) and fit time (f) of a class that
 * just became active, propagating activation up the class hierarchy.
 */
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;	/* read lazily, only if an upper-limit curve exists */
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		/* only the first 0 -> 1 transition activates an ancestor */
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

/*
 * account len bytes of service to a class and all its ancestors and
 * update virtual times and fit times; when the class queue is empty
 * (or len == 0 from hfsc_purgeq) the class is removed from the actlist.
 */
static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		/* no link-sharing curve, or not currently active */
		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		/* only the last active child deactivates the ancestor */
		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

/*
 * recompute a parent's cl_cfmin: the minimum fit time among its active
 * children (0 if any child has f == 0 or there is no active child).
 */
static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
1100 * eligible list holds backlogged classes being sorted by their eligible times. 1101 * there is one eligible list per interface. 1102 */ 1103 1104 static ellist_t * 1105 ellist_alloc(void) 1106 { 1107 ellist_t *head; 1108 1109 head = kmalloc(sizeof(ellist_t *), M_ALTQ, M_WAITOK); 1110 TAILQ_INIT(head); 1111 return (head); 1112 } 1113 1114 static void 1115 ellist_destroy(ellist_t *head) 1116 { 1117 kfree(head, M_ALTQ); 1118 } 1119 1120 static void 1121 ellist_insert(struct hfsc_class *cl) 1122 { 1123 struct hfsc_if *hif = cl->cl_hif; 1124 struct hfsc_class *p; 1125 1126 /* check the last entry first */ 1127 if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL || 1128 p->cl_e <= cl->cl_e) { 1129 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist); 1130 return; 1131 } 1132 1133 TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) { 1134 if (cl->cl_e < p->cl_e) { 1135 TAILQ_INSERT_BEFORE(p, cl, cl_ellist); 1136 return; 1137 } 1138 } 1139 KKASSERT(0); /* should not reach here */ 1140 } 1141 1142 static void 1143 ellist_remove(struct hfsc_class *cl) 1144 { 1145 struct hfsc_if *hif = cl->cl_hif; 1146 1147 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1148 } 1149 1150 static void 1151 ellist_update(struct hfsc_class *cl) 1152 { 1153 struct hfsc_if *hif = cl->cl_hif; 1154 struct hfsc_class *p, *last; 1155 1156 /* 1157 * the eligible time of a class increases monotonically. 1158 * if the next entry has a larger eligible time, nothing to do. 
1159 */ 1160 p = TAILQ_NEXT(cl, cl_ellist); 1161 if (p == NULL || cl->cl_e <= p->cl_e) 1162 return; 1163 1164 /* check the last entry */ 1165 last = TAILQ_LAST(hif->hif_eligible, _eligible); 1166 KKASSERT(last != NULL); 1167 if (last->cl_e <= cl->cl_e) { 1168 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1169 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist); 1170 return; 1171 } 1172 1173 /* 1174 * the new position must be between the next entry 1175 * and the last entry 1176 */ 1177 while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) { 1178 if (cl->cl_e < p->cl_e) { 1179 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist); 1180 TAILQ_INSERT_BEFORE(p, cl, cl_ellist); 1181 return; 1182 } 1183 } 1184 KKASSERT(0); /* should not reach here */ 1185 } 1186 1187 /* find the class with the minimum deadline among the eligible classes */ 1188 struct hfsc_class * 1189 ellist_get_mindl(ellist_t *head, uint64_t cur_time) 1190 { 1191 struct hfsc_class *p, *cl = NULL; 1192 1193 TAILQ_FOREACH(p, head, cl_ellist) { 1194 if (p->cl_e > cur_time) 1195 break; 1196 if (cl == NULL || p->cl_d < cl->cl_d) 1197 cl = p; 1198 } 1199 return (cl); 1200 } 1201 1202 /* 1203 * active children list holds backlogged child classes being sorted 1204 * by their virtual time. 1205 * each intermediate class has one active children list. 
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

/* release an active-children list head (the list must be empty) */
static void
actlist_destroy(actlist_t *head)
{
	kfree(head, M_ALTQ);
}

/*
 * insert a class into its parent's active-children list, keeping the
 * list sorted by ascending virtual time (cl_vt).
 */
static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/* remove a class from its parent's active-children list */
static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

/*
 * move a class to its new sorted position after its virtual time
 * increased.
 */
static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/*
 * return the first active child of cl whose fit time has been reached
 * (cl_f <= cur_time), i.e. the minimum-vt child allowed to be
 * scheduled by the link-sharing criterion; NULL if none fits.
 */
static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
1313 * 1314 * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps 1315 * ----------+------------------------------------------------------- 1316 * bytes/nsec 12.5e-6 125e-6 1250e-6 12500e-6 125000e-6 1317 * sm(500MHz) 25.0e-6 250e-6 2500e-6 25000e-6 250000e-6 1318 * sm(200MHz) 62.5e-6 625e-6 6250e-6 62500e-6 625000e-6 1319 * 1320 * nsec/byte 80000 8000 800 80 8 1321 * ism(500MHz) 40000 4000 400 40 4 1322 * ism(200MHz) 16000 1600 160 16 1.6 1323 */ 1324 #define SM_SHIFT 24 1325 #define ISM_SHIFT 10 1326 1327 #define SM_MASK ((1LL << SM_SHIFT) - 1) 1328 #define ISM_MASK ((1LL << ISM_SHIFT) - 1) 1329 1330 static __inline uint64_t 1331 seg_x2y(uint64_t x, uint64_t sm) 1332 { 1333 uint64_t y; 1334 1335 /* 1336 * compute 1337 * y = x * sm >> SM_SHIFT 1338 * but divide it for the upper and lower bits to avoid overflow 1339 */ 1340 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT); 1341 return (y); 1342 } 1343 1344 static __inline uint64_t 1345 seg_y2x(uint64_t y, uint64_t ism) 1346 { 1347 uint64_t x; 1348 1349 if (y == 0) 1350 x = 0; 1351 else if (ism == HT_INFINITY) 1352 x = HT_INFINITY; 1353 else 1354 x = (y >> ISM_SHIFT) * ism + (((y & ISM_MASK) * ism) >> ISM_SHIFT); 1355 1356 return (x); 1357 } 1358 1359 static __inline uint64_t 1360 m2sm(u_int m) 1361 { 1362 uint64_t sm; 1363 1364 sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq; 1365 return (sm); 1366 } 1367 1368 static __inline uint64_t 1369 m2ism(u_int m) 1370 { 1371 uint64_t ism; 1372 1373 if (m == 0) 1374 ism = HT_INFINITY; 1375 else 1376 ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m; 1377 return (ism); 1378 } 1379 1380 static __inline uint64_t 1381 d2dx(u_int d) 1382 { 1383 uint64_t dx; 1384 1385 dx = ((uint64_t)d * machclk_freq) / 1000; 1386 return (dx); 1387 } 1388 1389 static u_int 1390 sm2m(uint64_t sm) 1391 { 1392 uint64_t m; 1393 1394 m = (sm * 8 * machclk_freq) >> SM_SHIFT; 1395 return ((u_int)m); 1396 } 1397 1398 static u_int 1399 dx2d(uint64_t dx) 1400 { 1401 uint64_t d; 1402 1403 d = dx * 
1000 / machclk_freq; 1404 return ((u_int)d); 1405 } 1406 1407 static void 1408 sc2isc(struct service_curve *sc, struct internal_sc *isc) 1409 { 1410 isc->sm1 = m2sm(sc->m1); 1411 isc->ism1 = m2ism(sc->m1); 1412 isc->dx = d2dx(sc->d); 1413 isc->dy = seg_x2y(isc->dx, isc->sm1); 1414 isc->sm2 = m2sm(sc->m2); 1415 isc->ism2 = m2ism(sc->m2); 1416 } 1417 1418 /* 1419 * initialize the runtime service curve with the given internal 1420 * service curve starting at (x, y). 1421 */ 1422 static void 1423 rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y) 1424 { 1425 rtsc->x = x; 1426 rtsc->y = y; 1427 rtsc->sm1 = isc->sm1; 1428 rtsc->ism1 = isc->ism1; 1429 rtsc->dx = isc->dx; 1430 rtsc->dy = isc->dy; 1431 rtsc->sm2 = isc->sm2; 1432 rtsc->ism2 = isc->ism2; 1433 } 1434 1435 /* 1436 * calculate the y-projection of the runtime service curve by the 1437 * given x-projection value 1438 */ 1439 static uint64_t 1440 rtsc_y2x(struct runtime_sc *rtsc, uint64_t y) 1441 { 1442 uint64_t x; 1443 1444 if (y < rtsc->y) { 1445 x = rtsc->x; 1446 } else if (y <= rtsc->y + rtsc->dy) { 1447 /* x belongs to the 1st segment */ 1448 if (rtsc->dy == 0) 1449 x = rtsc->x + rtsc->dx; 1450 else 1451 x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1); 1452 } else { 1453 /* x belongs to the 2nd segment */ 1454 x = rtsc->x + rtsc->dx 1455 + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2); 1456 } 1457 return (x); 1458 } 1459 1460 static uint64_t 1461 rtsc_x2y(struct runtime_sc *rtsc, uint64_t x) 1462 { 1463 uint64_t y; 1464 1465 if (x <= rtsc->x) { 1466 y = rtsc->y; 1467 } else if (x <= rtsc->x + rtsc->dx) { 1468 /* y belongs to the 1st segment */ 1469 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1); 1470 } else 1471 /* y belongs to the 2nd segment */ 1472 y = rtsc->y + rtsc->dy 1473 + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2); 1474 return (y); 1475 } 1476 1477 /* 1478 * update the runtime service curve by taking the minimum of the current 1479 * runtime service curve and the service 
 * curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

/*
 * fill the user-visible statistics structure sp from class cl:
 * configured service curves (converted back to external units),
 * scheduler state, and queue/packet counters.
 */
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	/* real-time service curve (all zeroes when not configured) */
	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	/* link-sharing service curve */
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	/* upper-limit service curve */
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	/* snapshot the clock so userland can relate the tick counters */
	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/*
 * convert a class handle to the corresponding class pointer.
 * returns NULL when the handle is 0 or not found in the table.
 */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#endif /* ALTQ_HFSC */