/*	$KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
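/*
 * background note (added for exposition): a service curve is specified
 * by the triple (m1, d, m2) and is the two-piece linear function
 *
 *	sc(t) = m1 * t			for t <= d
 *	sc(t) = m1 * d + m2 * (t - d)	for t >  d
 *
 * i.e. slope m1 for the first d milliseconds, slope m2 afterwards.
 * each class may carry up to three such curves: real-time (rsc),
 * link-sharing (fsc), and upperlimit (usc).
 */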

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>

#include <sys/thread2.h>

#define HFSC_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define HFSC_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
#define HFSC_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])

/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq_subque *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
					    struct service_curve *,
					    struct service_curve *,
					    struct service_curve *,
					    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq_subque *, struct mbuf *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, uint64_t);
static ellist_t *ellist_alloc(void);
static void	ellist_destroy(ellist_t *);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
static actlist_t *actlist_alloc(void);
static void	actlist_destroy(actlist_t *);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);

static __inline uint64_t	seg_x2y(uint64_t, uint64_t);
static __inline uint64_t	seg_y2x(uint64_t, uint64_t);
static __inline uint64_t	m2sm(u_int);
static __inline uint64_t	m2ism(u_int);
static __inline uint64_t	d2dx(u_int);
static u_int			sm2m(uint64_t);
static u_int			dx2d(uint64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
			  uint64_t, uint64_t);
static uint64_t	rtsc_y2x(struct runtime_sc *, uint64_t);
static uint64_t	rtsc_x2y(struct runtime_sc *, uint64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
			  uint64_t, uint64_t);

static void	get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

int
hfsc_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_HFSC, a->altq_disc, ifq_mapsubq_default,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);

	hif->hif_eligible = ellist_alloc();
	hif->hif_ifq = &ifp->if_snd;
	ifq_purge_all(&ifp->if_snd);

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	hfsc_clear_interface(hif);
	hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	kfree(hif, M_ALTQ);

	return (0);
}

static int
hfsc_add_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	KKASSERT(a->qid != 0);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
	    opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_add_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}

static int
hfsc_remove_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_remove_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	struct ifaltq *ifq;
	int error = 0;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL) {
		HFSC_UNLOCK(ifq);
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	HFSC_UNLOCK(ifq);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == HFSC_SUBQ_INDEX) {
			hfsc_purge(hif);
		} else {
			/*
			 * Race happened, the unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX].ifq_len = 0;
}

static struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
		  struct service_curve *fsc, struct service_curve *usc,
		  struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
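		/*
		 * note (added): red_pkttime below estimates the nominal
		 * transmission time of one MTU-sized packet in nanoseconds
		 * at the fastest configured m2 rate; m2 is in bits/sec, so
		 * m2 / 8 is bytes/sec.  the 1 second fallback avoids a
		 * divide by zero for tiny rates (m2 < 8 bits/sec).
		 */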
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL) {
		hif->hif_class_tbl[i] = cl;
	} else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			/* no free slot; undo the accounting done above */
			hif->hif_classes--;
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	struct hfsc_if *hif;
	int i;

	if (cl == NULL)
		return (0);
	hif = cl->cl_hif;

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (hif->hif_class_tbl[i] == cl) {
			hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == hif->hif_rootclass)
		hif->hif_rootclass = NULL;
	if (cl == hif->hif_defaultclass)
		hif->hif_defaultclass = NULL;
	if (cl == hif->hif_pollcache)
		hif->hif_pollcache = NULL;

	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
	     struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return ENOBUFS;
	}

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		return (ENOBUFS);
	}
	crit_enter();
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(hif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			crit_exit();
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}
	ifsq->ifq_len++;
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));
	crit_exit();
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
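/*
 * illustrative calling pattern (added; simplified sketch, not taken
 * from a specific driver):
 *
 *	m = hfsc_dequeue(ifsq, NULL, ALTDQ_POLL);	(peek only)
 *	if (m != NULL && driver has room)
 *		m = hfsc_dequeue(ifsq, m, ALTDQ_REMOVE);
 *
 * the class picked by the poll is remembered in hif_pollcache so that
 * the following ALTDQ_REMOVE returns the same packet.
 */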
static struct mbuf *
hfsc_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	crit_enter();
	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						kprintf("%d fit but none found\n", fits);
#endif
					m = NULL;
					goto done;
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			goto done;
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ifsq->ifq_len--;
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}
done:
	crit_exit();
	KKASSERT(mpolled == NULL || m == mpolled);
	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		cl->cl_hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX].ifq_len--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	KKASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, uint64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL ||
	    p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec	100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec	12.5e-6    125e-6    1250e-6    12500e-6   125000e-6
 *  sm(500MHz)	25.0e-6    250e-6    2500e-6    25000e-6   250000e-6
 *  sm(200MHz)	62.5e-6    625e-6    6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte	80000      8000      800        80         8
 *  ism(500MHz)	40000      4000      400        40         4
 *  ism(200MHz)	16000      1600      160        16         1.6
 */
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline uint64_t
seg_x2y(uint64_t x, uint64_t sm)
{
	uint64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline uint64_t
seg_y2x(uint64_t y, uint64_t ism)
{
	uint64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

	return (x);
}

static __inline uint64_t
m2sm(u_int m)
{
	uint64_t sm;

	sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline uint64_t
m2ism(u_int m)
{
	uint64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline uint64_t
d2dx(u_int d)
{
	uint64_t dx;

	dx = ((uint64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(uint64_t sm)
{
	uint64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}
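
/*
 * worked example (added for illustration; assumes machclk_freq = 1GHz):
 *	m = 100Mbps = 12.5e6 bytes/sec = 0.0125 bytes/tick
 *	m2sm(m) = (100e6 << 24) / 8 / 1e9 = 209715  (~ 0.0125 * 2^24)
 * and the reverse conversion recovers the rate up to the bits rounded
 * off below SM_SHIFT:
 *	sm2m(209715) = (209715 * 8 * 1e9) >> 24 = 99999904 bits/sec
 */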

static u_int
dx2d(uint64_t dx)
{
	uint64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
	  uint64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-projection value
 */
static uint64_t
rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
{
	uint64_t x;

	if (y < rtsc->y) {
		x = rtsc->x;
	} else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static uint64_t
rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
{
	uint64_t y;

	if (x <= rtsc->x) {
		y = rtsc->y;
	} else if (x <= rtsc->x + rtsc->dx) {
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	} else {
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	}
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
	 uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
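	/*
	 * derivation of dx (added): expanding the condition above with
	 * seg_x2y(dx, sm) = dx * sm >> SM_SHIFT gives
	 *	dx * sm1 >> SM_SHIFT == (dx * sm2 >> SM_SHIFT) + (y1 - y)
	 * so dx * (sm1 - sm2) == (y1 - y) << SM_SHIFT, which yields the
	 * division above.  sm1 > sm2 holds here since the curve is concave.
	 */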
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	}
	return (NULL);
}

#endif /* ALTQ_HFSC */