/*	$KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>

#include <sys/thread2.h>

#define HFSC_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define HFSC_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
#define HFSC_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])

/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq_subque *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
					    struct service_curve *,
					    struct service_curve *,
					    struct service_curve *,
					    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq_subque *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, uint64_t);
static ellist_t *ellist_alloc(void);
static void	ellist_destroy(ellist_t *);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
static actlist_t *actlist_alloc(void);
static void	actlist_destroy(actlist_t *);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);

static __inline uint64_t	seg_x2y(uint64_t, uint64_t);
static __inline uint64_t	seg_y2x(uint64_t, uint64_t);
static __inline uint64_t	m2sm(u_int);
static __inline uint64_t	m2ism(u_int);
static __inline uint64_t	d2dx(u_int);
static u_int			sm2m(uint64_t);
static u_int			dx2d(uint64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
			  uint64_t, uint64_t);
static uint64_t	rtsc_y2x(struct runtime_sc *, uint64_t);
static uint64_t	rtsc_x2y(struct runtime_sc *, uint64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
			 uint64_t, uint64_t);

static void	get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

int
hfsc_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_HFSC, a->altq_disc, ifq_mapsubq_default,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);

	hif->hif_eligible = ellist_alloc();
	hif->hif_ifq = &ifp->if_snd;
	ifq_purge_all(&ifp->if_snd);

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	hfsc_clear_interface(hif);
	hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	kfree(hif, M_ALTQ);

	return (0);
}

static int
hfsc_add_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	KKASSERT(a->qid != 0);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
			       opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_add_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}

static int
hfsc_remove_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_remove_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	struct ifaltq *ifq;
	int error = 0;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL) {
		HFSC_UNLOCK(ifq);
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	HFSC_UNLOCK(ifq);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == HFSC_SUBQ_INDEX) {
			hfsc_purge(hif);
		} else {
			/*
			 * Race happened, the unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		ALTQ_SQ_CNTR_RESET(&hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX]);
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
		  struct service_curve *fsc, struct service_curve *usc,
		  struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
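			/*
			 * use 10% and 30% of the queue limit as the
			 * RED minimum/maximum thresholds.
			 */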
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	struct hfsc_if *hif;
	int i;

	if (cl == NULL)
		return (0);
	hif = cl->cl_hif;

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (hif->hif_class_tbl[i] == cl) {
			hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == hif->hif_rootclass)
		hif->hif_rootclass = NULL;
	if (cl == hif->hif_defaultclass)
		hif->hif_defaultclass = NULL;
	if (cl == hif->hif_pollcache)
		hif->hif_pollcache = NULL;

	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
	     struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return ENOBUFS;
	}

	/* grab class set by classifier */
	M_ASSERTPKTHDR(m);
	crit_enter();
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(hif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			crit_exit();
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}
	ALTQ_SQ_CNTR_INC(ifsq, len);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));
	crit_exit();
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
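 *	with the poll cache disabled (see the #ifdef foo block below),
 *	ALTDQ_POLL just peeks at the head of the selected class's queue
 *	via hfsc_pollq().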
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	crit_enter();
	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						kprintf("%d fit but none found\n", fits);
#endif
					m = NULL;
					goto done;
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
#ifdef foo
			/*
			 * Don't use poll cache; the poll/dequeue
			 * model is no longer applicable to SMP
			 * system.  e.g.
			 *    CPU-A            CPU-B
			 *      :                :
			 *    poll               :
			 *      :              poll
			 *    dequeue (+)        :
			 *
			 * The dequeue at (+) will hit the poll
			 * cache set by CPU-B.
			 */
			hif->hif_pollcache = cl;
#endif
			m = hfsc_pollq(cl);
			goto done;
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ALTQ_SQ_CNTR_DEC(ifsq, len);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}
done:
	crit_exit();
	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		ALTQ_SQ_CNTR_DEC(
		    &cl->cl_hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX],
		    m_pktlen(m));
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
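	 * (sm1 <= sm2 means convex: zeroing dx and dy below drops the
	 * first segment, leaving a linear curve with slope m2.)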
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;
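
		/*
		 * go_passive stays set only while each class on the path
		 * loses its last activation (cl_nactive drops to zero);
		 * once an ancestor stays active, the counts further up
		 * are not decremented.
		 */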
		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
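 * the list is kept in ascending order of eligible time (cl_e);
 * ellist_insert() checks the tail entry first, and ellist_update()
 * only moves a class toward the tail, since eligible times only grow.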
 */

static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	KKASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, uint64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
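 * the list is kept in ascending order of virtual time (cl_vt), so
 * actlist_firstfit() returns the minimum-vt class whose fit-time
 * (cl_f) has been reached.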
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec     100Kbps    1Mbps     10Mbps     100Mbps    1Gbps
 *  -----------+-------------------------------------------------------
 *  bytes/nsec   12.5e-6    125e-6    1250e-6    12500e-6   125000e-6
 *  sm(500MHz)   25.0e-6    250e-6    2500e-6    25000e-6   250000e-6
 *  sm(200MHz)   62.5e-6    625e-6    6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte    80000      8000      800        80         8
 *  ism(500MHz)  40000      4000      400        40         4
 *  ism(200MHz)  16000      1600      160        16         1.6
 */
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline uint64_t
seg_x2y(uint64_t x, uint64_t sm)
{
	uint64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline uint64_t
seg_y2x(uint64_t y, uint64_t ism)
{
	uint64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

	return (x);
}

static __inline uint64_t
m2sm(u_int m)
{
	uint64_t sm;

	sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline uint64_t
m2ism(u_int m)
{
	uint64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline uint64_t
d2dx(u_int d)
{
	uint64_t dx;

	dx = ((uint64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(uint64_t sm)
{
	uint64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(uint64_t dx)
{
	uint64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
    uint64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
static uint64_t
rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
{
	uint64_t x;

	if (y < rtsc->y) {
		x = rtsc->x;
	} else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static uint64_t
rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
{
	uint64_t y;

	if (x <= rtsc->x) {
		y = rtsc->y;
	} else if (x <= rtsc->x + rtsc->dx) {
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	} else {
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	}
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
    uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
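	 * (rtsc is always derived from the same isc here, so the two
	 * first segments share slope sm1 and cannot intersect; the
	 * remaining length of rtsc's first segment is added to dx.)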
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#endif /* ALTQ_HFSC */