/*	$KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
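 *
 * each class below may carry up to three service curves: a real-time
 * curve (rsc) that drives the eligible/deadline computation, a
 * link-sharing curve (fsc) that drives the virtual-time computation,
 * and an upper-limit curve (usc) from which the fit-time is computed.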
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>

#include <sys/thread2.h>

#define HFSC_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define HFSC_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
#define HFSC_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])

/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq_subque *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
					    struct service_curve *,
					    struct service_curve *,
					    struct service_curve *,
					    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq_subque *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, uint64_t);
static ellist_t *ellist_alloc(void);
static void	ellist_destroy(ellist_t *);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
static actlist_t *actlist_alloc(void);
static void	actlist_destroy(actlist_t *);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);

static __inline uint64_t	seg_x2y(uint64_t, uint64_t);
static __inline uint64_t	seg_y2x(uint64_t, uint64_t);
static __inline uint64_t	m2sm(u_int);
static __inline uint64_t	m2ism(u_int);
static __inline uint64_t	d2dx(u_int);
static u_int			sm2m(uint64_t);
static u_int			dx2d(uint64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
			  uint64_t, uint64_t);
static uint64_t	rtsc_y2x(struct runtime_sc *, uint64_t);
static uint64_t	rtsc_x2y(struct runtime_sc *, uint64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
			 uint64_t, uint64_t);

static void	get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

int
hfsc_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_HFSC, a->altq_disc, ifq_mapsubq_default,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);

	hif->hif_eligible = ellist_alloc();
	hif->hif_ifq = &ifp->if_snd;
	ifq_purge_all(&ifp->if_snd);

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	hfsc_clear_interface(hif);
	hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	kfree(hif, M_ALTQ);

	return (0);
}

static int
hfsc_add_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	KKASSERT(a->qid != 0);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
	    opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_add_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return (error);
}

static int
hfsc_remove_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_remove_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return (error);
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	struct ifaltq *ifq;
	int error = 0;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL) {
		HFSC_UNLOCK(ifq);
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	HFSC_UNLOCK(ifq);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}
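
/*
 * the service curve parameters above come from pfctl(8).  as a rough
 * illustration (assuming the usual pf.conf(5) ALTQ syntax; interface
 * and queue names here are made up):
 *
 *	altq on em0 hfsc bandwidth 100Mb queue { std, ssh }
 *	queue std bandwidth 50% hfsc (default)
 *	queue ssh bandwidth 25% hfsc (realtime 10Mb, upperlimit 40Mb)
 *
 * pfctl translates the realtime/linkshare/upperlimit keywords into the
 * rtsc/lssc/ulsc triplets consumed by hfsc_add_queue_locked().
 */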

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == HFSC_SUBQ_INDEX) {
			hfsc_purge(hif);
		} else {
			/*
			 * A race occurred: an unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		ALTQ_SQ_CNTR_RESET(&hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX]);
}

static struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			/* min/max thresholds at 10% and 30% of the qlimit */
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL) {
		hif->hif_class_tbl[i] = cl;
	} else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (NULL);
}
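
/*
 * destroy a leaf class.  a class that still has children is left
 * alone (EBUSY); callers such as hfsc_clear_interface() therefore
 * destroy leaves repeatedly until the whole hierarchy is gone.
 */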
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	struct hfsc_if *hif;
	int i;

	if (cl == NULL)
		return (0);
	hif = cl->cl_hif;

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (hif->hif_class_tbl[i] == cl) {
			hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == hif->hif_rootclass)
		hif->hif_rootclass = NULL;
	if (cl == hif->hif_defaultclass)
		hif->hif_defaultclass = NULL;
	if (cl == hif->hif_pollcache)
		hif->hif_pollcache = NULL;

	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *	usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
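 *
 * the walk is a preorder depth-first traversal: descend to the first
 * child when one exists, otherwise step to the next sibling, otherwise
 * climb until an ancestor with an unvisited sibling is found.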
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*ifsq_enqueue) in struct ifaltq_subque.
 */
static int
hfsc_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * A race occurred: an unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return (ENOBUFS);
	}

	/* grab class set by classifier */
	M_ASSERTPKTHDR(m);
	crit_enter();
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(hif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			crit_exit();
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}
	ALTQ_SQ_PKTCNT_INC(ifsq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));
	crit_exit();
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*ifsq_dequeue) in struct ifaltq_subque.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 * from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
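 * (the poll cache that pairs an ALTDQ_POLL with the following
 * ALTDQ_REMOVE is disabled below; poll and dequeue may run on
 * different CPUs, see the '#ifdef foo' block.)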
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * A race occurred: an unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return (NULL);
	}

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	crit_enter();
	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						kprintf("%d fit but none found\n", fits);
#endif
					m = NULL;
					goto done;
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
#ifdef foo
			/*
			 * Don't use the poll cache; the poll/dequeue
			 * model is no longer applicable to SMP
			 * systems.  e.g.
			 *	CPU-A            CPU-B
			 *	  :                :
			 *	poll               :
			 *	  :              poll
			 *	dequeue (+)        :
			 *
			 * The dequeue at (+) will hit the poll
			 * cache set by CPU-B.
			 */
			hif->hif_pollcache = cl;
#endif
			m = hfsc_pollq(cl);
			goto done;
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue: non-empty class has no packet");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ALTQ_SQ_PKTCNT_DEC(ifsq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}
done:
	crit_exit();
	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		ALTQ_SQ_PKTCNT_DEC(
		    &cl->cl_hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX]);
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
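	 * (sm1 <= sm2 means the real-time curve is convex; zeroing
	 * dx/dy below discards the first segment, leaving exactly the
	 * slope-m2 line.)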
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

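		/*
		 * go_passive stays set only while each level visited so
		 * far has just lost its last active child; once an
		 * ancestor keeps other active children, the remaining
		 * ancestors only get their vt and f updated.
		 */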
		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */
static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	KKASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, uint64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
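 * the list is kept sorted by vt so that actlist_firstfit() can find
 * the smallest-vt child that fits (cl_f <= cur_time) with a simple
 * forward scan.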
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec	100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec	12.5e-6    125e-6    1250e-6    12500e-6   125000e-6
 *  sm(500MHz)	25.0e-6    250e-6    2500e-6    25000e-6   250000e-6
 *  sm(200MHz)	62.5e-6    625e-6    6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte	80000      8000      800        80         8
 *  ism(500MHz)	40000      4000      400        40         4
 *  ism(200MHz)	16000      1600      160        16         1.6
 */
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)
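
/*
 * worked example (illustrative numbers only): for m = 100Mbps and a
 * 1GHz machine clock, one byte takes 80 clock ticks, so m2ism() yields
 * 80 << 10 = 81920; the rate is 0.0125 bytes per tick, so m2sm()
 * yields 0.0125 * 2^24 ~= 209715.
 */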

static __inline uint64_t
seg_x2y(uint64_t x, uint64_t sm)
{
	uint64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline uint64_t
seg_y2x(uint64_t y, uint64_t ism)
{
	uint64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

	return (x);
}

static __inline uint64_t
m2sm(u_int m)
{
	uint64_t sm;

	sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline uint64_t
m2ism(u_int m)
{
	uint64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline uint64_t
d2dx(u_int d)
{
	uint64_t dx;

	dx = ((uint64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(uint64_t sm)
{
	uint64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(uint64_t dx)
{
	uint64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
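 * the resulting curve is two-piece linear: slope sm1 for the first
 * dx ticks (rising by dy), then slope sm2 from (x + dx, y + dy) on.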
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
    uint64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y value, i.e. the inverse of rtsc_x2y()
 */
static uint64_t
rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
{
	uint64_t x;

	if (y < rtsc->y) {
		x = rtsc->x;
	} else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static uint64_t
rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
{
	uint64_t y;

	if (x <= rtsc->x) {
		y = rtsc->y;
	} else if (x <= rtsc->x + rtsc->dx) {
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	} else {
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	}
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
    uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * i.e. dx * sm1 >> SM_SHIFT == dx * sm2 >> SM_SHIFT + (y1 - y),
	 * which solves to dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
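	 * (while rtsc is still on its own 1st segment it also rises at
	 * sm1, so the intersection moves out by the remaining length
	 * of that segment.)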
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	}
	return (NULL);
}

#endif /* ALTQ_HFSC */