/*	$KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>

#include <sys/thread2.h>

#define HFSC_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define HFSC_LOCK(ifq) \
	ALTQ_SQ_LOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
#define HFSC_UNLOCK(ifq) \
	ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])

/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq_subque *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
					    struct service_curve *,
					    struct service_curve *,
					    struct service_curve *,
					    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq_subque *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, uint64_t);
static ellist_t *ellist_alloc(void);
static void	ellist_destroy(ellist_t *);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
static actlist_t *actlist_alloc(void);
static void	actlist_destroy(actlist_t *);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);

static __inline uint64_t	seg_x2y(uint64_t, uint64_t);
static __inline uint64_t	seg_y2x(uint64_t, uint64_t);
static __inline uint64_t	m2sm(u_int);
static __inline uint64_t	m2ism(u_int);
static __inline uint64_t	d2dx(u_int);
static u_int			sm2m(uint64_t);
static u_int			dx2d(uint64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
			  uint64_t, uint64_t);
static uint64_t	rtsc_y2x(struct runtime_sc *, uint64_t);
static uint64_t	rtsc_x2y(struct runtime_sc *, uint64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
			 uint64_t, uint64_t);

static void	get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

int
hfsc_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_HFSC, a->altq_disc, ifq_mapsubq_default,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);

	hif->hif_eligible = ellist_alloc();
	hif->hif_ifq = &ifp->if_snd;
	ifq_purge_all(&ifp->if_snd);

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	hfsc_clear_interface(hif);
	hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	kfree(hif, M_ALTQ);

	return (0);
}

static int
hfsc_add_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	KKASSERT(a->qid != 0);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
	    opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_add_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}
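
/*
 * illustrative pf.conf(5) fragment that ends up in hfsc_add_queue()
 * above; interface name and numbers are arbitrary examples, not
 * defaults:
 *
 *	altq on em0 hfsc bandwidth 100Mb queue { std, voip }
 *	queue std bandwidth 50% hfsc(default)
 *	queue voip bandwidth 30% hfsc(realtime 20Mb, upperlimit 40Mb)
 *
 * each hfsc(...) option set arrives here in pq_u.hfsc_opts as the
 * (m1, d, m2) triples of the real-time, link-sharing and upper-limit
 * service curves.
 */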

static int
hfsc_remove_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_remove_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	struct ifaltq *ifq;
	int error = 0;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL) {
		HFSC_UNLOCK(ifq);
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	HFSC_UNLOCK(ifq);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == HFSC_SUBQ_INDEX) {
			hfsc_purge(hif);
		} else {
			/*
			 * Race happened, the unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX].ifsq_len = 0;
}
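
/*
 * summary of the three optional service curves attached to a class
 * (terminology from the SIGCOMM'97 paper referenced at the top of
 * this file):
 *
 *	rsc - real-time service curve; guarantees service to the class
 *	      itself and drives the eligible/deadline pair (cl_e, cl_d).
 *	fsc - fair-share (link-sharing) service curve; distributes
 *	      excess bandwidth through the virtual times (cl_vt).
 *	usc - upper-limit service curve; caps link-sharing through the
 *	      fit-time (cl_myf), the extension added by Oleg Cherevko.
 */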

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
		  struct service_curve *fsc, struct service_curve *usc,
		  struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	struct hfsc_if *hif;
	int i;

	if (cl == NULL)
		return (0);
	hif = cl->cl_hif;

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (hif->hif_class_tbl[i] == cl) {
			hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == hif->hif_rootclass)
		hif->hif_rootclass = NULL;
	if (cl == hif->hif_defaultclass)
		hif->hif_defaultclass = NULL;
	if (cl == hif->hif_pollcache)
		hif->hif_pollcache = NULL;

	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
	     struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return ENOBUFS;
	}

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		return (ENOBUFS);
	}
	crit_enter();
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(hif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			crit_exit();
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}
	ifsq->ifsq_len++;
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));
	crit_exit();
	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	crit_enter();
	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time)) != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						kprintf("%d fit but none found\n", fits);
#endif
					m = NULL;
					goto done;
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
#ifdef foo
			/*
			 * Don't use poll cache; the poll/dequeue
			 * model is no longer applicable to SMP
			 * system.  e.g.
			 *    CPU-A            CPU-B
			 *      :                :
			 *    poll               :
			 *      :              poll
			 *    dequeue (+)        :
			 *
			 * The dequeue at (+) will hit the poll
			 * cache set by CPU-B.
			 */
			hif->hif_pollcache = cl;
#endif
			m = hfsc_pollq(cl);
			goto done;
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ifsq->ifsq_len--;
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}
 done:
	crit_exit();
	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		cl->cl_hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX].ifsq_len--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}
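
/*
 * init_ed/update_ed maintain the (eligible, deadline) pair used by the
 * real-time scheduler: cl_e is the time at which the class becomes
 * eligible for real-time service, cl_d is the deadline of the packet at
 * the head of the class queue.  hfsc_dequeue() serves the eligible
 * class (cl_e <= cur_time) with the smallest deadline; see
 * ellist_get_mindl().
 */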

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
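
/*
 * update_vf propagates the effect of dequeueing len bytes up the
 * hierarchy: it advances the virtual time (cl_vt) and fit-time (cl_f)
 * of the class and of each ancestor.  when the class queue has drained
 * (go_passive), it instead records cvtmax in the parent and detaches
 * the class from the parent's actlist; this is why hfsc_purgeq() calls
 * update_vf(cl, 0, 0) explicitly.
 */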

static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible
 * times.  there is one eligible list per interface.
 */
static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	KKASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, uint64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3
 * effective digits in decimal using the following table.
 *
 *  bits/sec	100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec	12.5e-6    125e-6    1250e-6    12500e-6   125000e-6
 *  sm(500MHz)	25.0e-6    250e-6    2500e-6    25000e-6   250000e-6
 *  sm(200MHz)	62.5e-6    625e-6    6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte	80000      8000      800        80         8
 *  ism(500MHz)	40000      4000      400        40         4
 *  ism(200MHz)	16000      1600      160        16         1.6
 */
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline uint64_t
seg_x2y(uint64_t x, uint64_t sm)
{
	uint64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split the multiplication into its upper and lower bits
	 * to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline uint64_t
seg_y2x(uint64_t y, uint64_t ism)
{
	uint64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

	return (x);
}

static __inline uint64_t
m2sm(u_int m)
{
	uint64_t sm;

	sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline uint64_t
m2ism(u_int m)
{
	uint64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline uint64_t
d2dx(u_int d)
{
	uint64_t dx;

	dx = ((uint64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(uint64_t sm)
{
	uint64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(uint64_t dx)
{
	uint64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
	  uint64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
static uint64_t
rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
{
	uint64_t x;

	if (y < rtsc->y) {
		x = rtsc->x;
	} else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static uint64_t
rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
{
	uint64_t y;

	if (x <= rtsc->x) {
		y = rtsc->y;
	} else if (x <= rtsc->x + rtsc->dx) {
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	} else {
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	}
	return (y);
}
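
/*
 * note on the intersection math in rtsc_min() below: when the two
 * concave curves cross, the offset dx satisfies
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 * treating seg_x2y(dx, sm) as (dx * sm) >> SM_SHIFT and solving for dx
 * gives
 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 * which is the expression used in the function.
 */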

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
	 uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#endif /* ALTQ_HFSC */