/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
 * $DragonFly: src/sys/net/dummynet/ip_dummynet.c,v 1.55 2008/09/16 12:30:57 sephe Exp $
 */

#include "opt_ipdn.h"

/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator.
 * Description of the data structures used is in ip_dummynet.h
 * Here you mainly find the following blocks of code:
 *  + variable declarations;
 *  + heap management functions;
 *  + scheduler and dummynet functions;
 *  + configuration and initialization.
 *
 * Most important changes:
 *
 * 011004: KLDable
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: Large rewrite, use heaps to handle very many pipes.
 * 980513: Initial release
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systimer.h>
#include <sys/thread2.h>

#include <net/ethernet.h>
#include <net/netmsg2.h>
#include <net/route.h>

#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#include <net/dummynet/ip_dummynet.h>

#ifdef DUMMYNET_DEBUG
#define DPRINTF(fmt, ...)	kprintf(fmt, __VA_ARGS__)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif

#ifndef DN_CALLOUT_FREQ_MAX
#define DN_CALLOUT_FREQ_MAX	10000
#endif

/*
 * The maximum/minimum hash table size for queues.
 * These values must be a power of 2.
 */
#define DN_MIN_HASH_SIZE	4
#define DN_MAX_HASH_SIZE	65536

/*
 * Some macros are used to compare key values and handle wraparounds.
 * MAX64 returns the largest of two key values.
 */
#define DN_KEY_LT(a, b)		((int64_t)((a) - (b)) < 0)
#define DN_KEY_LEQ(a, b)	((int64_t)((a) - (b)) <= 0)
#define DN_KEY_GT(a, b)		((int64_t)((a) - (b)) > 0)
#define DN_KEY_GEQ(a, b)	((int64_t)((a) - (b)) >= 0)
#define MAX64(x, y)		((((int64_t)((y) - (x))) > 0) ? (y) : (x))

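/*
 * Worked example of the wraparound-safe comparison (illustrative
 * values): with 64-bit keys, DN_KEY_LT(a, b) computes
 * (int64_t)(a - b) < 0, so a key that has just wrapped past 0 still
 * compares as "later" than one near the top of the range, e.g. with
 * a = 2 and b = 0xfffffffffffffff0, (int64_t)(a - b) = 18 > 0, hence
 * DN_KEY_GT(a, b) holds and MAX64(a, b) yields a, as intended for
 * monotonically increasing tick counters.
 */
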
#define DN_NR_HASH_MAX		16
#define DN_NR_HASH_MASK		(DN_NR_HASH_MAX - 1)
#define DN_NR_HASH(nr)		\
	((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern int	ip_dn_cpu;

static dn_key curr_time = 0;		/* current simulation time */
static int dn_hash_size = 64;		/* default hash size */
static int pipe_expire = 1;		/* expire queue if empty */
static int dn_max_ratio = 16;		/* max queues/buckets ratio */

/*
 * Statistics on number of queue searches and search steps
 */
static int searches;
static int search_steps;

/*
 * RED parameters
 */
static int red_lookup_depth = 256;	/* default lookup table depth */
static int red_avg_pkt_size = 512;	/* default medium packet size */
static int red_max_pkt_size = 1500;	/* default max packet size */

/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 *  + ready_heap contains all dn_flow_queue related to fixed-rate pipes.
 *  + wfq_ready_heap contains the pipes associated with WF2Q flows
 *  + extract_heap contains pipes associated with delay lines.
 */
static struct dn_heap ready_heap;
static struct dn_heap extract_heap;
static struct dn_heap wfq_ready_heap;

static struct dn_pipe_head pipe_table[DN_NR_HASH_MAX];
static struct dn_flowset_head flowset_table[DN_NR_HASH_MAX];

/*
 * Variables for dummynet systimer
 */
static struct netmsg_base dn_netmsg;
static struct systimer dn_clock;
static int dn_hz = 1000;

static int	sysctl_dn_hz(SYSCTL_HANDLER_ARGS);

SYSCTL_DECL(_net_inet_ip_dummynet);

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
	   &dn_hash_size, 0, "Default hash table size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
	   &curr_time, 0, "Current tick");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
	   &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
	   &dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
	   &ready_heap.size, 0, "Size of ready heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
	   &extract_heap.size, 0, "Size of extract heap");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
	   &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
	   &search_steps, 0, "Number of queue search steps");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
	   &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
	   &red_avg_pkt_size, 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
	   &red_max_pkt_size, 0, "RED Max packet size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");

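/*
 * Usage example (values are illustrative): the knobs above are
 * runtime tunable from a shell, e.g.
 *
 *	sysctl net.inet.ip.dummynet.hz=2000
 *	sysctl net.inet.ip.dummynet.expire=0
 *
 * Raising 'hz' improves scheduling granularity at the cost of more
 * timer interrupts; it is clamped to DN_CALLOUT_FREQ_MAX by
 * sysctl_dn_hz() below.
 */
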
static int	heap_init(struct dn_heap *, int);
static int	heap_insert(struct dn_heap *, dn_key, void *);
static void	heap_extract(struct dn_heap *, void *);

static void	transmit_event(struct dn_pipe *);
static void	ready_event(struct dn_flow_queue *);
static void	ready_event_wfq(struct dn_pipe *);

static int	config_pipe(struct dn_ioc_pipe *);
static void	dummynet_flush(void);

static void	dummynet_clock(systimer_t, int, struct intrframe *);
static void	dummynet(netmsg_t);

static struct dn_pipe *dn_find_pipe(int);
static struct dn_flow_set *dn_locate_flowset(int, int);

typedef void	(*dn_pipe_iter_t)(struct dn_pipe *, void *);
static void	dn_iterate_pipe(dn_pipe_iter_t, void *);

typedef void	(*dn_flowset_iter_t)(struct dn_flow_set *, void *);
static void	dn_iterate_flowset(dn_flowset_iter_t, void *);

static ip_dn_io_t	dummynet_io;
static ip_dn_ctl_t	dummynet_ctl;

/*
 * Heap management functions.
 *
 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
 * Some macros help finding parent/children so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * Increment size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns 1 on error, 0 on success
 */
#define HEAP_FATHER(x)		(((x) - 1) / 2)
#define HEAP_LEFT(x)		(2*(x) + 1)
#define HEAP_IS_LEFT(x)		((x) & 1)
#define HEAP_RIGHT(x)		(2*(x) + 2)
#define HEAP_SWAP(a, b, buffer)	{ buffer = a; a = b; b = buffer; }
#define HEAP_INCREMENT		15

static int
heap_init(struct dn_heap *h, int new_size)
{
	struct dn_heap_entry *p;

	if (h->size >= new_size) {
		kprintf("%s, Bogus call, have %d want %d\n", __func__,
			h->size, new_size);
		return 0;
	}

	new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
	p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
	if (h->size > 0) {
		bcopy(h->p, p, h->size * sizeof(*p));
		kfree(h->p, M_DUMMYNET);
	}
	h->p = p;
	h->size = new_size;
	return 0;
}

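/*
 * Worked example (illustrative): growing a heap to hold 17 elements
 * rounds the allocation up to a multiple of 16:
 *	new_size = (17 + HEAP_INCREMENT) & ~HEAP_INCREMENT
 *	         = 32 & ~15 = 32 entries.
 * Index arithmetic for the array layout: the children of node 5 are
 * HEAP_LEFT(5) = 11 and HEAP_RIGHT(5) = 12, and both satisfy
 * HEAP_FATHER(11) == HEAP_FATHER(12) == 5.
 */
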
/*
 * Insert element in heap. Normally, p != NULL, we insert p in
 * a new position and bubble up. If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry)
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
#define SET_OFFSET(heap, node) \
	if (heap->offset > 0) \
		*((int *)((char *)(heap->p[node].object) + heap->offset)) = node;

/*
 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
 */
#define RESET_OFFSET(heap, node) \
	if (heap->offset > 0) \
		*((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;

static int
heap_insert(struct dn_heap *h, dn_key key1, void *p)
{
	int son;

	if (p == NULL) {	/* Data already there, set starting point */
		son = key1;
	} else {		/* Insert new element at the end, possibly resize */
		son = h->elements;
		if (son == h->size) {	/* Need resize... */
			if (heap_init(h, h->elements + 1))
				return 1;	/* Failure... */
		}
		h->p[son].object = p;
		h->p[son].key = key1;
		h->elements++;
	}

	while (son > 0) {	/* Bubble up */
		int father = HEAP_FATHER(son);
		struct dn_heap_entry tmp;

		if (DN_KEY_LT(h->p[father].key, h->p[son].key))
			break;	/* Found right position */

		/* 'son' smaller than 'father', swap and repeat */
		HEAP_SWAP(h->p[son], h->p[father], tmp);
		SET_OFFSET(h, son);
		son = father;
	}
	SET_OFFSET(h, son);
	return 0;
}

/*
 * Remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *obj)
{
	int child, father, max = h->elements - 1;

	if (max < 0) {
		kprintf("warning, extract from empty heap 0x%p\n", h);
		return;
	}

	father = 0;	/* Default: move up smallest child */
	if (obj != NULL) {	/* Extract specific element, index is at offset */
		if (h->offset <= 0)
			panic("%s from middle not supported on this heap!!!\n", __func__);

		father = *((int *)((char *)obj + h->offset));
		if (father < 0 || father >= h->elements) {
			panic("%s father %d out of bound 0..%d\n", __func__,
			      father, h->elements);
		}
	}
	RESET_OFFSET(h, father);

	child = HEAP_LEFT(father);		/* Left child */
	while (child <= max) {			/* Valid entry */
		if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
			child = child + 1;	/* Take right child, otherwise left */
		h->p[father] = h->p[child];
		SET_OFFSET(h, father);
		father = child;
		child = HEAP_LEFT(child);	/* Left child for next loop */
	}
	h->elements--;
	if (father != max) {
		/*
		 * Fill hole with last entry and bubble up, reusing the insert code
		 */
		h->p[father] = h->p[max];
		heap_insert(h, father, NULL);	/* This one cannot fail */
	}
}

/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property. It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
	int i;

	for (i = 0; i < h->elements; i++)
		heap_insert(h, i, NULL);
}

/*
 * Cleanup the heap and free data structure
 */
static void
heap_free(struct dn_heap *h)
{
	if (h->size > 0)
		kfree(h->p, M_DUMMYNET);
	bzero(h, sizeof(*h));
}

/*
 * --- End of heap management functions ---
 */

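/*
 * A minimal usage sketch of the dn_heap API above. This is not built
 * by default; the DUMMYNET_HEAP_EXAMPLE guard is purely illustrative.
 * It inserts a few keyed objects and drains them in increasing key
 * order. With offset == 0 the heap does not support extraction from
 * the middle, which is all this example needs.
 */
#ifdef DUMMYNET_HEAP_EXAMPLE
static void
dn_heap_example(void)
{
	struct dn_heap h;
	int a, b, c;

	bzero(&h, sizeof(h));		/* size/elements/offset all 0 */
	heap_insert(&h, 30, &a);	/* First insert triggers heap_init() */
	heap_insert(&h, 10, &b);
	heap_insert(&h, 20, &c);
	while (h.elements > 0) {
		/* The top of the heap always holds the smallest key */
		kprintf("key %d\n", (int)h.p[0].key); /* Prints 10, 20, 30 */
		heap_extract(&h, NULL);
	}
	heap_free(&h);
}
#endif
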
/*
 * Scheduler functions:
 *
 * transmit_event() is called when the delay-line needs to enter
 * the scheduler, either because of existing pkts getting ready,
 * or new packets entering the queue. The event handled is the delivery
 * time of the packet.
 *
 * ready_event() does something similar with fixed-rate queues, and the
 * event handled is the finish time of the head pkt.
 *
 * ready_event_wfq() does something similar with WF2Q queues, and the
 * event handled is the start time of the head pkt.
 *
 * In all cases, we make sure that the data structures are consistent
 * before passing pkts out, because this might trigger recursive
 * invocations of the procedures.
 */
static void
transmit_event(struct dn_pipe *pipe)
{
	struct dn_pkt *pkt;

	while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
	       DN_KEY_LEQ(pkt->output_time, curr_time)) {
		TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
		ip_dn_packet_redispatch(pkt);
	}

	/*
	 * If there are leftover packets, put into the heap for next event
	 */
	if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
		/*
		 * XXX should check errors on heap_insert, by draining the
		 * whole pipe and hoping in the future we are more successful
		 */
		heap_insert(&extract_heap, pkt->output_time, pipe);
	}
}

/*
 * The following macro computes how many ticks we have to wait
 * before being able to transmit a packet. The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
 */
#define SET_TICKS(pkt, q, p) \
	(pkt->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + p->bandwidth - 1) / \
	p->bandwidth;

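/*
 * Worked example (illustrative numbers): with dn_hz = 1000 and
 * p->bandwidth = 1000000 (1 Mbit/s), a 1500-byte packet with no
 * accumulated credit (q->numbytes == 0) needs
 *	(1500*8*1000 + 1000000 - 1) / 1000000 = 12 ticks,
 * i.e. 12 ms, matching the serialization time of 12000 bits at
 * 1 Mbit/s. The "+ bandwidth - 1" term makes the division round up.
 */
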
/*
 * Extract pkt from queue, compute output time (could be now)
 * and put into delay line (p_queue)
 */
static void
move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
	 struct dn_pipe *p, int len)
{
	TAILQ_REMOVE(&q->queue, pkt, dn_next);
	q->len--;
	q->len_bytes -= len;

	pkt->output_time = curr_time + p->delay;

	TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
}

/*
 * ready_event() is invoked every time the queue must enter the
 * scheduler, either because the first packet arrives, or because
 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then
 * if there are leftover packets reinsert the pkt in the scheduler.
 */
static void
ready_event(struct dn_flow_queue *q)
{
	struct dn_pkt *pkt;
	struct dn_pipe *p = q->fs->pipe;
	int p_was_empty;

	if (p == NULL) {
		kprintf("ready_event- pipe is gone\n");
		return;
	}
	p_was_empty = TAILQ_EMPTY(&p->p_queue);

	/*
	 * Schedule fixed-rate queues linked to this pipe:
	 * Account for the bw accumulated since last scheduling, then
	 * drain as many pkts as allowed by q->numbytes and move to
	 * the delay line (in p) computing output time.
	 * bandwidth==0 (no limit) means we can drain the whole queue,
	 * setting len_scaled = 0 does the job.
	 */
	q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
	while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
		int len = pkt->dn_m->m_pkthdr.len;
		int len_scaled = p->bandwidth ? len*8*dn_hz : 0;

		if (len_scaled > q->numbytes)
			break;
		q->numbytes -= len_scaled;
		move_pkt(pkt, q, p, len);
	}

	/*
	 * If we have more packets queued, schedule next ready event
	 * (can only occur when bandwidth != 0, otherwise we would have
	 * flushed the whole queue in the previous loop).
	 * To this purpose we record the current time and compute how many
	 * ticks to go for the finish time of the packet.
	 */
	if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
		/* This implies bandwidth != 0 */
		dn_key t = SET_TICKS(pkt, q, p);	/* ticks i have to wait */

		q->sched_time = curr_time;

		/*
		 * XXX should check errors on heap_insert, and drain the whole
		 * queue on error hoping next time we are luckier.
		 */
		heap_insert(&ready_heap, curr_time + t, q);
	} else {	/* RED needs to know when the queue becomes empty */
		q->q_time = curr_time;
		q->numbytes = 0;
	}

	/*
	 * If the delay line was empty call transmit_event(p) now.
	 * Otherwise, the scheduler will take care of it.
	 */
	if (p_was_empty)
		transmit_event(p);
}

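/*
 * Worked example of the WF2Q virtual-time bookkeeping used below
 * (illustrative numbers; MY_M is the fixed-point scaling factor from
 * ip_dummynet.h): when a flow with weight 10 dequeues a 1000-byte
 * packet, its finish time advances by (1000 << MY_M) / 10, while the
 * pipe's virtual time V advances by (1000 << MY_M) / p->sum, where
 * p->sum is the total weight of all backlogged flows. A flow with
 * twice the weight therefore advances its F half as fast, so it gets
 * scheduled twice as often for equal-size packets.
 */
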
/*
 * Called when we can transmit packets on WF2Q queues. Take pkts out of
 * the queues at their start time, and enqueue into the delay line.
 * Packets are drained until p->numbytes < 0. As long as
 * len_scaled >= p->numbytes, the packet goes into the delay line
 * with a deadline p->delay. For the last packet, if p->numbytes < 0,
 * there is an additional delay.
 */
static void
ready_event_wfq(struct dn_pipe *p)
{
	int p_was_empty = TAILQ_EMPTY(&p->p_queue);
	struct dn_heap *sch = &p->scheduler_heap;
	struct dn_heap *neh = &p->not_eligible_heap;

	p->numbytes += (curr_time - p->sched_time) * p->bandwidth;

	/*
	 * While we have backlogged traffic AND credit, we need to do
	 * something on the queue.
	 */
	while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
		if (sch->elements > 0) {	/* Have some eligible pkts to send out */
			struct dn_flow_queue *q = sch->p[0].object;
			struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
			struct dn_flow_set *fs = q->fs;
			uint64_t len = pkt->dn_m->m_pkthdr.len;
			int len_scaled = p->bandwidth ? len*8*dn_hz : 0;

			heap_extract(sch, NULL);	/* Remove queue from heap */
			p->numbytes -= len_scaled;
			move_pkt(pkt, q, p, len);

			p->V += (len << MY_M) / p->sum;	/* Update V */
			q->S = q->F;			/* Update start time */

			if (q->len == 0) {	/* Flow not backlogged any more */
				fs->backlogged--;
				heap_insert(&p->idle_heap, q->F, q);
			} else {		/* Still backlogged */
				/*
				 * Update F and position in backlogged queue, then
				 * put flow in not_eligible_heap (we will fix this later).
				 */
				len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
				q->F += (len << MY_M) / (uint64_t)fs->weight;
				if (DN_KEY_LEQ(q->S, p->V))
					heap_insert(neh, q->S, q);
				else
					heap_insert(sch, q->F, q);
			}
		}

		/*
		 * Now compute V = max(V, min(S_i)). Remember that all elements in
		 * sch have by definition S_i <= V so if sch is not empty, V is surely
		 * the max and we must not update it. Conversely, if sch is empty
		 * we only need to look at neh.
		 */
		if (sch->elements == 0 && neh->elements > 0)
			p->V = MAX64(p->V, neh->p[0].key);

		/*
		 * Move from neh to sch any packets that have become eligible
		 */
		while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
			struct dn_flow_queue *q = neh->p[0].object;

			heap_extract(neh, NULL);
			heap_insert(sch, q->F, q);
		}
	}

	if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
	    p->idle_heap.elements > 0) {
		/*
		 * No traffic and no events scheduled. We can get rid of idle-heap.
		 */
		int i;

		for (i = 0; i < p->idle_heap.elements; i++) {
			struct dn_flow_queue *q = p->idle_heap.p[i].object;

			q->F = 0;
			q->S = q->F + 1;
		}
		p->sum = 0;
		p->V = 0;
		p->idle_heap.elements = 0;
	}

	/*
	 * If we are getting clocks from dummynet and if we are under credit,
	 * schedule the next ready event.
	 * Also fix the delivery time of the last packet.
	 */
	if (p->numbytes < 0) {	/* This implies bandwidth > 0 */
		dn_key t = 0;	/* Number of ticks i have to wait */

		if (p->bandwidth > 0)
			t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
		TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
		p->sched_time = curr_time;

		/*
		 * XXX should check errors on heap_insert, and drain the whole
		 * queue on error hoping next time we are luckier.
		 */
		heap_insert(&wfq_ready_heap, curr_time + t, p);
	}

	/*
	 * If the delay line was empty call transmit_event(p) now.
	 * Otherwise, the scheduler will take care of it.
	 */
	if (p_was_empty)
		transmit_event(p);
}

static void
dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
{
	if (pipe->idle_heap.elements > 0 &&
	    DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
		struct dn_flow_queue *q = pipe->idle_heap.p[0].object;

		heap_extract(&pipe->idle_heap, NULL);
		q->S = q->F + 1;	/* Mark timestamp as invalid */
		pipe->sum -= q->fs->weight;
	}
}

/*
 * This is called once per tick, or dn_hz times per second. It is used to
 * increment the current tick counter and schedule expired events.
 */
static void
dummynet(netmsg_t msg)
{
	void *p;
	struct dn_heap *h;
	struct dn_heap *heaps[3];
	int i;

	heaps[0] = &ready_heap;		/* Fixed-rate queues */
	heaps[1] = &wfq_ready_heap;	/* WF2Q queues */
	heaps[2] = &extract_heap;	/* Delay line */

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&msg->lmsg, 0);
	crit_exit();

	curr_time++;
	for (i = 0; i < 3; i++) {
		h = heaps[i];
		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
			if (h->p[0].key > curr_time) {
				kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
					i, (int)(curr_time - h->p[0].key));
			}

			p = h->p[0].object;	/* Store a copy before heap_extract */
			heap_extract(h, NULL);	/* Need to extract before processing */

			if (i == 0)
				ready_event(p);
			else if (i == 1)
				ready_event_wfq(p);
			else
				transmit_event(p);
		}
	}

	/* Sweep pipes trying to expire idle flow_queues */
	dn_iterate_pipe(dn_expire_pipe_cb, NULL);
}

/*
 * Unconditionally expire empty queues in case of shortage.
 * Returns the number of queues freed.
 */
static int
expire_queues(struct dn_flow_set *fs)
{
	int i, initial_elements = fs->rq_elements;

	if (fs->last_expired == time_second)
		return 0;

	fs->last_expired = time_second;

	for (i = 0; i <= fs->rq_size; i++) {	/* Last one is overflow */
		struct dn_flow_queue *q, *qn;

		LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
			if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
				continue;

			/*
			 * Entry is idle, expire it
			 */
			LIST_REMOVE(q, q_link);
			kfree(q, M_DUMMYNET);

			KASSERT(fs->rq_elements > 0,
				("invalid rq_elements %d\n", fs->rq_elements));
			fs->rq_elements--;
		}
	}
	return initial_elements - fs->rq_elements;
}

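/*
 * Sizing note (illustrative): with the default hash size of 64
 * buckets and dn_max_ratio = 16, create_queue() below only resorts to
 * expire_queues() or the overflow queue once a flow_set holds more
 * than 64 * 16 = 1024 dynamic queues.
 */
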
/*
 * If room, create a new queue and put at head of slot i;
 * otherwise, create or use the default queue.
 */
static struct dn_flow_queue *
create_queue(struct dn_flow_set *fs, int i)
{
	struct dn_flow_queue *q;

	if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
	    expire_queues(fs) == 0) {
		/*
		 * No way to get room, use or create overflow queue.
		 */
		i = fs->rq_size;
		if (!LIST_EMPTY(&fs->rq[i]))
			return LIST_FIRST(&fs->rq[i]);
	}

	q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
	if (q == NULL)
		return NULL;

	q->fs = fs;
	q->hash_slot = i;
	q->S = q->F + 1;	/* Hack - mark timestamp as invalid */
	TAILQ_INIT(&q->queue);

	LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
	fs->rq_elements++;

	return q;
}

/*
 * Given a flow_set and a pkt in last_pkt, find a matching queue
 * after appropriate masking. The queue is moved to front
 * so that further searches take less time.
 */
static struct dn_flow_queue *
find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
{
	struct dn_flow_queue *q;
	int i = 0;

	if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
		q = LIST_FIRST(&fs->rq[0]);
	} else {
		struct dn_flow_queue *qn;

		/* First, do the masking */
		id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
		id->fid_src_ip &= fs->flow_mask.fid_src_ip;
		id->fid_dst_port &= fs->flow_mask.fid_dst_port;
		id->fid_src_port &= fs->flow_mask.fid_src_port;
		id->fid_proto &= fs->flow_mask.fid_proto;
		id->fid_flags = 0;	/* We don't care about this one */

		/* Then, hash function */
		i = ((id->fid_dst_ip) & 0xffff) ^
		    ((id->fid_dst_ip >> 15) & 0xffff) ^
		    ((id->fid_src_ip << 1) & 0xffff) ^
		    ((id->fid_src_ip >> 16) & 0xffff) ^
		    (id->fid_dst_port << 1) ^ (id->fid_src_port) ^
		    (id->fid_proto);
		i = i % fs->rq_size;

		/*
		 * Finally, scan the current list for a match and
		 * expire idle flow queues
		 */
		searches++;
		LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
			search_steps++;
			if (id->fid_dst_ip == q->id.fid_dst_ip &&
			    id->fid_src_ip == q->id.fid_src_ip &&
			    id->fid_dst_port == q->id.fid_dst_port &&
			    id->fid_src_port == q->id.fid_src_port &&
			    id->fid_proto == q->id.fid_proto &&
			    id->fid_flags == q->id.fid_flags) {
				break;	/* Found */
			} else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
				   q->S == q->F + 1) {
				/*
				 * Entry is idle and not in any heap, expire it
				 */
				LIST_REMOVE(q, q_link);
				kfree(q, M_DUMMYNET);

				KASSERT(fs->rq_elements > 0,
					("invalid rq_elements %d\n", fs->rq_elements));
				fs->rq_elements--;
			}
		}
		if (q && LIST_FIRST(&fs->rq[i]) != q) {	/* Found and not in front */
			LIST_REMOVE(q, q_link);
			LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
		}
	}
	if (q == NULL) {	/* No match, need to allocate a new entry */
		q = create_queue(fs, i);
		if (q != NULL)
			q->id = *id;
	}
	return q;
}

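/*
 * Masking example (illustrative addresses): with DN_HAVE_FLOW_MASK
 * set and a flow_mask of fid_dst_ip = 0xffffff00, with source, ports
 * and proto masked to 0, all packets to one /24 destination prefix
 * share a single dn_flow_queue regardless of source or port, while
 * traffic to a different /24 hashes to its own queue. A mask of all
 * ones gives one dynamic queue per 5-tuple.
 */
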
static int
red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponential weighted (w_q) moving average:
	 *	avg <- (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg = (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with probability P function of avg.
	 */
	int64_t p_b = 0;
	u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;

	DPRINTF("\n%d q: %2u ", (int)curr_time, q_size);

	/* Average queue size estimation */
	if (q_size != 0) {
		/*
		 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
		 */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, find for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = (curr_time - q->q_time) / fs->lookup_step;

			q->avg = (t < fs->lookup_depth) ?
				 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}
	DPRINTF("avg: %u ", SCALE_VAL(q->avg));

	/* Should i drop? */

	if (q->avg < fs->min_th) {
		/* Accept packet */
		q->count = -1;
		return 0;
	}

	if (q->avg >= fs->max_th) {	/* Average queue >= Max threshold */
		if (fs->flags_fs & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than max_th the
			 * packet is dropped with a probability
			 *	p_b = c_3 * avg - c_4
			 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
		} else {
			q->count = -1;
			kprintf("- drop\n");
			return 1;
		}
	} else if (q->avg > fs->min_th) {
		/*
		 * We compute p_b using the linear dropping function
		 *	p_b = c_1 * avg - c_2
		 * where c_1 = max_p / (max_th - min_th), and
		 * c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}
	if (fs->flags_fs & DN_QSIZE_IS_BYTES)
		p_b = (p_b * len) / fs->max_pkt_size;

	if (++q->count == 0) {
		q->random = krandom() & 0xffff;
	} else {
		/*
		 * q->count counts packets arrived since last drop, so a greater
		 * value of q->count means a greater packet drop probability.
		 */
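		/*
		 * Illustrative numbers (assuming the 16-bit fixed point
		 * implied by the 0xffff mask on q->random): q->random is
		 * uniform in [0, 0xffff], so the test below drops with
		 * probability roughly p_b * q->count, ramping up linearly
		 * and reaching certainty around q->count = 1/p_b (about
		 * 100 packets for a 1% p_b). This spaces drops out more
		 * evenly than independent coin flips would.
		 */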
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			DPRINTF("%s", "- red drop");
			/* After a drop we calculate a new random value */
			q->random = krandom() & 0xffff;
			return 1;	/* Drop */
		}
	}
	/* End of RED algorithm */
	return 0;	/* Accept */
}

static void
dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
{
	int i;

	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
		struct dn_pipe_head *pipe_hdr = &pipe_table[i];
		struct dn_pipe *pipe, *pipe_next;

		LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
			func(pipe, arg);
	}
}

static void
dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
{
	int i;

	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
		struct dn_flowset_head *fs_hdr = &flowset_table[i];
		struct dn_flow_set *fs, *fs_next;

		LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
			func(fs, arg);
	}
}

static struct dn_pipe *
dn_find_pipe(int pipe_nr)
{
	struct dn_pipe_head *pipe_hdr;
	struct dn_pipe *p;

	pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
	LIST_FOREACH(p, pipe_hdr, p_link) {
		if (p->pipe_nr == pipe_nr)
			break;
	}
	return p;
}

static struct dn_flow_set *
dn_find_flowset(int fs_nr)
{
	struct dn_flowset_head *fs_hdr;
	struct dn_flow_set *fs;

	fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
	LIST_FOREACH(fs, fs_hdr, fs_link) {
		if (fs->fs_nr == fs_nr)
			break;
	}
	return fs;
}

static struct dn_flow_set *
dn_locate_flowset(int pipe_nr, int is_pipe)
{
	struct dn_flow_set *fs = NULL;

	if (!is_pipe) {
		fs = dn_find_flowset(pipe_nr);
	} else {
		struct dn_pipe *p;

		p = dn_find_pipe(pipe_nr);
		if (p != NULL)
			fs = &p->fs;
	}
	return fs;
}

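/*
 * Lookup example (illustrative): pipe number 0x1234 lands in bucket
 * DN_NR_HASH(0x1234) = (0x1 ^ 0x12 ^ 0x123 ^ 0x1234) & 0xf = 4, i.e.
 * the four nibbles folded together, so consecutively numbered pipes
 * spread evenly across the 16 chains scanned by dn_find_pipe() and
 * dn_find_flowset() above.
 */
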
/*
 * Dummynet hook for packets. Below 'pipe' is a pipe or a queue
 * depending on whether WF2Q or fixed bw is used.
 *
 * pipe_nr	pipe or queue the packet is destined for.
 * dir		where shall we send the packet after dummynet.
 * m		the mbuf with the packet
 * fwa->oif	the 'ifp' parameter from the caller.
 *		NULL in ip_input, destination interface in ip_output
 * fwa->ro	route parameter (only used in ip_output, NULL otherwise)
 * fwa->dst	destination address, only used by ip_output
 * fwa->rule	matching rule, in case of multiple passes
 * fwa->flags	flags from the caller, only used in ip_output
 */
static int
dummynet_io(struct mbuf *m)
{
	struct dn_pkt *pkt;
	struct m_tag *tag;
	struct dn_flow_set *fs;
	struct dn_pipe *pipe;
	uint64_t len = m->m_pkthdr.len;
	struct dn_flow_queue *q = NULL;
	int is_pipe, pipe_nr;

	tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
	pkt = m_tag_data(tag);

	is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
	pipe_nr = pkt->pipe_nr;

	/*
	 * This is a dummynet rule, so we expect a O_PIPE or O_QUEUE rule
	 */
	fs = dn_locate_flowset(pipe_nr, is_pipe);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */

	pipe = fs->pipe;
	if (pipe == NULL) {	/* Must be a queue, try to find a matching pipe */
		pipe = dn_find_pipe(fs->parent_nr);
		if (pipe != NULL) {
			fs->pipe = pipe;
		} else {
			kprintf("No pipe %d for queue %d, drop pkt\n",
				fs->parent_nr, fs->fs_nr);
			goto dropit;
		}
	}

	q = find_queue(fs, &pkt->id);
	if (q == NULL)
		goto dropit;	/* Cannot allocate queue */

	/*
	 * Update statistics, then check reasons to drop pkt
	 */
	q->tot_bytes += len;
	q->tot_pkts++;

	if (fs->plr && krandom() < fs->plr)
		goto dropit;	/* Random pkt drop */

	if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
		if (q->len_bytes > fs->qsize)
			goto dropit;	/* Queue size overflow */
	} else {
		if (q->len >= fs->qsize)
			goto dropit;	/* Queue count overflow */
	}

	if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
		goto dropit;

	TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
	q->len++;
	q->len_bytes += len;

	if (TAILQ_FIRST(&q->queue) != pkt)	/* Flow was not idle, we are done */
		goto done;

	/*
	 * If we reach this point the flow was previously idle, so we need
	 * to schedule it. This involves different actions for fixed-rate
	 * or WF2Q queues.
	 */
	if (is_pipe) {
		/*
		 * Fixed-rate queue: just insert into the ready_heap.
		 */
		dn_key t = 0;

		if (pipe->bandwidth)
			t = SET_TICKS(pkt, q, pipe);

		q->sched_time = curr_time;
		if (t == 0)	/* Must process it now */
			ready_event(q);
		else
			heap_insert(&ready_heap, curr_time + t, q);
	} else {
		/*
		 * WF2Q:
		 * First, compute start time S: if the flow was idle (S=F+1)
		 * set S to the virtual time V for the controlling pipe, and update
		 * the sum of weights for the pipe; otherwise, remove flow from
		 * idle_heap and set S to max(F, V).
		 * Second, compute finish time F = S + len/weight.
		 * Third, if pipe was idle, update V = max(S, V).
		 * Fourth, count one more backlogged flow.
		 */
		if (DN_KEY_GT(q->S, q->F)) {	/* Means timestamps are invalid */
			q->S = pipe->V;
			pipe->sum += fs->weight;	/* Add weight of new queue */
		} else {
			heap_extract(&pipe->idle_heap, q);
			q->S = MAX64(q->F, pipe->V);
		}
		q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;

		if (pipe->not_eligible_heap.elements == 0 &&
		    pipe->scheduler_heap.elements == 0)
			pipe->V = MAX64(q->S, pipe->V);

		fs->backlogged++;

		/*
		 * Look at eligibility. A flow is not eligible if S>V (when
		 * this happens, it means that there is some other flow already
		 * scheduled for the same pipe, so the scheduler_heap cannot be
		 * empty). If the flow is not eligible we just store it in the
		 * not_eligible_heap. Otherwise, we store in the scheduler_heap
		 * and possibly invoke ready_event_wfq() right now if there is
		 * leftover credit.
		 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
		 * and for all flows in not_eligible_heap (NEH), S_i > V.
		 * So when we need to compute max(V, min(S_i)) forall i in SCH+NEH,
		 * we only need to look into NEH.
		 */
		if (DN_KEY_GT(q->S, pipe->V)) {	/* Not eligible */
			if (pipe->scheduler_heap.elements == 0)
				kprintf("++ ouch! not eligible but empty scheduler!\n");
			heap_insert(&pipe->not_eligible_heap, q->S, q);
		} else {
			heap_insert(&pipe->scheduler_heap, q->F, q);
			if (pipe->numbytes >= 0) {	/* Pipe is idle */
				if (pipe->scheduler_heap.elements != 1)
					kprintf("*** OUCH! pipe should have been idle!\n");
				DPRINTF("Waking up pipe %d at %d\n",
					pipe->pipe_nr, (int)(q->F >> MY_M));
				pipe->sched_time = curr_time;
				ready_event_wfq(pipe);
			}
		}
	}
done:
	return 0;

dropit:
	if (q)
		q->drops++;
	return ENOBUFS;
}

/*
 * Dispose all packets and flow_queues on a flow_set.
 * If all=1, also remove red lookup table and other storage,
 * including the descriptor itself.
 * For the one in dn_pipe MUST also cleanup ready_heap...
 */
static void
purge_flow_set(struct dn_flow_set *fs, int all)
{
	int i;
#ifdef INVARIANTS
	int rq_elements = 0;
#endif

	for (i = 0; i <= fs->rq_size; i++) {
		struct dn_flow_queue *q;

		while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
			struct dn_pkt *pkt;

			while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
				TAILQ_REMOVE(&q->queue, pkt, dn_next);
				ip_dn_packet_free(pkt);
			}

			LIST_REMOVE(q, q_link);
			kfree(q, M_DUMMYNET);

#ifdef INVARIANTS
			rq_elements++;
#endif
		}
	}
	KASSERT(rq_elements == fs->rq_elements,
		("# rq elements mismatch, freed %d, total %d\n",
		 rq_elements, fs->rq_elements));
	fs->rq_elements = 0;

	if (all) {
		/* RED - free lookup table */
		if (fs->w_q_lookup)
			kfree(fs->w_q_lookup, M_DUMMYNET);

		if (fs->rq)
			kfree(fs->rq, M_DUMMYNET);

		/*
		 * If this fs is not part of a pipe, free it
		 *
		 * fs->pipe == NULL could happen, if 'fs' is a WF2Q and
		 * - No packet belonging to that flow set has been delivered by
		 *   dummynet_io(), i.e. the parent pipe is not installed yet.
		 * - The parent pipe is deleted.
		 */
		if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
			kfree(fs, M_DUMMYNET);
	}
}

/*
 * Dispose all packets queued on a pipe (not a flow_set).
 * Also free all resources associated to a pipe, which is about
 * to be deleted.
 */
static void
purge_pipe(struct dn_pipe *pipe)
{
	struct dn_pkt *pkt;

	purge_flow_set(&pipe->fs, 1);

	while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
		TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
		ip_dn_packet_free(pkt);
	}

	heap_free(&pipe->scheduler_heap);
	heap_free(&pipe->not_eligible_heap);
	heap_free(&pipe->idle_heap);
}

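/*
 * Note on the teardown order used by purge_pipe() above: the embedded
 * flow_set is passed to purge_flow_set() with all=1, but it is not
 * kfree()d there, because fs == &fs->pipe->fs for a pipe's own
 * flow_set; that storage only goes away when the caller frees the
 * dn_pipe itself.
 */
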
/*
 * Delete all pipes and heaps returning memory.
 */
static void
dummynet_flush(void)
{
	struct dn_pipe_head pipe_list;
	struct dn_flowset_head fs_list;
	struct dn_pipe *p;
	struct dn_flow_set *fs;
	int i;

	/*
	 * Prevent future matches...
	 */
	LIST_INIT(&pipe_list);
	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
		struct dn_pipe_head *pipe_hdr = &pipe_table[i];

		while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
			LIST_REMOVE(p, p_link);
			LIST_INSERT_HEAD(&pipe_list, p, p_link);
		}
	}

	LIST_INIT(&fs_list);
	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
		struct dn_flowset_head *fs_hdr = &flowset_table[i];

		while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
			LIST_REMOVE(fs, fs_link);
			LIST_INSERT_HEAD(&fs_list, fs, fs_link);
		}
	}

	/* Free heaps so we don't have unwanted events */
	heap_free(&ready_heap);
	heap_free(&wfq_ready_heap);
	heap_free(&extract_heap);

	/*
	 * Now purge all queued pkts and delete all pipes
	 */
	/* Scan and purge all flow_sets. */
	while ((fs = LIST_FIRST(&fs_list)) != NULL) {
		LIST_REMOVE(fs, fs_link);
		purge_flow_set(fs, 1);
	}

	while ((p = LIST_FIRST(&pipe_list)) != NULL) {
		LIST_REMOVE(p, p_link);
		purge_pipe(p);
		kfree(p, M_DUMMYNET);
	}
}

/*
 * Setup RED parameters.
 */
static int
config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
{
	int i;

	x->w_q = ioc_fs->w_q;
	x->min_th = SCALE(ioc_fs->min_th);
	x->max_th = SCALE(ioc_fs->max_th);
	x->max_p = ioc_fs->max_p;

	x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
	x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
	if (x->flags_fs & DN_IS_GENTLE_RED) {
		x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
		x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
	}

	/* If the lookup table already exists, free it and create it again */
	if (x->w_q_lookup) {
		kfree(x->w_q_lookup, M_DUMMYNET);
		x->w_q_lookup = NULL;
	}

	if (red_lookup_depth == 0) {
		kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
		kfree(x, M_DUMMYNET);
		return EINVAL;
	}
	x->lookup_depth = red_lookup_depth;
	x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
				M_DUMMYNET, M_WAITOK);

	/* Fill the lookup table with (1 - w_q)^x */
	x->lookup_step = ioc_fs->lookup_step;
	x->lookup_weight = ioc_fs->lookup_weight;

	x->w_q_lookup[0] = SCALE(1) - x->w_q;
	for (i = 1; i < x->lookup_depth; i++)
		x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);

	if (red_avg_pkt_size < 1)
		red_avg_pkt_size = 512;
	x->avg_pkt_size = red_avg_pkt_size;

	if (red_max_pkt_size < 1)
		red_max_pkt_size = 1500;
	x->max_pkt_size = red_max_pkt_size;

	return 0;
}

static void
alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
{
	int i, alloc_size;

	if (x->flags_fs & DN_HAVE_FLOW_MASK) {
		int l = ioc_fs->rq_size;

		/* Allocate some slots */
		if (l == 0)
			l = dn_hash_size;

		if (l < DN_MIN_HASH_SIZE)
			l = DN_MIN_HASH_SIZE;
		else if (l > DN_MAX_HASH_SIZE)
			l = DN_MAX_HASH_SIZE;

		x->rq_size = l;
	} else {
		/* One is enough for null mask */
		x->rq_size = 1;
	}
	alloc_size = x->rq_size + 1;

	x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
			M_DUMMYNET, M_WAITOK | M_ZERO);
	x->rq_elements = 0;

	for (i = 0; i < alloc_size; ++i)
		LIST_INIT(&x->rq[i]);
}

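/*
 * Sizing example (illustrative): a flow_set configured with a flow
 * mask but no explicit bucket count gets rq_size = dn_hash_size (64
 * by default, clamped to [DN_MIN_HASH_SIZE, DN_MAX_HASH_SIZE]); the
 * extra slot at index rq_size is the overflow queue used by
 * create_queue() when the queues/buckets ratio is exceeded.
 */
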
static void
set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
{
	id->fid_dst_ip = ioc_id->u.ip.dst_ip;
	id->fid_src_ip = ioc_id->u.ip.src_ip;
	id->fid_dst_port = ioc_id->u.ip.dst_port;
	id->fid_src_port = ioc_id->u.ip.src_port;
	id->fid_proto = ioc_id->u.ip.proto;
	id->fid_flags = ioc_id->u.ip.flags;
}

static void
set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
{
	x->flags_fs = ioc_fs->flags_fs;
	x->qsize = ioc_fs->qsize;
	x->plr = ioc_fs->plr;
	set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
	if (x->flags_fs & DN_QSIZE_IS_BYTES) {
		if (x->qsize > 1024 * 1024)
			x->qsize = 1024 * 1024;
	} else {
		if (x->qsize == 0 || x->qsize > 100)
			x->qsize = 50;
	}

	/* Configuring RED */
	if (x->flags_fs & DN_IS_RED)
		config_red(ioc_fs, x);	/* XXX should check errors */
}

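/*
 * Clamping example (values from the code above): a byte-based queue
 * (DN_QSIZE_IS_BYTES) is capped at 1 MB; a slot-based queue is forced
 * into the range 1..100 slots, defaulting to 50 when the user passes 0.
 */
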
/*
 * Setup pipe or queue parameters.
 */
static int
config_pipe(struct dn_ioc_pipe *ioc_pipe)
{
	struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
	int error;

	/*
	 * The config program passes parameters as follows:
	 * bw	bits/second (0 means no limits)
	 * delay	ms (must be translated into ticks)
	 * qsize	slots or bytes
	 */
	ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;

	/*
	 * We need either a pipe number or a flow_set number
	 */
	if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
		return EINVAL;
	if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
		return EINVAL;

	/*
	 * Validate pipe number
	 */
	if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
		return EINVAL;

	error = EINVAL;
	if (ioc_pipe->pipe_nr != 0) {	/* This is a pipe */
		struct dn_pipe *x, *p;

		/* Locate pipe */
		p = dn_find_pipe(ioc_pipe->pipe_nr);

		if (p == NULL) {	/* New pipe */
			x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET,
				    M_WAITOK | M_ZERO);
			x->pipe_nr = ioc_pipe->pipe_nr;
			x->fs.pipe = x;
			TAILQ_INIT(&x->p_queue);

			/*
			 * idle_heap is the only one from which
			 * we extract from the middle.
			 */
			x->idle_heap.size = x->idle_heap.elements = 0;
			x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
		} else {
			int i;

			x = p;

			/* Flush accumulated credit for all queues */
			for (i = 0; i <= x->fs.rq_size; i++) {
				struct dn_flow_queue *q;

				LIST_FOREACH(q, &x->fs.rq[i], q_link)
					q->numbytes = 0;
			}
		}

		x->bandwidth = ioc_pipe->bandwidth;
		x->numbytes = 0;	/* Just in case... */
		x->delay = ioc_pipe->delay;

		set_fs_parms(&x->fs, ioc_fs);

		if (x->fs.rq == NULL) {	/* A new pipe */
			struct dn_pipe_head *pipe_hdr;

			alloc_hash(&x->fs, ioc_fs);

			pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
			LIST_INSERT_HEAD(pipe_hdr, x, p_link);
		}
	} else {	/* Config flow_set */
		struct dn_flow_set *x, *fs;

		/* Locate flow_set */
		fs = dn_find_flowset(ioc_fs->fs_nr);

		if (fs == NULL) {	/* New flow_set */
			if (ioc_fs->parent_nr == 0)	/* Need link to a pipe */
				goto back;

			x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
				    M_WAITOK | M_ZERO);
			x->fs_nr = ioc_fs->fs_nr;
			x->parent_nr = ioc_fs->parent_nr;
			x->weight = ioc_fs->weight;
			if (x->weight == 0)
				x->weight = 1;
			else if (x->weight > 100)
				x->weight = 100;
		} else {
			/* Change parent pipe not allowed; must delete and recreate */
			if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
				goto back;
			x = fs;
		}

		set_fs_parms(x, ioc_fs);

		if (x->rq == NULL) {	/* A new flow_set */
			struct dn_flowset_head *fs_hdr;

			alloc_hash(x, ioc_fs);

			fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
			LIST_INSERT_HEAD(fs_hdr, x, fs_link);
		}
	}
	error = 0;

back:
	return error;
}

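/*
 * Userland example (hedged: exact syntax depends on the installed
 * ipfw(8)): requests like the following reach config_pipe() via the
 * IP_DUMMYNET_CONFIGURE socket option as a struct dn_ioc_pipe:
 *
 *	ipfw pipe 1 config bw 1Mbit/s delay 100ms
 *	ipfw queue 5 config pipe 1 weight 10 mask dst-ip 0x000000ff
 *
 * The first creates/updates pipe 1 (fixed rate plus delay line); the
 * second creates WF2Q queue 5 attached to pipe 1, with one dynamic
 * queue per masked destination address.
 */
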
/*
 * Helper function to remove from a heap queues which are linked to
 * a flow_set about to be deleted.
 */
static void
fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
{
	int i = 0, found = 0;

	while (i < h->elements) {
		if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
			h->elements--;
			h->p[i] = h->p[h->elements];
			found++;
		} else {
			i++;
		}
	}
	if (found)
		heapify(h);
}

/*
 * Helper function to remove a pipe from a heap (can be there at most once).
 */
static void
pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
{
	if (h->elements > 0) {
		int i;

		for (i = 0; i < h->elements; i++) {
			if (h->p[i].object == p) {	/* Found it */
				h->elements--;
				h->p[i] = h->p[h->elements];
				heapify(h);
				break;
			}
		}
	}
}

static void
dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
{
	struct dn_pipe *pipe = pipe0;

	if (fs->pipe == pipe) {
		kprintf("++ ref to pipe %d from fs %d\n",
			pipe->pipe_nr, fs->fs_nr);
		fs->pipe = NULL;
		purge_flow_set(fs, 0);
	}
}

/*
 * Fully delete a pipe or a queue, cleaning up associated info.
 */
static int
delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
{
	struct dn_pipe *p;
	int error;

	if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
		return EINVAL;
	if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
		return EINVAL;

	if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
		return EINVAL;

	error = EINVAL;
	if (ioc_pipe->pipe_nr != 0) {	/* This is an old-style pipe */
		/* Locate pipe */
		p = dn_find_pipe(ioc_pipe->pipe_nr);
		if (p == NULL)
			goto back;	/* Not found */

		/* Unlink from pipe hash table */
		LIST_REMOVE(p, p_link);

		/* Remove all references to this pipe from flow_sets */
		dn_iterate_flowset(dn_unref_pipe_cb, p);

		fs_remove_from_heap(&ready_heap, &p->fs);
		purge_pipe(p);	/* Remove all data associated to this pipe */

		/* Remove reference to here from extract_heap and wfq_ready_heap */
		pipe_remove_from_heap(&extract_heap, p);
		pipe_remove_from_heap(&wfq_ready_heap, p);

		kfree(p, M_DUMMYNET);
	} else {	/* This is a WF2Q queue (dn_flow_set) */
		struct dn_flow_set *fs;

		/* Locate flow_set */
		fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
		if (fs == NULL)
			goto back;	/* Not found */

		LIST_REMOVE(fs, fs_link);

		if ((p = fs->pipe) != NULL) {
			/* Update total weight on parent pipe and cleanup parent heaps */
			p->sum -= fs->weight * fs->backlogged;
			fs_remove_from_heap(&p->not_eligible_heap, fs);
			fs_remove_from_heap(&p->scheduler_heap, fs);
#if 1	/* XXX should i remove from idle_heap as well ? */
			fs_remove_from_heap(&p->idle_heap, fs);
#endif
		}
		purge_flow_set(fs, 1);
	}
	error = 0;

back:
	return error;
}

/*
 * Helper function used to copy data from kernel in DUMMYNET_GET.
 */
static void
dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
{
	ioc_id->type = ETHERTYPE_IP;
	ioc_id->u.ip.dst_ip = id->fid_dst_ip;
	ioc_id->u.ip.src_ip = id->fid_src_ip;
	ioc_id->u.ip.dst_port = id->fid_dst_port;
	ioc_id->u.ip.src_port = id->fid_src_port;
	ioc_id->u.ip.proto = id->fid_proto;
	ioc_id->u.ip.flags = id->fid_flags;
}

static void *
dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
{
	struct dn_ioc_flowqueue *ioc_fq = bp;
	int i, copied = 0;

	for (i = 0; i <= fs->rq_size; i++) {
		const struct dn_flow_queue *q;

		LIST_FOREACH(q, &fs->rq[i], q_link) {
			if (q->hash_slot != i) {	/* XXX ASSERT */
				kprintf("++ at %d: wrong slot (have %d, "
					"should be %d)\n", copied, q->hash_slot, i);
			}
			if (q->fs != fs) {		/* XXX ASSERT */
				kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
					i, q->fs, fs);
			}

			copied++;

			ioc_fq->len = q->len;
			ioc_fq->len_bytes = q->len_bytes;
			ioc_fq->tot_pkts = q->tot_pkts;
			ioc_fq->tot_bytes = q->tot_bytes;
			ioc_fq->drops = q->drops;
			ioc_fq->hash_slot = q->hash_slot;
			ioc_fq->S = q->S;
			ioc_fq->F = q->F;
			dn_copy_flowid(&q->id, &ioc_fq->id);

			ioc_fq++;
		}
	}

	if (copied != fs->rq_elements) {	/* XXX ASSERT */
		kprintf("++ wrong count, have %d should be %d\n",
			copied, fs->rq_elements);
	}
	return ioc_fq;
}

static void
dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
		u_short fs_type)
{
	ioc_fs->fs_type = fs_type;

	ioc_fs->fs_nr = fs->fs_nr;
	ioc_fs->flags_fs = fs->flags_fs;
	ioc_fs->parent_nr = fs->parent_nr;

	ioc_fs->weight = fs->weight;
	ioc_fs->qsize = fs->qsize;
	ioc_fs->plr = fs->plr;

	ioc_fs->rq_size = fs->rq_size;
	ioc_fs->rq_elements = fs->rq_elements;

	ioc_fs->w_q = fs->w_q;
	ioc_fs->max_th = fs->max_th;
	ioc_fs->min_th = fs->min_th;
	ioc_fs->max_p = fs->max_p;

	dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
}

static void
dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
{
	size_t *size = sz;

	*size += sizeof(struct dn_ioc_pipe) +
		 pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
}

static void
dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
{
	size_t *size = sz;

	*size += sizeof(struct dn_ioc_flowset) +
		 fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
}

static void
dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
{
	char **bp = bp0;
	struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);

	/*
	 * Copy flow set descriptor associated with this pipe
	 */
	dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);

	/*
	 * Copy pipe descriptor
	 */
	ioc_pipe->bandwidth = pipe->bandwidth;
	ioc_pipe->pipe_nr = pipe->pipe_nr;
	ioc_pipe->V = pipe->V;
	/* Convert delay to milliseconds */
	ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;

	/*
	 * Copy flow queue descriptors
	 */
	*bp += sizeof(*ioc_pipe);
	*bp = dn_copy_flowqueues(&pipe->fs, *bp);
}

static void
dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
{
	char **bp = bp0;
	struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);

	/*
	 * Copy flow set descriptor
	 */
	dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);

	/*
	 * Copy flow queue descriptors
	 */
	*bp += sizeof(*ioc_fs);
	*bp = dn_copy_flowqueues(fs, *bp);
}

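/*
 * The DUMMYNET_GET copyout buffer produced below is a simple
 * concatenation:
 *
 *	[dn_ioc_pipe][dn_ioc_flowqueue ...]	one group per pipe, then
 *	[dn_ioc_flowset][dn_ioc_flowqueue ...]	one group per queue
 *
 * each descriptor immediately followed by its rq_elements flow queue
 * records, which is why the dn_calc_*_size_cb() callbacks above count
 * sizeof(descriptor) + rq_elements * sizeof(struct dn_ioc_flowqueue).
 */
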
static int
dummynet_get(struct dn_sopt *dn_sopt)
{
	char *buf, *bp;
	size_t size = 0;

	/*
	 * Compute size of data structures: list of pipes and flow_sets.
	 */
	dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
	dn_iterate_flowset(dn_calc_fs_size_cb, &size);

	/*
	 * Copyout pipe/flow_set/flow_queue
	 */
	bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
	dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
	dn_iterate_flowset(dn_copyout_fs_cb, &bp);

	/* Temp memory will be freed by caller */
	dn_sopt->dn_sopt_arg = buf;
	dn_sopt->dn_sopt_arglen = size;
	return 0;
}

/*
 * Handler for the various dummynet socket options (get, flush, config, del).
 */
static int
dummynet_ctl(struct dn_sopt *dn_sopt)
{
	int error = 0;

	switch (dn_sopt->dn_sopt_name) {
	case IP_DUMMYNET_GET:
		error = dummynet_get(dn_sopt);
		break;

	case IP_DUMMYNET_FLUSH:
		dummynet_flush();
		break;

	case IP_DUMMYNET_CONFIGURE:
		KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
		error = config_pipe(dn_sopt->dn_sopt_arg);
		break;

	case IP_DUMMYNET_DEL:	/* Remove a pipe or flow_set */
		KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
		error = delete_pipe(dn_sopt->dn_sopt_arg);
		break;

	default:
		kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
		error = EINVAL;
		break;
	}
	return error;
}

static void
dummynet_clock(systimer_t info __unused, int in_ipi __unused,
	       struct intrframe *frame __unused)
{
	KASSERT(mycpuid == ip_dn_cpu,
		("dummynet systimer comes on cpu%d, should be %d!\n",
		 mycpuid, ip_dn_cpu));

	crit_enter();
	if (DUMMYNET_LOADED && (dn_netmsg.lmsg.ms_flags & MSGF_DONE))
		lwkt_sendmsg(cpu_portfn(mycpuid), &dn_netmsg.lmsg);
	crit_exit();
}

static int
sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = dn_hz;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (val <= 0)
		return EINVAL;
	else if (val > DN_CALLOUT_FREQ_MAX)
		val = DN_CALLOUT_FREQ_MAX;

	crit_enter();
	dn_hz = val;
	systimer_adjust_periodic(&dn_clock, val);
	crit_exit();

	return 0;
}

static void
ip_dn_init_dispatch(netmsg_t msg)
{
	int i, error = 0;

	KASSERT(mycpuid == ip_dn_cpu,
		("%s runs on cpu%d, instead of cpu%d", __func__,
		 mycpuid, ip_dn_cpu));

	crit_enter();

	if (DUMMYNET_LOADED) {
		kprintf("DUMMYNET already loaded\n");
		error = EEXIST;
		goto back;
	}

	kprintf("DUMMYNET initialized (011031)\n");

	for (i = 0; i < DN_NR_HASH_MAX; ++i)
		LIST_INIT(&pipe_table[i]);

	for (i = 0; i < DN_NR_HASH_MAX; ++i)
		LIST_INIT(&flowset_table[i]);

	ready_heap.size = ready_heap.elements = 0;
	ready_heap.offset = 0;

	wfq_ready_heap.size = wfq_ready_heap.elements = 0;
	wfq_ready_heap.offset = 0;

	extract_heap.size = extract_heap.elements = 0;
	extract_heap.offset = 0;

	ip_dn_ctl_ptr = dummynet_ctl;
	ip_dn_io_ptr = dummynet_io;

	netmsg_init(&dn_netmsg, NULL, &netisr_adone_rport,
		    0, dummynet);
	systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);

back:
	crit_exit();
	lwkt_replymsg(&msg->lmsg, error);
}

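/*
 * A sketch of the runtime flow tied together above: the periodic
 * systimer fires dummynet_clock() dn_hz times per second on
 * ip_dn_cpu; when the previous message has been consumed (MSGF_DONE)
 * it sends dn_netmsg, whose handler dummynet() advances curr_time by
 * one tick and drains the three event heaps. All scheduling state is
 * therefore manipulated serially on that one CPU.
 */
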
static int
ip_dn_init(void)
{
	struct netmsg_base smsg;

	if (ip_dn_cpu >= ncpus) {
		kprintf("%s: CPU%d does not exist, switch to CPU0\n",
			__func__, ip_dn_cpu);
		ip_dn_cpu = 0;
	}

	netmsg_init(&smsg, NULL, &curthread->td_msgport,
		    0, ip_dn_init_dispatch);
	lwkt_domsg(cpu_portfn(ip_dn_cpu), &smsg.lmsg, 0);
	return smsg.lmsg.ms_error;
}

#ifdef KLD_MODULE

static void
ip_dn_stop_dispatch(netmsg_t msg)
{
	crit_enter();

	dummynet_flush();

	ip_dn_ctl_ptr = NULL;
	ip_dn_io_ptr = NULL;

	systimer_del(&dn_clock);

	crit_exit();
	lwkt_replymsg(&msg->lmsg, 0);
}

static void
ip_dn_stop(void)
{
	struct netmsg_base smsg;

	netmsg_init(&smsg, NULL, &curthread->td_msgport,
		    0, ip_dn_stop_dispatch);
	lwkt_domsg(cpu_portfn(ip_dn_cpu), &smsg.lmsg, 0);

	netmsg_service_sync();
}

#endif	/* KLD_MODULE */

static int
dummynet_modevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
		return ip_dn_init();

	case MOD_UNLOAD:
#ifndef KLD_MODULE
		kprintf("dummynet statically compiled, cannot unload\n");
		return EINVAL;
#else
		ip_dn_stop();
#endif
		break;

	default:
		break;
	}
	return 0;
}

static moduledata_t dummynet_mod = {
	"dummynet",
	dummynet_modevent,
	NULL
};
DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_VERSION(dummynet, 1);