xref: /dragonfly/sys/net/dummynet/ip_dummynet.c (revision b960c7cf)
1 /*
2  * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
3  * Portions Copyright (c) 2000 Akamba Corp.
4  * All rights reserved
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
28  */
29 
30 #include "opt_ipdn.h"
31 
32 /*
33  * This module implements IP dummynet, a bandwidth limiter/delay emulator.
34  * Description of the data structures used is in ip_dummynet.h
35  * Here you mainly find the following blocks of code:
36  *  + variable declarations;
37  *  + heap management functions;
38  *  + scheduler and dummynet functions;
39  *  + configuration and initialization.
40  *
41  * Most important Changes:
42  *
43  * 011004: KLDable
44  * 010124: Fixed WF2Q behaviour
45  * 010122: Fixed spl protection.
46  * 000601: WF2Q support
47  * 000106: Large rewrite, use heaps to handle very many pipes.
48  * 980513: Initial release
49  */
50 
51 #include <sys/param.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54 #include <sys/mbuf.h>
55 #include <sys/socketvar.h>
56 #include <sys/sysctl.h>
57 #include <sys/systimer.h>
58 #include <sys/thread2.h>
59 
60 #include <net/ethernet.h>
61 #include <net/netmsg2.h>
62 #include <net/netisr2.h>
63 #include <net/route.h>
64 
65 #include <net/if.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip_var.h>
68 
69 #include <net/dummynet/ip_dummynet.h>
70 
71 #ifdef DUMMYNET_DEBUG
72 #define DPRINTF(fmt, ...)	kprintf(fmt, __VA_ARGS__)
73 #else
74 #define DPRINTF(fmt, ...)	((void)0)
75 #endif
76 
77 #ifndef DN_CALLOUT_FREQ_MAX
78 #define DN_CALLOUT_FREQ_MAX	10000
79 #endif
80 
81 /*
82  * The maximum/minimum hash table size for queues.
83  * These values must be a power of 2.
84  */
85 #define DN_MIN_HASH_SIZE	4
86 #define DN_MAX_HASH_SIZE	65536
87 
88 /*
89  * Some macros are used to compare key values and handle wraparounds.
90  * MAX64 returns the largest of two key values.
91  */
92 #define DN_KEY_LT(a, b)		((int64_t)((a) - (b)) < 0)
93 #define DN_KEY_LEQ(a, b)	((int64_t)((a) - (b)) <= 0)
94 #define DN_KEY_GT(a, b)		((int64_t)((a) - (b)) > 0)
95 #define DN_KEY_GEQ(a, b)	((int64_t)((a) - (b)) >= 0)
96 #define MAX64(x, y)		((((int64_t)((y) - (x))) > 0) ? (y) : (x))
97 
98 #define DN_NR_HASH_MAX		16
99 #define DN_NR_HASH_MASK		(DN_NR_HASH_MAX - 1)
100 #define DN_NR_HASH(nr)		\
101 	((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)
102 
103 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
104 
105 extern int	ip_dn_cpu;
106 
107 static dn_key	curr_time = 0;		/* current simulation time */
108 static int	dn_hash_size = 64;	/* default hash size */
109 static int	pipe_expire = 1;	/* expire queue if empty */
110 static int	dn_max_ratio = 16;	/* max queues/buckets ratio */
111 
112 /*
113  * Statistics on number of queue searches and search steps
114  */
115 static int	searches;
116 static int	search_steps;
117 
118 /*
119  * RED parameters
120  */
121 static int	red_lookup_depth = 256;	/* default lookup table depth */
122 static int	red_avg_pkt_size = 512;	/* default medium packet size */
123 static int	red_max_pkt_size = 1500;/* default max packet size */
124 
125 /*
126  * Three heaps contain queues and pipes that the scheduler handles:
127  *
128  *  + ready_heap	contains all dn_flow_queue related to fixed-rate pipes.
129  *  + wfq_ready_heap	contains the pipes associated with WF2Q flows
130  *  + extract_heap	contains pipes associated with delay lines.
131  */
132 static struct dn_heap	ready_heap;
133 static struct dn_heap	extract_heap;
134 static struct dn_heap	wfq_ready_heap;
135 
136 static struct dn_pipe_head	pipe_table[DN_NR_HASH_MAX];
137 static struct dn_flowset_head	flowset_table[DN_NR_HASH_MAX];
138 
139 /*
140  * Variables for dummynet systimer
141  */
142 static struct netmsg_base dn_netmsg;
143 static struct systimer	dn_clock;
144 #ifdef _KERNEL_VIRTUAL
145 static int		dn_hz = 100;
146 #else
147 static int		dn_hz = 1000;
148 #endif
149 static int		dn_count;
150 static int		dn_running;
151 static struct lock	dn_lock = LOCK_INITIALIZER("dnlk", 0, 0);
152 
153 static int	sysctl_dn_hz(SYSCTL_HANDLER_ARGS);
154 
155 SYSCTL_DECL(_net_inet_ip_dummynet);
156 
157 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
158 	   &dn_hash_size, 0, "Default hash table size");
159 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
160 	   &curr_time, 0, "Current tick");
161 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
162 	   &pipe_expire, 0, "Expire queue if empty");
163 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
164 	   &dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");
165 
166 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
167 	   &ready_heap.size, 0, "Size of ready heap");
168 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
169 	   &extract_heap.size, 0, "Size of extract heap");
170 
171 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
172 	   &searches, 0, "Number of queue searches");
173 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
174 	   &search_steps, 0, "Number of queue search steps");
175 
176 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
177 	   &red_lookup_depth, 0, "Depth of RED lookup table");
178 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
179 	   &red_avg_pkt_size, 0, "RED Medium packet size");
180 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
181 	   &red_max_pkt_size, 0, "RED Max packet size");
182 
183 SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
184 	    0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");
185 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, running, CTLFLAG_RD,
186 	   &dn_running, 0, "Dummynet Active");
187 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, count, CTLFLAG_RD,
188 	   &dn_count, 0, "Dummynet pipe+flow count");
189 
190 static int	heap_init(struct dn_heap *, int);
191 static int	heap_insert(struct dn_heap *, dn_key, void *);
192 static void	heap_extract(struct dn_heap *, void *);
193 
194 static void	transmit_event(struct dn_pipe *);
195 static void	ready_event(struct dn_flow_queue *);
196 static void	ready_event_wfq(struct dn_pipe *);
197 
198 static int	config_pipe(struct dn_ioc_pipe *);
199 static void	dummynet_flush(void);
200 
201 static void	dummynet_clock(systimer_t, int, struct intrframe *);
202 static void	dummynet(netmsg_t);
203 
204 static struct dn_pipe *dn_find_pipe(int);
205 static struct dn_flow_set *dn_locate_flowset(int, int);
206 
207 typedef void	(*dn_pipe_iter_t)(struct dn_pipe *, void *);
208 static void	dn_iterate_pipe(dn_pipe_iter_t, void *);
209 
210 typedef void	(*dn_flowset_iter_t)(struct dn_flow_set *, void *);
211 static void	dn_iterate_flowset(dn_flowset_iter_t, void *);
212 
213 static ip_dn_io_t	dummynet_io;
214 static ip_dn_ctl_t	dummynet_ctl;
215 
216 /*
217  * Heap management functions.
218  *
219  * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
220  * Some macros help finding parent/children so we can optimize them.
221  *
222  * heap_init() is called to expand the heap when needed.
223  * Increment size in blocks of 16 entries.
224  * XXX failure to allocate a new element is a pretty bad failure
225  * as we basically stall a whole queue forever!!
226  * Returns 1 on error, 0 on success
227  */
228 #define HEAP_FATHER(x)		(((x) - 1) / 2)
229 #define HEAP_LEFT(x)		(2*(x) + 1)
230 #define HEAP_IS_LEFT(x)		((x) & 1)
231 #define HEAP_RIGHT(x)		(2*(x) + 2)
232 #define HEAP_SWAP(a, b, buffer)	{ buffer = a; a = b; b = buffer; }
233 #define HEAP_INCREMENT		15
234 
235 static int
236 heap_init(struct dn_heap *h, int new_size)
237 {
238     struct dn_heap_entry *p;
239 
240     if (h->size >= new_size) {
241 	kprintf("%s, Bogus call, have %d want %d\n", __func__,
242 		h->size, new_size);
243 	return 0;
244     }
245 
246     new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
247     p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
248     if (h->size > 0) {
249 	bcopy(h->p, p, h->size * sizeof(*p));
250 	kfree(h->p, M_DUMMYNET);
251     }
252     h->p = p;
253     h->size = new_size;
254     return 0;
255 }
256 
257 /*
258  * Insert element in heap. Normally, p != NULL, we insert p in
259  * a new position and bubble up.  If p == NULL, then the element is
260  * already in place, and key is the position where to start the
261  * bubble-up.
262  * Returns 1 on failure (cannot allocate new heap entry)
263  *
264  * If offset > 0 the position (index, int) of the element in the heap is
265  * also stored in the element itself at the given offset in bytes.
266  */
267 #define SET_OFFSET(heap, node) \
268     if (heap->offset > 0) \
269 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = node;
270 
271 /*
272  * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
273  */
274 #define RESET_OFFSET(heap, node) \
275     if (heap->offset > 0) \
276 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;
277 
278 static int
279 heap_insert(struct dn_heap *h, dn_key key1, void *p)
280 {
281     int son;
282 
283     if (p == NULL) {	/* Data already there, set starting point */
284 	son = key1;
285     } else {		/* Insert new element at the end, possibly resize */
286 	son = h->elements;
287 	if (son == h->size) { /* Need resize... */
288 	    if (heap_init(h, h->elements + 1))
289 		return 1; /* Failure... */
290 	}
291 	h->p[son].object = p;
292 	h->p[son].key = key1;
293 	h->elements++;
294     }
295 
296     while (son > 0) {	/* Bubble up */
297 	int father = HEAP_FATHER(son);
298 	struct dn_heap_entry tmp;
299 
300 	if (DN_KEY_LT(h->p[father].key, h->p[son].key))
301 	    break; /* Found right position */
302 
303 	/* 'son' smaller than 'father', swap and repeat */
304 	HEAP_SWAP(h->p[son], h->p[father], tmp);
305 	SET_OFFSET(h, son);
306 	son = father;
307     }
308     SET_OFFSET(h, son);
309     return 0;
310 }
311 
312 /*
313  * Remove top element from heap, or obj if obj != NULL
314  */
315 static void
316 heap_extract(struct dn_heap *h, void *obj)
317 {
318     int child, father, max = h->elements - 1;
319 
320     if (max < 0) {
321 	kprintf("warning, extract from empty heap 0x%p\n", h);
322 	return;
323     }
324 
325     father = 0; /* Default: move up smallest child */
326     if (obj != NULL) { /* Extract specific element, index is at offset */
327 	if (h->offset <= 0)
328 	    panic("%s from middle not supported on this heap!!!", __func__);
329 
330 	father = *((int *)((char *)obj + h->offset));
331 	if (father < 0 || father >= h->elements) {
332 	    panic("%s father %d out of bound 0..%d", __func__,
333 	    	  father, h->elements);
334 	}
335     }
336     RESET_OFFSET(h, father);
337 
338     child = HEAP_LEFT(father);		/* Left child */
339     while (child <= max) {		/* Valid entry */
340 	if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
341 	    child = child + 1;		/* Take right child, otherwise left */
342 	h->p[father] = h->p[child];
343 	SET_OFFSET(h, father);
344 	father = child;
345 	child = HEAP_LEFT(child);	/* Left child for next loop */
346     }
347     h->elements--;
348     if (father != max) {
349 	/*
350 	 * Fill hole with last entry and bubble up, reusing the insert code
351 	 */
352 	h->p[father] = h->p[max];
353 	heap_insert(h, father, NULL);	/* This one cannot fail */
354     }
355 }
356 
357 /*
358  * heapify() will reorganize data inside an array to maintain the
359  * heap property.  It is needed when we delete a bunch of entries.
360  */
361 static void
362 heapify(struct dn_heap *h)
363 {
364     int i;
365 
366     for (i = 0; i < h->elements; i++)
367 	heap_insert(h, i , NULL);
368 }
369 
370 /*
371  * Cleanup the heap and free data structure
372  */
373 static void
374 heap_free(struct dn_heap *h)
375 {
376     if (h->size > 0)
377 	kfree(h->p, M_DUMMYNET);
378     bzero(h, sizeof(*h));
379 }
380 
381 /*
382  * --- End of heap management functions ---
383  */
384 
385 /*
386  * Scheduler functions:
387  *
388  * transmit_event() is called when the delay-line needs to enter
389  * the scheduler, either because of existing pkts getting ready,
390  * or new packets entering the queue.  The event handled is the delivery
391  * time of the packet.
392  *
393  * ready_event() does something similar with fixed-rate queues, and the
394  * event handled is the finish time of the head pkt.
395  *
396  * ready_event_wfq() does something similar with WF2Q queues, and the
397  * event handled is the start time of the head pkt.
398  *
399  * In all cases, we make sure that the data structures are consistent
400  * before passing pkts out, because this might trigger recursive
401  * invocations of the procedures.
402  */
403 static void
404 transmit_event(struct dn_pipe *pipe)
405 {
406     struct dn_pkt *pkt;
407 
408     while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
409     	   DN_KEY_LEQ(pkt->output_time, curr_time)) {
410 	TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
411 	ip_dn_packet_redispatch(pkt);
412     }
413 
414     /*
415      * If there are leftover packets, put into the heap for next event
416      */
417     if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
418 	/*
419 	 * XXX should check errors on heap_insert, by draining the
420 	 * whole pipe and hoping in the future we are more successful
421 	 */
422 	heap_insert(&extract_heap, pkt->output_time, pipe);
423     }
424 }
425 
426 /*
427  * The following macro computes how many ticks we have to wait
428  * before being able to transmit a packet. The credit is taken from
429  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
430  */
431 #define SET_TICKS(pkt, q, p)	\
432     (pkt->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + p->bandwidth - 1 ) / \
433 	    p->bandwidth;
434 
435 /*
436  * Extract pkt from queue, compute output time (could be now)
437  * and put into delay line (p_queue)
438  */
439 static void
440 move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
441 	 struct dn_pipe *p, int len)
442 {
443     TAILQ_REMOVE(&q->queue, pkt, dn_next);
444     q->len--;
445     q->len_bytes -= len;
446 
447     pkt->output_time = curr_time + p->delay;
448 
449     TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
450 }
451 
452 /*
453  * ready_event() is invoked every time the queue must enter the
454  * scheduler, either because the first packet arrives, or because
455  * a previously scheduled event fired.
456  * On invokation, drain as many pkts as possible (could be 0) and then
457  * if there are leftover packets reinsert the pkt in the scheduler.
458  */
459 static void
460 ready_event(struct dn_flow_queue *q)
461 {
462     struct dn_pkt *pkt;
463     struct dn_pipe *p = q->fs->pipe;
464     int p_was_empty;
465 
466     if (p == NULL) {
467 	kprintf("ready_event- pipe is gone\n");
468 	return;
469     }
470     p_was_empty = TAILQ_EMPTY(&p->p_queue);
471 
472     /*
473      * Schedule fixed-rate queues linked to this pipe:
474      * Account for the bw accumulated since last scheduling, then
475      * drain as many pkts as allowed by q->numbytes and move to
476      * the delay line (in p) computing output time.
477      * bandwidth==0 (no limit) means we can drain the whole queue,
478      * setting len_scaled = 0 does the job.
479      */
480     q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
481     while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
482 	int len = pkt->dn_m->m_pkthdr.len;
483 	int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
484 
485 	if (len_scaled > q->numbytes)
486 	    break;
487 	q->numbytes -= len_scaled;
488 	move_pkt(pkt, q, p, len);
489     }
490 
491     /*
492      * If we have more packets queued, schedule next ready event
493      * (can only occur when bandwidth != 0, otherwise we would have
494      * flushed the whole queue in the previous loop).
495      * To this purpose we record the current time and compute how many
496      * ticks to go for the finish time of the packet.
497      */
498     if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
499     	/* This implies bandwidth != 0 */
500 	dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
501 
502 	q->sched_time = curr_time;
503 
504 	/*
505 	 * XXX should check errors on heap_insert, and drain the whole
506 	 * queue on error hoping next time we are luckier.
507 	 */
508 	heap_insert(&ready_heap, curr_time + t, q);
509     } else {	/* RED needs to know when the queue becomes empty */
510 	q->q_time = curr_time;
511 	q->numbytes = 0;
512     }
513 
514     /*
515      * If the delay line was empty call transmit_event(p) now.
516      * Otherwise, the scheduler will take care of it.
517      */
518     if (p_was_empty)
519 	transmit_event(p);
520 }
521 
522 /*
523  * Called when we can transmit packets on WF2Q queues.  Take pkts out of
524  * the queues at their start time, and enqueue into the delay line.
525  * Packets are drained until p->numbytes < 0.  As long as
526  * len_scaled >= p->numbytes, the packet goes into the delay line
527  * with a deadline p->delay.  For the last packet, if p->numbytes < 0,
528  * there is an additional delay.
529  */
530 static void
531 ready_event_wfq(struct dn_pipe *p)
532 {
533     int p_was_empty = TAILQ_EMPTY(&p->p_queue);
534     struct dn_heap *sch = &p->scheduler_heap;
535     struct dn_heap *neh = &p->not_eligible_heap;
536 
537     p->numbytes += (curr_time - p->sched_time) * p->bandwidth;
538 
539     /*
540      * While we have backlogged traffic AND credit, we need to do
541      * something on the queue.
542      */
543     while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
544 	if (sch->elements > 0) { /* Have some eligible pkts to send out */
545 	    struct dn_flow_queue *q = sch->p[0].object;
546 	    struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
547 	    struct dn_flow_set *fs = q->fs;
548 	    uint64_t len = pkt->dn_m->m_pkthdr.len;
549 	    int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
550 
551 	    heap_extract(sch, NULL);	/* Remove queue from heap */
552 	    p->numbytes -= len_scaled;
553 	    move_pkt(pkt, q, p, len);
554 
555 	    p->V += (len << MY_M) / p->sum;	/* Update V */
556 	    q->S = q->F;			/* Update start time */
557 
558 	    if (q->len == 0) {	/* Flow not backlogged any more */
559 		fs->backlogged--;
560 		heap_insert(&p->idle_heap, q->F, q);
561 	    } else {		/* Still backlogged */
562 		/*
563 		 * Update F and position in backlogged queue, then
564 		 * put flow in not_eligible_heap (we will fix this later).
565 		 */
566 		len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
567 		q->F += (len << MY_M) / (uint64_t)fs->weight;
568 		if (DN_KEY_LEQ(q->S, p->V))
569 		    heap_insert(neh, q->S, q);
570 		else
571 		    heap_insert(sch, q->F, q);
572 	    }
573 	}
574 
575 	/*
576 	 * Now compute V = max(V, min(S_i)).  Remember that all elements in
577 	 * sch have by definition S_i <= V so if sch is not empty, V is surely
578 	 * the max and we must not update it.  Conversely, if sch is empty
579 	 * we only need to look at neh.
580 	 */
581 	if (sch->elements == 0 && neh->elements > 0)
582 	    p->V = MAX64(p->V, neh->p[0].key);
583 
584 	/*
585 	 * Move from neh to sch any packets that have become eligible
586 	 */
587 	while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
588 	    struct dn_flow_queue *q = neh->p[0].object;
589 
590 	    heap_extract(neh, NULL);
591 	    heap_insert(sch, q->F, q);
592 	}
593     }
594 
595     if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
596     	p->idle_heap.elements > 0) {
597 	/*
598 	 * No traffic and no events scheduled.  We can get rid of idle-heap.
599 	 */
600 	int i;
601 
602 	for (i = 0; i < p->idle_heap.elements; i++) {
603 	    struct dn_flow_queue *q = p->idle_heap.p[i].object;
604 
605 	    q->F = 0;
606 	    q->S = q->F + 1;
607 	}
608 	p->sum = 0;
609 	p->V = 0;
610 	p->idle_heap.elements = 0;
611     }
612 
613     /*
614      * If we are getting clocks from dummynet and if we are under credit,
615      * schedule the next ready event.
616      * Also fix the delivery time of the last packet.
617      */
618     if (p->numbytes < 0) { /* This implies bandwidth>0 */
619 	dn_key t = 0; /* Number of ticks i have to wait */
620 
621 	if (p->bandwidth > 0)
622 	    t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
623 	TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
624 	p->sched_time = curr_time;
625 
626 	/*
627 	 * XXX should check errors on heap_insert, and drain the whole
628 	 * queue on error hoping next time we are luckier.
629 	 */
630 	heap_insert(&wfq_ready_heap, curr_time + t, p);
631     }
632 
633     /*
634      * If the delay line was empty call transmit_event(p) now.
635      * Otherwise, the scheduler will take care of it.
636      */
637     if (p_was_empty)
638 	transmit_event(p);
639 }
640 
641 static void
642 dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
643 {
644     if (pipe->idle_heap.elements > 0 &&
645 	DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
646 	struct dn_flow_queue *q = pipe->idle_heap.p[0].object;
647 
648 	heap_extract(&pipe->idle_heap, NULL);
649 	q->S = q->F + 1; /* Mark timestamp as invalid */
650 	pipe->sum -= q->fs->weight;
651     }
652 }
653 
654 /*
655  * This is called once per tick, or dn_hz times per second.  It is used to
656  * increment the current tick counter and schedule expired events.
657  */
658 static void
659 dummynet(netmsg_t msg)
660 {
661     void *p;
662     struct dn_heap *h;
663     struct dn_heap *heaps[3];
664     int i;
665 
666     heaps[0] = &ready_heap;		/* Fixed-rate queues */
667     heaps[1] = &wfq_ready_heap;		/* WF2Q queues */
668     heaps[2] = &extract_heap;		/* Delay line */
669 
670     /* Reply ASAP */
671     crit_enter();
672     lwkt_replymsg(&msg->lmsg, 0);
673     crit_exit();
674 
675     curr_time++;
676     for (i = 0; i < 3; i++) {
677 	h = heaps[i];
678 	while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
679 	    if (h->p[0].key > curr_time) {
680 		kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
681 		    i, (int)(curr_time - h->p[0].key));
682 	    }
683 
684 	    p = h->p[0].object;		/* Store a copy before heap_extract */
685 	    heap_extract(h, NULL);	/* Need to extract before processing */
686 
687 	    if (i == 0)
688 		ready_event(p);
689 	    else if (i == 1)
690 		ready_event_wfq(p);
691 	    else
692 		transmit_event(p);
693 	}
694     }
695 
696     /* Sweep pipes trying to expire idle flow_queues */
697     dn_iterate_pipe(dn_expire_pipe_cb, NULL);
698 }
699 
700 /*
701  * Unconditionally expire empty queues in case of shortage.
702  * Returns the number of queues freed.
703  */
704 static int
705 expire_queues(struct dn_flow_set *fs)
706 {
707     int i, initial_elements = fs->rq_elements;
708 
709     if (fs->last_expired == time_uptime)
710 	return 0;
711 
712     fs->last_expired = time_uptime;
713 
714     for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */
715 	struct dn_flow_queue *q, *qn;
716 
717 	LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
718 	    if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
719 		continue;
720 
721  	    /*
722 	     * Entry is idle, expire it
723 	     */
724 	    LIST_REMOVE(q, q_link);
725 	    kfree(q, M_DUMMYNET);
726 
727 	    KASSERT(fs->rq_elements > 0,
728 		    ("invalid rq_elements %d", fs->rq_elements));
729 	    fs->rq_elements--;
730 	}
731     }
732     return initial_elements - fs->rq_elements;
733 }
734 
735 /*
736  * If room, create a new queue and put at head of slot i;
737  * otherwise, create or use the default queue.
738  */
739 static struct dn_flow_queue *
740 create_queue(struct dn_flow_set *fs, int i)
741 {
742     struct dn_flow_queue *q;
743 
744     if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
745 	expire_queues(fs) == 0) {
746 	/*
747 	 * No way to get room, use or create overflow queue.
748 	 */
749 	i = fs->rq_size;
750 	if (!LIST_EMPTY(&fs->rq[i]))
751 	    return LIST_FIRST(&fs->rq[i]);
752     }
753 
754     q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
755     if (q == NULL)
756 	return NULL;
757 
758     q->fs = fs;
759     q->hash_slot = i;
760     q->S = q->F + 1;   /* hack - mark timestamp as invalid */
761     TAILQ_INIT(&q->queue);
762 
763     LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
764     fs->rq_elements++;
765 
766     return q;
767 }
768 
769 /*
770  * Given a flow_set and a pkt in last_pkt, find a matching queue
771  * after appropriate masking. The queue is moved to front
772  * so that further searches take less time.
773  */
774 static struct dn_flow_queue *
775 find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
776 {
777     struct dn_flow_queue *q;
778     int i = 0;
779 
780     if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
781 	q = LIST_FIRST(&fs->rq[0]);
782     } else {
783 	struct dn_flow_queue *qn;
784 
785 	/* First, do the masking */
786 	id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
787 	id->fid_src_ip &= fs->flow_mask.fid_src_ip;
788 	id->fid_dst_port &= fs->flow_mask.fid_dst_port;
789 	id->fid_src_port &= fs->flow_mask.fid_src_port;
790 	id->fid_proto &= fs->flow_mask.fid_proto;
791 	id->fid_flags = 0; /* we don't care about this one */
792 
793 	/* Then, hash function */
794 	i = ((id->fid_dst_ip) & 0xffff) ^
795 	    ((id->fid_dst_ip >> 15) & 0xffff) ^
796 	    ((id->fid_src_ip << 1) & 0xffff) ^
797 	    ((id->fid_src_ip >> 16 ) & 0xffff) ^
798 	    (id->fid_dst_port << 1) ^ (id->fid_src_port) ^
799 	    (id->fid_proto);
800 	i = i % fs->rq_size;
801 
802 	/*
803 	 * Finally, scan the current list for a match and
804 	 * expire idle flow queues
805 	 */
806 	searches++;
807 	LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
808 	    search_steps++;
809 	    if (id->fid_dst_ip == q->id.fid_dst_ip &&
810 		id->fid_src_ip == q->id.fid_src_ip &&
811 		id->fid_dst_port == q->id.fid_dst_port &&
812 		id->fid_src_port == q->id.fid_src_port &&
813 		id->fid_proto == q->id.fid_proto &&
814 		id->fid_flags == q->id.fid_flags) {
815 		break; /* Found */
816 	    } else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
817 	    	       q->S == q->F + 1) {
818 		/*
819 		 * Entry is idle and not in any heap, expire it
820 		 */
821 		LIST_REMOVE(q, q_link);
822 		kfree(q, M_DUMMYNET);
823 
824 		KASSERT(fs->rq_elements > 0,
825 			("invalid rq_elements %d", fs->rq_elements));
826 		fs->rq_elements--;
827 	    }
828 	}
829 	if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */
830 	    LIST_REMOVE(q, q_link);
831 	    LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
832 	}
833     }
834     if (q == NULL) {	/* No match, need to allocate a new entry */
835 	q = create_queue(fs, i);
836 	if (q != NULL)
837 	    q->id = *id;
838     }
839     return q;
840 }
841 
842 static int
843 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
844 {
845     /*
846      * RED algorithm
847      *
848      * RED calculates the average queue size (avg) using a low-pass filter
849      * with an exponential weighted (w_q) moving average:
850      * 	avg  <-  (1-w_q) * avg + w_q * q_size
851      * where q_size is the queue length (measured in bytes or * packets).
852      *
853      * If q_size == 0, we compute the idle time for the link, and set
854      *	avg = (1 - w_q)^(idle/s)
855      * where s is the time needed for transmitting a medium-sized packet.
856      *
857      * Now, if avg < min_th the packet is enqueued.
858      * If avg > max_th the packet is dropped. Otherwise, the packet is
859      * dropped with probability P function of avg.
860      */
861 
862     int64_t p_b = 0;
863     u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
864 
865     DPRINTF("\n%d q: %2u ", (int)curr_time, q_size);
866 
867     /* Average queue size estimation */
868     if (q_size != 0) {
869 	/*
870 	 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
871 	 */
872 	int diff = SCALE(q_size) - q->avg;
873 	int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
874 
875 	q->avg += (int)v;
876     } else {
877 	/*
878 	 * Queue is empty, find for how long the queue has been
879 	 * empty and use a lookup table for computing
880 	 * (1 - * w_q)^(idle_time/s) where s is the time to send a
881 	 * (small) packet.
882 	 * XXX check wraps...
883 	 */
884 	if (q->avg) {
885 	    u_int t = (curr_time - q->q_time) / fs->lookup_step;
886 
887 	    q->avg = (t < fs->lookup_depth) ?
888 		     SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
889 	}
890     }
891     DPRINTF("avg: %u ", SCALE_VAL(q->avg));
892 
893     /* Should i drop? */
894 
895     if (q->avg < fs->min_th) {
896 	/* Accept packet */
897 	q->count = -1;
898 	return 0;
899     }
900 
901     if (q->avg >= fs->max_th) { /* Average queue >=  Max threshold */
902 	if (fs->flags_fs & DN_IS_GENTLE_RED) {
903 	    /*
904 	     * According to Gentle-RED, if avg is greater than max_th the
905 	     * packet is dropped with a probability
906 	     *	p_b = c_3 * avg - c_4
907 	     * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
908 	     */
909 	    p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
910 	} else {
911 	    q->count = -1;
912 	    kprintf("- drop\n");
913 	    return 1;
914 	}
915     } else if (q->avg > fs->min_th) {
916 	/*
917 	 * We compute p_b using the linear dropping function p_b = c_1 *
918 	 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
919 	 * max_p * min_th / (max_th - min_th)
920 	 */
921 	p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
922     }
923     if (fs->flags_fs & DN_QSIZE_IS_BYTES)
924 	p_b = (p_b * len) / fs->max_pkt_size;
925 
926     if (++q->count == 0) {
927 	q->random = krandom() & 0xffff;
928     } else {
929 	/*
930 	 * q->count counts packets arrived since last drop, so a greater
931 	 * value of q->count means a greater packet drop probability.
932 	 */
933 	if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
934 	    q->count = 0;
935 	    DPRINTF("%s", "- red drop");
936 	    /* After a drop we calculate a new random value */
937 	    q->random = krandom() & 0xffff;
938 	    return 1;    /* Drop */
939 	}
940     }
941     /* End of RED algorithm */
942     return 0; /* Accept */
943 }
944 
945 static void
946 dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
947 {
948     int i;
949 
950     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
951 	struct dn_pipe_head *pipe_hdr = &pipe_table[i];
952 	struct dn_pipe *pipe, *pipe_next;
953 
954 	LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
955 	    func(pipe, arg);
956     }
957 }
958 
959 static void
960 dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
961 {
962     int i;
963 
964     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
965 	struct dn_flowset_head *fs_hdr = &flowset_table[i];
966 	struct dn_flow_set *fs, *fs_next;
967 
968 	LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
969 	    func(fs, arg);
970     }
971 }
972 
973 static struct dn_pipe *
974 dn_find_pipe(int pipe_nr)
975 {
976     struct dn_pipe_head *pipe_hdr;
977     struct dn_pipe *p;
978 
979     pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
980     LIST_FOREACH(p, pipe_hdr, p_link) {
981 	if (p->pipe_nr == pipe_nr)
982 	    break;
983     }
984     return p;
985 }
986 
987 static struct dn_flow_set *
988 dn_find_flowset(int fs_nr)
989 {
990     struct dn_flowset_head *fs_hdr;
991     struct dn_flow_set *fs;
992 
993     fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
994     LIST_FOREACH(fs, fs_hdr, fs_link) {
995 	if (fs->fs_nr == fs_nr)
996 	    break;
997     }
998     return fs;
999 }
1000 
1001 static struct dn_flow_set *
1002 dn_locate_flowset(int pipe_nr, int is_pipe)
1003 {
1004     struct dn_flow_set *fs = NULL;
1005 
1006     if (!is_pipe) {
1007 	fs = dn_find_flowset(pipe_nr);
1008     } else {
1009 	struct dn_pipe *p;
1010 
1011 	p = dn_find_pipe(pipe_nr);
1012 	if (p != NULL)
1013 	    fs = &p->fs;
1014     }
1015     return fs;
1016 }
1017 
1018 /*
1019  * Dummynet hook for packets.  Below 'pipe' is a pipe or a queue
1020  * depending on whether WF2Q or fixed bw is used.
1021  *
1022  * pipe_nr	pipe or queue the packet is destined for.
1023  * dir		where shall we send the packet after dummynet.
1024  * m		the mbuf with the packet
1025  * fwa->oif	the 'ifp' parameter from the caller.
1026  *		NULL in ip_input, destination interface in ip_output
1027  * fwa->ro	route parameter (only used in ip_output, NULL otherwise)
1028  * fwa->dst	destination address, only used by ip_output
1029  * fwa->rule	matching rule, in case of multiple passes
1030  * fwa->flags	flags from the caller, only used in ip_output
1031  */
1032 static int
1033 dummynet_io(struct mbuf *m)
1034 {
1035     struct dn_pkt *pkt;
1036     struct m_tag *tag;
1037     struct dn_flow_set *fs;
1038     struct dn_pipe *pipe;
1039     uint64_t len = m->m_pkthdr.len;
1040     struct dn_flow_queue *q = NULL;
1041     int is_pipe, pipe_nr;
1042 
1043     tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1044     pkt = m_tag_data(tag);
1045 
1046     is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
1047     pipe_nr = pkt->pipe_nr;
1048 
1049     /*
1050      * This is a dummynet rule, so we expect a O_PIPE or O_QUEUE rule
1051      */
1052     fs = dn_locate_flowset(pipe_nr, is_pipe);
1053     if (fs == NULL)
1054 	goto dropit;	/* This queue/pipe does not exist! */
1055 
1056     pipe = fs->pipe;
1057     if (pipe == NULL) { /* Must be a queue, try find a matching pipe */
1058 	pipe = dn_find_pipe(fs->parent_nr);
1059 	if (pipe != NULL) {
1060 	    fs->pipe = pipe;
1061 	} else {
1062 	    kprintf("No pipe %d for queue %d, drop pkt\n",
1063 	    	    fs->parent_nr, fs->fs_nr);
1064 	    goto dropit;
1065 	}
1066     }
1067 
1068     q = find_queue(fs, &pkt->id);
1069     if (q == NULL)
1070 	goto dropit;	/* Cannot allocate queue */
1071 
1072     /*
1073      * Update statistics, then check reasons to drop pkt
1074      */
1075     q->tot_bytes += len;
1076     q->tot_pkts++;
1077 
1078     if (fs->plr && krandom() < fs->plr)
1079 	goto dropit;	/* Random pkt drop */
1080 
1081     if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1082     	if (q->len_bytes > fs->qsize)
1083 	    goto dropit;	/* Queue size overflow */
1084     } else {
1085 	if (q->len >= fs->qsize)
1086 	    goto dropit;	/* Queue count overflow */
1087     }
1088 
1089     if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
1090 	goto dropit;
1091 
1092     TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
1093     q->len++;
1094     q->len_bytes += len;
1095 
1096     if (TAILQ_FIRST(&q->queue) != pkt)	/* Flow was not idle, we are done */
1097 	goto done;
1098 
1099     /*
1100      * If we reach this point the flow was previously idle, so we need
1101      * to schedule it.  This involves different actions for fixed-rate
1102      * or WF2Q queues.
1103      */
1104     if (is_pipe) {
1105 	/*
1106 	 * Fixed-rate queue: just insert into the ready_heap.
1107 	 */
1108 	dn_key t = 0;
1109 
1110 	if (pipe->bandwidth)
1111 	    t = SET_TICKS(pkt, q, pipe);
1112 
1113 	q->sched_time = curr_time;
1114 	if (t == 0)	/* Must process it now */
1115 	    ready_event(q);
1116 	else
1117 	    heap_insert(&ready_heap, curr_time + t, q);
1118     } else {
1119 	/*
1120 	 * WF2Q:
1121 	 * First, compute start time S: if the flow was idle (S=F+1)
1122 	 * set S to the virtual time V for the controlling pipe, and update
1123 	 * the sum of weights for the pipe; otherwise, remove flow from
1124 	 * idle_heap and set S to max(F, V).
1125 	 * Second, compute finish time F = S + len/weight.
1126 	 * Third, if pipe was idle, update V = max(S, V).
1127 	 * Fourth, count one more backlogged flow.
1128 	 */
1129 	if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid */
1130 	    q->S = pipe->V;
1131 	    pipe->sum += fs->weight; /* Add weight of new queue */
1132 	} else {
1133 	    heap_extract(&pipe->idle_heap, q);
1134 	    q->S = MAX64(q->F, pipe->V);
1135 	}
1136 	q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;
1137 
1138 	if (pipe->not_eligible_heap.elements == 0 &&
1139 	    pipe->scheduler_heap.elements == 0)
1140 	    pipe->V = MAX64(q->S, pipe->V);
1141 
1142 	fs->backlogged++;
1143 
1144 	/*
1145 	 * Look at eligibility.  A flow is not eligibile if S>V (when
1146 	 * this happens, it means that there is some other flow already
1147 	 * scheduled for the same pipe, so the scheduler_heap cannot be
1148 	 * empty).  If the flow is not eligible we just store it in the
1149 	 * not_eligible_heap.  Otherwise, we store in the scheduler_heap
1150 	 * and possibly invoke ready_event_wfq() right now if there is
1151 	 * leftover credit.
1152 	 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1153 	 * and for all flows in not_eligible_heap (NEH), S_i > V.
1154 	 * So when we need to compute max(V, min(S_i)) forall i in SCH+NEH,
1155 	 * we only need to look into NEH.
1156 	 */
1157 	if (DN_KEY_GT(q->S, pipe->V)) {	/* Not eligible */
1158 	    if (pipe->scheduler_heap.elements == 0)
1159 		kprintf("++ ouch! not eligible but empty scheduler!\n");
1160 	    heap_insert(&pipe->not_eligible_heap, q->S, q);
1161 	} else {
1162 	    heap_insert(&pipe->scheduler_heap, q->F, q);
1163 	    if (pipe->numbytes >= 0) {	/* Pipe is idle */
1164 		if (pipe->scheduler_heap.elements != 1)
1165 		    kprintf("*** OUCH! pipe should have been idle!\n");
1166 		DPRINTF("Waking up pipe %d at %d\n",
1167 			pipe->pipe_nr, (int)(q->F >> MY_M));
1168 		pipe->sched_time = curr_time;
1169 		ready_event_wfq(pipe);
1170 	    }
1171 	}
1172     }
1173 done:
1174     return 0;
1175 
1176 dropit:
1177     if (q)
1178 	q->drops++;
1179     return ENOBUFS;
1180 }
1181 
1182 /*
1183  * Dispose all packets and flow_queues on a flow_set.
1184  * If all=1, also remove red lookup table and other storage,
1185  * including the descriptor itself.
1186  * For the one in dn_pipe MUST also cleanup ready_heap...
1187  */
1188 static void
1189 purge_flow_set(struct dn_flow_set *fs, int all)
1190 {
1191     int i;
1192 #ifdef INVARIANTS
1193     int rq_elements = 0;
1194 #endif
1195 
1196     for (i = 0; i <= fs->rq_size; i++) {
1197 	struct dn_flow_queue *q;
1198 
1199 	while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
1200 	    struct dn_pkt *pkt;
1201 
1202 	    while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
1203 	    	TAILQ_REMOVE(&q->queue, pkt, dn_next);
1204 	    	ip_dn_packet_free(pkt);
1205 	    }
1206 
1207 	    LIST_REMOVE(q, q_link);
1208 	    kfree(q, M_DUMMYNET);
1209 
1210 #ifdef INVARIANTS
1211 	    rq_elements++;
1212 #endif
1213 	}
1214     }
1215     KASSERT(rq_elements == fs->rq_elements,
1216 	    ("# rq elements mismatch, freed %d, total %d",
1217 	     rq_elements, fs->rq_elements));
1218     fs->rq_elements = 0;
1219 
1220     if (all) {
1221 	/* RED - free lookup table */
1222 	if (fs->w_q_lookup)
1223 	    kfree(fs->w_q_lookup, M_DUMMYNET);
1224 
1225 	if (fs->rq)
1226 	    kfree(fs->rq, M_DUMMYNET);
1227 
1228 	/*
1229 	 * If this fs is not part of a pipe, free it
1230 	 *
1231 	 * fs->pipe == NULL could happen, if 'fs' is a WF2Q and
1232 	 * - No packet belongs to that flow set is delivered by
1233 	 *   dummynet_io(), i.e. parent pipe is not installed yet.
1234 	 * - Parent pipe is deleted.
1235 	 */
1236 	if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
1237 	    kfree(fs, M_DUMMYNET);
1238     }
1239 }
1240 
1241 /*
1242  * Dispose all packets queued on a pipe (not a flow_set).
1243  * Also free all resources associated to a pipe, which is about
1244  * to be deleted.
1245  */
1246 static void
1247 purge_pipe(struct dn_pipe *pipe)
1248 {
1249     struct dn_pkt *pkt;
1250 
1251     purge_flow_set(&pipe->fs, 1);
1252 
1253     while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
1254 	TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
1255 	ip_dn_packet_free(pkt);
1256     }
1257 
1258     heap_free(&pipe->scheduler_heap);
1259     heap_free(&pipe->not_eligible_heap);
1260     heap_free(&pipe->idle_heap);
1261 }
1262 
1263 /*
1264  * Delete all pipes and heaps returning memory.
1265  */
1266 static void
1267 dummynet_flush(void)
1268 {
1269     struct dn_pipe_head pipe_list;
1270     struct dn_flowset_head fs_list;
1271     struct dn_pipe *p;
1272     struct dn_flow_set *fs;
1273     int i;
1274 
1275     lockmgr(&dn_lock, LK_EXCLUSIVE);
1276 
1277     /*
1278      * Prevent future matches...
1279      */
1280     LIST_INIT(&pipe_list);
1281     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1282 	struct dn_pipe_head *pipe_hdr = &pipe_table[i];
1283 
1284     	while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
1285 	    LIST_REMOVE(p, p_link);
1286 	    LIST_INSERT_HEAD(&pipe_list, p, p_link);
1287 	    --dn_count;
1288 	}
1289     }
1290 
1291     LIST_INIT(&fs_list);
1292     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1293 	struct dn_flowset_head *fs_hdr = &flowset_table[i];
1294 
1295 	while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
1296 	    LIST_REMOVE(fs, fs_link);
1297 	    LIST_INSERT_HEAD(&fs_list, fs, fs_link);
1298 	    --dn_count;
1299 	}
1300     }
1301 
1302     /* Free heaps so we don't have unwanted events */
1303     heap_free(&ready_heap);
1304     heap_free(&wfq_ready_heap);
1305     heap_free(&extract_heap);
1306 
1307     /*
1308      * Now purge all queued pkts and delete all pipes
1309      */
1310     /* Scan and purge all flow_sets. */
1311     while ((fs = LIST_FIRST(&fs_list)) != NULL) {
1312 	LIST_REMOVE(fs, fs_link);
1313 	purge_flow_set(fs, 1);
1314     }
1315 
1316     while ((p = LIST_FIRST(&pipe_list)) != NULL) {
1317 	LIST_REMOVE(p, p_link);
1318 	purge_pipe(p);
1319 	kfree(p, M_DUMMYNET);
1320     }
1321 
1322     /*
1323      * Everything has been cleaned out, clear the run state.
1324      */
1325     KKASSERT(dn_count == 0);
1326     if (dn_running) {
1327 	    systimer_del(&dn_clock);
1328 	    dn_running = 0;
1329     }
1330     lockmgr(&dn_lock, LK_RELEASE);
1331 }
1332 
1333 /*
1334  * setup RED parameters
1335  */
1336 static int
1337 config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
1338 {
1339     int i;
1340 
1341     x->w_q = ioc_fs->w_q;
1342     x->min_th = SCALE(ioc_fs->min_th);
1343     x->max_th = SCALE(ioc_fs->max_th);
1344     x->max_p = ioc_fs->max_p;
1345 
1346     x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
1347     x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
1348     if (x->flags_fs & DN_IS_GENTLE_RED) {
1349 	x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
1350 	x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
1351     }
1352 
1353     /* If the lookup table already exist, free and create it again */
1354     if (x->w_q_lookup) {
1355 	kfree(x->w_q_lookup, M_DUMMYNET);
1356 	x->w_q_lookup = NULL ;
1357     }
1358 
1359     if (red_lookup_depth == 0) {
1360 	kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1361 	kfree(x, M_DUMMYNET);
1362 	return EINVAL;
1363     }
1364     x->lookup_depth = red_lookup_depth;
1365     x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
1366     			    M_DUMMYNET, M_WAITOK);
1367 
1368     /* Fill the lookup table with (1 - w_q)^x */
1369     x->lookup_step = ioc_fs->lookup_step;
1370     x->lookup_weight = ioc_fs->lookup_weight;
1371 
1372     x->w_q_lookup[0] = SCALE(1) - x->w_q;
1373     for (i = 1; i < x->lookup_depth; i++)
1374 	x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1375 
1376     if (red_avg_pkt_size < 1)
1377 	red_avg_pkt_size = 512;
1378     x->avg_pkt_size = red_avg_pkt_size;
1379 
1380     if (red_max_pkt_size < 1)
1381 	red_max_pkt_size = 1500;
1382     x->max_pkt_size = red_max_pkt_size;
1383 
1384     return 0;
1385 }
1386 
1387 static void
1388 alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1389 {
1390     int i, alloc_size;
1391 
1392     if (x->flags_fs & DN_HAVE_FLOW_MASK) {
1393 	int l = ioc_fs->rq_size;
1394 
1395 	/* Allocate some slots */
1396 	if (l == 0)
1397 	    l = dn_hash_size;
1398 
1399 	if (l < DN_MIN_HASH_SIZE)
1400 	    l = DN_MIN_HASH_SIZE;
1401 	else if (l > DN_MAX_HASH_SIZE)
1402 	    l = DN_MAX_HASH_SIZE;
1403 
1404 	x->rq_size = l;
1405     } else {
1406 	/* One is enough for null mask */
1407 	x->rq_size = 1;
1408     }
1409     alloc_size = x->rq_size + 1;
1410 
1411     x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
1412 		    M_DUMMYNET, M_WAITOK | M_ZERO);
1413     x->rq_elements = 0;
1414 
1415     for (i = 0; i < alloc_size; ++i)
1416 	LIST_INIT(&x->rq[i]);
1417 }
1418 
1419 static void
1420 set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
1421 {
1422     id->fid_dst_ip = ioc_id->u.ip.dst_ip;
1423     id->fid_src_ip = ioc_id->u.ip.src_ip;
1424     id->fid_dst_port = ioc_id->u.ip.dst_port;
1425     id->fid_src_port = ioc_id->u.ip.src_port;
1426     id->fid_proto = ioc_id->u.ip.proto;
1427     id->fid_flags = ioc_id->u.ip.flags;
1428 }
1429 
1430 static void
1431 set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1432 {
1433     x->flags_fs = ioc_fs->flags_fs;
1434     x->qsize = ioc_fs->qsize;
1435     x->plr = ioc_fs->plr;
1436     set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
1437     if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1438 	if (x->qsize > 1024 * 1024)
1439 	    x->qsize = 1024 * 1024;
1440     } else {
1441 	if (x->qsize == 0 || x->qsize > 100)
1442 	    x->qsize = 50;
1443     }
1444 
1445     /* Configuring RED */
1446     if (x->flags_fs & DN_IS_RED)
1447 	config_red(ioc_fs, x);	/* XXX should check errors */
1448 }
1449 
1450 /*
1451  * setup pipe or queue parameters.
1452  */
1453 
1454 static int
1455 config_pipe(struct dn_ioc_pipe *ioc_pipe)
1456 {
1457     struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
1458     int error;
1459 
1460     /*
1461      * The config program passes parameters as follows:
1462      * bw	bits/second (0 means no limits)
1463      * delay	ms (must be translated into ticks)
1464      * qsize	slots or bytes
1465      */
1466     ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;
1467 
1468     /*
1469      * We need either a pipe number or a flow_set number
1470      */
1471     if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
1472 	return EINVAL;
1473     if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
1474 	return EINVAL;
1475 
1476     /*
1477      * Validate pipe number
1478      */
1479     if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
1480 	return EINVAL;
1481 
1482     lockmgr(&dn_lock, LK_EXCLUSIVE);
1483     error = EINVAL;
1484 
1485     if (ioc_pipe->pipe_nr != 0) {	/* This is a pipe */
1486 	struct dn_pipe *x, *p;
1487 
1488 	/* Locate pipe */
1489 	p = dn_find_pipe(ioc_pipe->pipe_nr);
1490 
1491 	if (p == NULL) {	/* New pipe */
1492 	    x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
1493 	    x->pipe_nr = ioc_pipe->pipe_nr;
1494 	    x->fs.pipe = x;
1495 	    TAILQ_INIT(&x->p_queue);
1496 
1497 	    /*
1498 	     * idle_heap is the only one from which we extract from the middle.
1499 	     */
1500 	    x->idle_heap.size = x->idle_heap.elements = 0;
1501 	    x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
1502 	} else {
1503 	    int i;
1504 
1505 	    x = p;
1506 
1507 	    /* Flush accumulated credit for all queues */
1508 	    for (i = 0; i <= x->fs.rq_size; i++) {
1509 		struct dn_flow_queue *q;
1510 
1511 		LIST_FOREACH(q, &x->fs.rq[i], q_link)
1512 		    q->numbytes = 0;
1513 	    }
1514 	}
1515 
1516 	x->bandwidth = ioc_pipe->bandwidth;
1517 	x->numbytes = 0; /* Just in case... */
1518 	x->delay = ioc_pipe->delay;
1519 
1520 	set_fs_parms(&x->fs, ioc_fs);
1521 
1522 	if (x->fs.rq == NULL) {	/* A new pipe */
1523 	    struct dn_pipe_head *pipe_hdr;
1524 
1525 	    alloc_hash(&x->fs, ioc_fs);
1526 
1527 	    pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
1528 	    LIST_INSERT_HEAD(pipe_hdr, x, p_link);
1529 	    ++dn_count;
1530 	}
1531     } else {	/* Config flow_set */
1532 	struct dn_flow_set *x, *fs;
1533 
1534 	/* Locate flow_set */
1535 	fs = dn_find_flowset(ioc_fs->fs_nr);
1536 
1537 	if (fs == NULL) {	/* New flow_set */
1538 	    if (ioc_fs->parent_nr == 0)	/* Need link to a pipe */
1539 		goto back;
1540 
1541 	    x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
1542 	    		M_WAITOK | M_ZERO);
1543 	    x->fs_nr = ioc_fs->fs_nr;
1544 	    x->parent_nr = ioc_fs->parent_nr;
1545 	    x->weight = ioc_fs->weight;
1546 	    if (x->weight == 0)
1547 		x->weight = 1;
1548 	    else if (x->weight > 100)
1549 		x->weight = 100;
1550 	} else {
1551 	    /* Change parent pipe not allowed; must delete and recreate */
1552 	    if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
1553 		goto back;
1554 	    x = fs;
1555 	}
1556 
1557 	set_fs_parms(x, ioc_fs);
1558 
1559 	if (x->rq == NULL) {	/* A new flow_set */
1560 	    struct dn_flowset_head *fs_hdr;
1561 
1562 	    alloc_hash(x, ioc_fs);
1563 
1564 	    fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
1565 	    LIST_INSERT_HEAD(fs_hdr, x, fs_link);
1566 	    ++dn_count;
1567 	}
1568     }
1569     error = 0;
1570 
1571     /*
1572      * We have at least one entry, set run state and start the systimer
1573      * poll if necessary.
1574      */
1575     if (dn_running == 0) {
1576 	dn_running = 1;
1577 	systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
1578     }
1579 
1580 back:
1581     lockmgr(&dn_lock, LK_RELEASE);
1582     return error;
1583 }
1584 
1585 /*
1586  * Helper function to remove from a heap queues which are linked to
1587  * a flow_set about to be deleted.
1588  */
1589 static void
1590 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
1591 {
1592     int i = 0, found = 0;
1593 
1594     while (i < h->elements) {
1595 	if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
1596 	    h->elements--;
1597 	    h->p[i] = h->p[h->elements];
1598 	    found++;
1599 	} else {
1600 	    i++;
1601 	}
1602     }
1603     if (found)
1604 	heapify(h);
1605 }
1606 
1607 /*
1608  * helper function to remove a pipe from a heap (can be there at most once)
1609  */
1610 static void
1611 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
1612 {
1613     if (h->elements > 0) {
1614 	int i;
1615 
1616 	for (i = 0; i < h->elements; i++) {
1617 	    if (h->p[i].object == p) { /* found it */
1618 		h->elements--;
1619 		h->p[i] = h->p[h->elements];
1620 		heapify(h);
1621 		break;
1622 	    }
1623 	}
1624     }
1625 }
1626 
1627 static void
1628 dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
1629 {
1630     struct dn_pipe *pipe = pipe0;
1631 
1632     if (fs->pipe == pipe) {
1633 	kprintf("++ ref to pipe %d from fs %d\n",
1634 		pipe->pipe_nr, fs->fs_nr);
1635 	fs->pipe = NULL;
1636 	purge_flow_set(fs, 0);
1637     }
1638 }
1639 
1640 /*
1641  * Fully delete a pipe or a queue, cleaning up associated info.
1642  */
1643 static int
1644 delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
1645 {
1646     struct dn_pipe *p;
1647     int error;
1648 
1649     if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
1650 	return EINVAL;
1651     if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
1652 	return EINVAL;
1653 
1654     if (ioc_pipe->pipe_nr > DN_NR_HASH_MAX || ioc_pipe->pipe_nr < 0)
1655     	return EINVAL;
1656 
1657     lockmgr(&dn_lock, LK_EXCLUSIVE);
1658 
1659     error = EINVAL;
1660     if (ioc_pipe->pipe_nr != 0) {	/* This is an old-style pipe */
1661 	/* Locate pipe */
1662 	p = dn_find_pipe(ioc_pipe->pipe_nr);
1663 	if (p == NULL)
1664 	    goto back; /* Not found */
1665 
1666 	/* Unlink from pipe hash table */
1667 	LIST_REMOVE(p, p_link);
1668 	--dn_count;
1669 
1670 	/* Remove all references to this pipe from flow_sets */
1671 	dn_iterate_flowset(dn_unref_pipe_cb, p);
1672 
1673 	fs_remove_from_heap(&ready_heap, &p->fs);
1674 	purge_pipe(p);	/* Remove all data associated to this pipe */
1675 
1676 	/* Remove reference to here from extract_heap and wfq_ready_heap */
1677 	pipe_remove_from_heap(&extract_heap, p);
1678 	pipe_remove_from_heap(&wfq_ready_heap, p);
1679 
1680 	kfree(p, M_DUMMYNET);
1681     } else {	/* This is a WF2Q queue (dn_flow_set) */
1682 	struct dn_flow_set *fs;
1683 
1684 	/* Locate flow_set */
1685 	fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
1686 	if (fs == NULL)
1687 	    goto back; /* Not found */
1688 
1689 	LIST_REMOVE(fs, fs_link);
1690 	--dn_count;
1691 
1692 	if ((p = fs->pipe) != NULL) {
1693 	    /* Update total weight on parent pipe and cleanup parent heaps */
1694 	    p->sum -= fs->weight * fs->backlogged;
1695 	    fs_remove_from_heap(&p->not_eligible_heap, fs);
1696 	    fs_remove_from_heap(&p->scheduler_heap, fs);
1697 #if 1	/* XXX should i remove from idle_heap as well ? */
1698 	    fs_remove_from_heap(&p->idle_heap, fs);
1699 #endif
1700 	}
1701 	purge_flow_set(fs, 1);
1702     }
1703     error = 0;
1704 
1705     /*
1706      * If there are no more pipes or flow-sets, clear the run state.
1707      */
1708     if (dn_count == 0 && dn_running) {
1709 	    systimer_del(&dn_clock);
1710 	    dn_running = 0;
1711     }
1712 back:
1713     lockmgr(&dn_lock, LK_RELEASE);
1714 
1715     return error;
1716 }
1717 
1718 /*
1719  * helper function used to copy data from kernel in DUMMYNET_GET
1720  */
1721 static void
1722 dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
1723 {
1724     ioc_id->type = ETHERTYPE_IP;
1725     ioc_id->u.ip.dst_ip = id->fid_dst_ip;
1726     ioc_id->u.ip.src_ip = id->fid_src_ip;
1727     ioc_id->u.ip.dst_port = id->fid_dst_port;
1728     ioc_id->u.ip.src_port = id->fid_src_port;
1729     ioc_id->u.ip.proto = id->fid_proto;
1730     ioc_id->u.ip.flags = id->fid_flags;
1731 }
1732 
1733 static void *
1734 dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
1735 {
1736     struct dn_ioc_flowqueue *ioc_fq = bp;
1737     int i, copied = 0;
1738 
1739     for (i = 0; i <= fs->rq_size; i++) {
1740 	const struct dn_flow_queue *q;
1741 
1742 	LIST_FOREACH(q, &fs->rq[i], q_link) {
1743 	    if (q->hash_slot != i) {	/* XXX ASSERT */
1744 		kprintf("++ at %d: wrong slot (have %d, "
1745 			"should be %d)\n", copied, q->hash_slot, i);
1746 	    }
1747 	    if (q->fs != fs) {		/* XXX ASSERT */
1748 		kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
1749 			i, q->fs, fs);
1750 	    }
1751 
1752 	    copied++;
1753 
1754 	    ioc_fq->len = q->len;
1755 	    ioc_fq->len_bytes = q->len_bytes;
1756 	    ioc_fq->tot_pkts = q->tot_pkts;
1757 	    ioc_fq->tot_bytes = q->tot_bytes;
1758 	    ioc_fq->drops = q->drops;
1759 	    ioc_fq->hash_slot = q->hash_slot;
1760 	    ioc_fq->S = q->S;
1761 	    ioc_fq->F = q->F;
1762 	    dn_copy_flowid(&q->id, &ioc_fq->id);
1763 
1764 	    ioc_fq++;
1765 	}
1766     }
1767 
1768     if (copied != fs->rq_elements) {	/* XXX ASSERT */
1769 	kprintf("++ wrong count, have %d should be %d\n",
1770 		copied, fs->rq_elements);
1771     }
1772     return ioc_fq;
1773 }
1774 
1775 static void
1776 dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
1777 		u_short fs_type)
1778 {
1779     ioc_fs->fs_type = fs_type;
1780 
1781     ioc_fs->fs_nr = fs->fs_nr;
1782     ioc_fs->flags_fs = fs->flags_fs;
1783     ioc_fs->parent_nr = fs->parent_nr;
1784 
1785     ioc_fs->weight = fs->weight;
1786     ioc_fs->qsize = fs->qsize;
1787     ioc_fs->plr = fs->plr;
1788 
1789     ioc_fs->rq_size = fs->rq_size;
1790     ioc_fs->rq_elements = fs->rq_elements;
1791 
1792     ioc_fs->w_q = fs->w_q;
1793     ioc_fs->max_th = fs->max_th;
1794     ioc_fs->min_th = fs->min_th;
1795     ioc_fs->max_p = fs->max_p;
1796 
1797     dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
1798 }
1799 
1800 static void
1801 dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
1802 {
1803     size_t *size = sz;
1804 
1805     *size += sizeof(struct dn_ioc_pipe) +
1806 	     pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
1807 }
1808 
1809 static void
1810 dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
1811 {
1812     size_t *size = sz;
1813 
1814     *size += sizeof(struct dn_ioc_flowset) +
1815 	     fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
1816 }
1817 
1818 static void
1819 dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
1820 {
1821     char **bp = bp0;
1822     struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);
1823 
1824     /*
1825      * Copy flow set descriptor associated with this pipe
1826      */
1827     dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);
1828 
1829     /*
1830      * Copy pipe descriptor
1831      */
1832     ioc_pipe->bandwidth = pipe->bandwidth;
1833     ioc_pipe->pipe_nr = pipe->pipe_nr;
1834     ioc_pipe->V = pipe->V;
1835     /* Convert delay to milliseconds */
1836     ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;
1837 
1838     /*
1839      * Copy flow queue descriptors
1840      */
1841     *bp += sizeof(*ioc_pipe);
1842     *bp = dn_copy_flowqueues(&pipe->fs, *bp);
1843 }
1844 
1845 static void
1846 dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
1847 {
1848     char **bp = bp0;
1849     struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);
1850 
1851     /*
1852      * Copy flow set descriptor
1853      */
1854     dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);
1855 
1856     /*
1857      * Copy flow queue descriptors
1858      */
1859     *bp += sizeof(*ioc_fs);
1860     *bp = dn_copy_flowqueues(fs, *bp);
1861 }
1862 
1863 static int
1864 dummynet_get(struct dn_sopt *dn_sopt)
1865 {
1866     char *buf, *bp;
1867     size_t size = 0;
1868 
1869     /*
1870      * Compute size of data structures: list of pipes and flow_sets.
1871      */
1872     dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
1873     dn_iterate_flowset(dn_calc_fs_size_cb, &size);
1874 
1875     /*
1876      * Copyout pipe/flow_set/flow_queue
1877      */
1878     bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
1879     dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
1880     dn_iterate_flowset(dn_copyout_fs_cb, &bp);
1881 
1882     /* Temp memory will be freed by caller */
1883     dn_sopt->dn_sopt_arg = buf;
1884     dn_sopt->dn_sopt_arglen = size;
1885     return 0;
1886 }
1887 
1888 /*
1889  * Handler for the various dummynet socket options (get, flush, config, del)
1890  */
1891 static int
1892 dummynet_ctl(struct dn_sopt *dn_sopt)
1893 {
1894     int error = 0;
1895 
1896     switch (dn_sopt->dn_sopt_name) {
1897     case IP_DUMMYNET_GET:
1898 	error = dummynet_get(dn_sopt);
1899 	break;
1900 
1901     case IP_DUMMYNET_FLUSH:
1902 	dummynet_flush();
1903 	break;
1904 
1905     case IP_DUMMYNET_CONFIGURE:
1906 	KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1907 	error = config_pipe(dn_sopt->dn_sopt_arg);
1908 	break;
1909 
1910     case IP_DUMMYNET_DEL:	/* Remove a pipe or flow_set */
1911 	KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1912 	error = delete_pipe(dn_sopt->dn_sopt_arg);
1913 	break;
1914 
1915     default:
1916 	kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
1917 	error = EINVAL;
1918 	break;
1919     }
1920     return error;
1921 }
1922 
1923 static void
1924 dummynet_clock(systimer_t info __unused, int in_ipi __unused,
1925     struct intrframe *frame __unused)
1926 {
1927     KASSERT(mycpuid == ip_dn_cpu,
1928 	    ("dummynet systimer comes on cpu%d, should be %d!",
1929 	     mycpuid, ip_dn_cpu));
1930 
1931     crit_enter();
1932     if (DUMMYNET_LOADED && (dn_netmsg.lmsg.ms_flags & MSGF_DONE))
1933 	lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), &dn_netmsg.lmsg);
1934     crit_exit();
1935 }
1936 
1937 static int
1938 sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
1939 {
1940     int error, val, origcpu;
1941 
1942     val = dn_hz;
1943     error = sysctl_handle_int(oidp, &val, 0, req);
1944     if (error || req->newptr == NULL)
1945 	return error;
1946     if (val <= 0)
1947 	return EINVAL;
1948     else if (val > DN_CALLOUT_FREQ_MAX)
1949 	val = DN_CALLOUT_FREQ_MAX;
1950 
1951     origcpu = mycpuid;
1952     lwkt_migratecpu(ip_dn_cpu);
1953 
1954     lockmgr(&dn_lock, LK_EXCLUSIVE);
1955     crit_enter();
1956     dn_hz = val;
1957     if (dn_running)
1958 	    systimer_adjust_periodic(&dn_clock, val);
1959     crit_exit();
1960     lockmgr(&dn_lock, LK_RELEASE);
1961 
1962     lwkt_migratecpu(origcpu);
1963 
1964     return 0;
1965 }
1966 
1967 static void
1968 ip_dn_init_dispatch(netmsg_t msg)
1969 {
1970     int i, error = 0;
1971 
1972     KASSERT(mycpuid == ip_dn_cpu,
1973     	    ("%s runs on cpu%d, instead of cpu%d", __func__,
1974 	     mycpuid, ip_dn_cpu));
1975 
1976     crit_enter();
1977 
1978     if (DUMMYNET_LOADED) {
1979 	kprintf("DUMMYNET already loaded\n");
1980 	error = EEXIST;
1981 	goto back;
1982     }
1983 
1984     kprintf("DUMMYNET initialized (011031)\n");
1985 
1986     for (i = 0; i < DN_NR_HASH_MAX; ++i)
1987     	LIST_INIT(&pipe_table[i]);
1988 
1989     for (i = 0; i < DN_NR_HASH_MAX; ++i)
1990 	LIST_INIT(&flowset_table[i]);
1991 
1992     ready_heap.size = ready_heap.elements = 0;
1993     ready_heap.offset = 0;
1994 
1995     wfq_ready_heap.size = wfq_ready_heap.elements = 0;
1996     wfq_ready_heap.offset = 0;
1997 
1998     extract_heap.size = extract_heap.elements = 0;
1999     extract_heap.offset = 0;
2000 
2001     ip_dn_ctl_ptr = dummynet_ctl;
2002     ip_dn_io_ptr = dummynet_io;
2003 
2004     netmsg_init(&dn_netmsg, NULL, &netisr_adone_rport,
2005 		0, dummynet);
2006 
2007     KKASSERT(dn_running == 0);
2008 #if 0
2009     /* REMOVED, initialized on first insertion */
2010     systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
2011 #endif
2012 
2013 back:
2014     crit_exit();
2015     lwkt_replymsg(&msg->lmsg, error);
2016 }
2017 
2018 static int
2019 ip_dn_init(void)
2020 {
2021     struct netmsg_base smsg;
2022 
2023     if (ip_dn_cpu >= ncpus) {
2024 	kprintf("%s: CPU%d does not exist, switch to CPU0\n",
2025 		__func__, ip_dn_cpu);
2026 	ip_dn_cpu = 0;
2027     }
2028 
2029     netmsg_init(&smsg, NULL, &curthread->td_msgport,
2030 		0, ip_dn_init_dispatch);
2031     lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
2032     return smsg.lmsg.ms_error;
2033 }
2034 
2035 #ifdef KLD_MODULE
2036 
2037 static void
2038 ip_dn_stop_dispatch(netmsg_t msg)
2039 {
2040     crit_enter();
2041 
2042     dummynet_flush();
2043 
2044     ip_dn_ctl_ptr = NULL;
2045     ip_dn_io_ptr = NULL;
2046     KKASSERT(dn_running == 0);
2047 
2048     crit_exit();
2049     lwkt_replymsg(&msg->lmsg, 0);
2050 }
2051 
2052 
2053 static void
2054 ip_dn_stop(void)
2055 {
2056     struct netmsg_base smsg;
2057 
2058     netmsg_init(&smsg, NULL, &curthread->td_msgport,
2059 		0, ip_dn_stop_dispatch);
2060     lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
2061 
2062     netmsg_service_sync();
2063 }
2064 
2065 #endif	/* KLD_MODULE */
2066 
2067 static int
2068 dummynet_modevent(module_t mod, int type, void *data)
2069 {
2070     switch (type) {
2071     case MOD_LOAD:
2072 	return ip_dn_init();
2073 
2074     case MOD_UNLOAD:
2075 #ifndef KLD_MODULE
2076 	kprintf("dummynet statically compiled, cannot unload\n");
2077 	return EINVAL;
2078 #else
2079 	ip_dn_stop();
2080 #endif
2081 	break;
2082 
2083     default:
2084 	break;
2085     }
2086     return 0;
2087 }
2088 
2089 static moduledata_t dummynet_mod = {
2090     "dummynet",
2091     dummynet_modevent,
2092     NULL
2093 };
2094 DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2095 MODULE_VERSION(dummynet, 1);
2096