/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
 */

#include "opt_ipdn.h"

/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator.
 * Description of the data structures used is in ip_dummynet.h.
 * Here you mainly find the following blocks of code:
 *  + variable declarations;
 *  + heap management functions;
 *  + scheduler and dummynet functions;
 *  + configuration and initialization.
 *
 * Most important changes:
 *
 * 011004: KLDable
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: Large rewrite, use heaps to handle large numbers of pipes.
 * 980513: Initial release
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systimer.h>
#include <sys/thread2.h>

#include <net/ethernet.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <net/route.h>

#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#include <net/dummynet/ip_dummynet.h>

#ifdef DUMMYNET_DEBUG
#define DPRINTF(fmt, ...)	kprintf(fmt, __VA_ARGS__)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif

#ifndef DN_CALLOUT_FREQ_MAX
#define DN_CALLOUT_FREQ_MAX	10000
#endif

/*
 * The maximum/minimum hash table size for queues.
 * These values must be a power of 2.
 */
#define DN_MIN_HASH_SIZE	4
#define DN_MAX_HASH_SIZE	65536

/*
 * Some macros are used to compare key values and handle wraparounds.
 * MAX64 returns the largest of two key values.
 */
#define DN_KEY_LT(a, b)		((int64_t)((a) - (b)) < 0)
#define DN_KEY_LEQ(a, b)	((int64_t)((a) - (b)) <= 0)
#define DN_KEY_GT(a, b)		((int64_t)((a) - (b)) > 0)
#define DN_KEY_GEQ(a, b)	((int64_t)((a) - (b)) >= 0)
#define MAX64(x, y)		((((int64_t)((y) - (x))) > 0) ? (y) : (x))
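
/*
 * Editor's illustration (not from the original sources): dn_key is a
 * 64-bit tick counter, so comparisons must survive wraparound.  The
 * subtract-and-sign-test trick above gives the right answer as long as
 * the two keys are less than 2^63 ticks apart.  For example:
 *
 *	dn_key a = 0xFFFFFFFFFFFFFFFEULL;	 // just before the wrap
 *	dn_key b = 0x0000000000000001ULL;	 // just after the wrap
 *	// a - b == 0xFFFFFFFFFFFFFFFD == (int64_t)-3, so DN_KEY_LT(a, b)
 *	// is true: 'a' is correctly treated as earlier, even though
 *	// a > b as plain unsigned integers.
 */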

#define DN_NR_HASH_MAX		16
#define DN_NR_HASH_MASK		(DN_NR_HASH_MAX - 1)
#define DN_NR_HASH(nr)		\
	((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)
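
/*
 * Editor's illustration (not from the original sources): DN_NR_HASH
 * folds a pipe/flow_set number into one of DN_NR_HASH_MAX (16) buckets
 * by XOR-ing its nibbles.  E.g. for nr = 0x1234:
 *
 *	(0x1234 >> 12) ^ (0x1234 >> 8) ^ (0x1234 >> 4) ^ 0x1234
 *	    = 0x1 ^ 0x12 ^ 0x123 ^ 0x1234 = 0x1304
 *	0x1304 & DN_NR_HASH_MASK = 4
 *
 * so pipe number 0x1234 lands in bucket 4 of pipe_table[].
 */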

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern int	ip_dn_cpu;

static dn_key	curr_time = 0;		/* current simulation time */
static int	dn_hash_size = 64;	/* default hash size */
static int	pipe_expire = 1;	/* expire queue if empty */
static int	dn_max_ratio = 16;	/* max queues/buckets ratio */

/*
 * Statistics on number of queue searches and search steps
 */
static int	searches;
static int	search_steps;

/*
 * RED parameters
 */
static int	red_lookup_depth = 256;	/* default lookup table depth */
static int	red_avg_pkt_size = 512;	/* default medium packet size */
static int	red_max_pkt_size = 1500; /* default max packet size */

/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 *  + ready_heap	contains all dn_flow_queue related to fixed-rate pipes.
 *  + wfq_ready_heap	contains the pipes associated with WF2Q flows.
 *  + extract_heap	contains pipes associated with delay lines.
 */
static struct dn_heap	ready_heap;
static struct dn_heap	extract_heap;
static struct dn_heap	wfq_ready_heap;

static struct dn_pipe_head	pipe_table[DN_NR_HASH_MAX];
static struct dn_flowset_head	flowset_table[DN_NR_HASH_MAX];

/*
 * Variables for dummynet systimer
 */
static struct netmsg_base dn_netmsg;
static struct systimer	dn_clock;
static int		dn_hz = 1000;

static int	sysctl_dn_hz(SYSCTL_HANDLER_ARGS);

SYSCTL_DECL(_net_inet_ip_dummynet);

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
	   &dn_hash_size, 0, "Default hash table size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
	   &curr_time, 0, "Current tick");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
	   &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
	   &dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
	   &ready_heap.size, 0, "Size of ready heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
	   &extract_heap.size, 0, "Size of extract heap");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
	   &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
	   &search_steps, 0, "Number of queue search steps");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
	   &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
	   &red_avg_pkt_size, 0, "RED medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
	   &red_max_pkt_size, 0, "RED max packet size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");

static int	heap_init(struct dn_heap *, int);
static int	heap_insert(struct dn_heap *, dn_key, void *);
static void	heap_extract(struct dn_heap *, void *);

static void	transmit_event(struct dn_pipe *);
static void	ready_event(struct dn_flow_queue *);
static void	ready_event_wfq(struct dn_pipe *);

static int	config_pipe(struct dn_ioc_pipe *);
static void	dummynet_flush(void);

static void	dummynet_clock(systimer_t, int, struct intrframe *);
static void	dummynet(netmsg_t);

static struct dn_pipe *dn_find_pipe(int);
static struct dn_flow_set *dn_locate_flowset(int, int);

typedef void	(*dn_pipe_iter_t)(struct dn_pipe *, void *);
static void	dn_iterate_pipe(dn_pipe_iter_t, void *);

typedef void	(*dn_flowset_iter_t)(struct dn_flow_set *, void *);
static void	dn_iterate_flowset(dn_flowset_iter_t, void *);

static ip_dn_io_t	dummynet_io;
static ip_dn_ctl_t	dummynet_ctl;

/*
 * Heap management functions.
 *
 * In the heap, the first node is element 0.  Children of i are 2i+1 and 2i+2.
 * Some macros help find the parent/children, so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * It grows the size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns 1 on error, 0 on success.  (With M_WAITOK the allocation below
 * cannot fail, so in practice heap_init() always returns 0.)
 */
#define HEAP_FATHER(x)		(((x) - 1) / 2)
#define HEAP_LEFT(x)		(2*(x) + 1)
#define HEAP_IS_LEFT(x)		((x) & 1)
#define HEAP_RIGHT(x)		(2*(x) + 2)
#define HEAP_SWAP(a, b, buffer)	{ buffer = a; a = b; b = buffer; }
#define HEAP_INCREMENT		15
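
/*
 * Editor's illustration (not from the original sources): with the
 * 0-based array layout above, a heap
 *
 *	index:  0   1   2   3   4   5   6
 *	key:    2   5   3   9   7   4   8
 *
 * stores node 0's children at indices 1 and 2, node 1's at 3 and 4,
 * and so on; HEAP_FATHER(4) == 1 and HEAP_RIGHT(1) == 4.  Growing the
 * array with heap_init() rounds the request up to a multiple of 16:
 * asking for 17 entries yields (17 + 15) & ~15 == 32 slots.
 */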

static int
heap_init(struct dn_heap *h, int new_size)
{
    struct dn_heap_entry *p;

    if (h->size >= new_size) {
	kprintf("%s, Bogus call, have %d want %d\n", __func__,
		h->size, new_size);
	return 0;
    }

    new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
    p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
    if (h->size > 0) {
	bcopy(h->p, p, h->size * sizeof(*p));
	kfree(h->p, M_DUMMYNET);
    }
    h->p = p;
    h->size = new_size;
    return 0;
}

/*
 * Insert element in heap.  Normally, p != NULL, we insert p in
 * a new position and bubble up.  If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry)
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
#define SET_OFFSET(heap, node) \
    if (heap->offset > 0) \
	*((int *)((char *)(heap->p[node].object) + heap->offset)) = node;

/*
 * RESET_OFFSET is used for sanity checks.  It sets offset to an invalid value.
 */
#define RESET_OFFSET(heap, node) \
    if (heap->offset > 0) \
	*((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;

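/*
 * Editor's note (illustration, not from the original sources): the offset
 * machinery above is what lets heap_extract() remove an element from the
 * middle of a heap in O(log n).  A structure that needs this keeps an int
 * field for its heap position and registers the field's offset once, as
 * config_pipe() below does for the per-pipe idle_heap:
 *
 *	x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
 *
 * From then on every insert/swap updates q->heap_pos via SET_OFFSET(), so
 * heap_extract(&pipe->idle_heap, q) finds q without scanning the array.
 */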
static int
heap_insert(struct dn_heap *h, dn_key key1, void *p)
{
    int son;

    if (p == NULL) {	/* Data already there, set starting point */
	son = key1;
    } else {		/* Insert new element at the end, possibly resize */
	son = h->elements;
	if (son == h->size) { /* Need resize... */
	    if (heap_init(h, h->elements + 1))
		return 1; /* Failure... */
	}
	h->p[son].object = p;
	h->p[son].key = key1;
	h->elements++;
    }

    while (son > 0) {	/* Bubble up */
	int father = HEAP_FATHER(son);
	struct dn_heap_entry tmp;

	if (DN_KEY_LT(h->p[father].key, h->p[son].key))
	    break; /* Found right position */

	/* 'son' smaller than 'father', swap and repeat */
	HEAP_SWAP(h->p[son], h->p[father], tmp);
	SET_OFFSET(h, son);
	son = father;
    }
    SET_OFFSET(h, son);
    return 0;
}
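
/*
 * Editor's illustration (not from the original sources): the two call
 * forms of heap_insert().  To insert a new object:
 *
 *	heap_insert(h, key, obj);	// append at the end, bubble up
 *
 * To restore the heap property for an element already stored at
 * position 'pos' (as heap_extract() does when it refills a hole):
 *
 *	heap_insert(h, pos, NULL);	// 'key1' is reused as the index
 */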

/*
 * Remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *obj)
{
    int child, father, max = h->elements - 1;

    if (max < 0) {
	kprintf("warning, extract from empty heap %p\n", h);
	return;
    }

    father = 0; /* Default: move up smallest child */
    if (obj != NULL) { /* Extract specific element, index is at offset */
	if (h->offset <= 0)
	    panic("%s from middle not supported on this heap!!!", __func__);

	father = *((int *)((char *)obj + h->offset));
	if (father < 0 || father >= h->elements) {
	    panic("%s father %d out of bounds 0..%d", __func__,
	    	  father, h->elements);
	}
    }
    RESET_OFFSET(h, father);

    child = HEAP_LEFT(father);		/* Left child */
    while (child <= max) {		/* Valid entry */
	if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
	    child = child + 1;		/* Take right child, otherwise left */
	h->p[father] = h->p[child];
	SET_OFFSET(h, father);
	father = child;
	child = HEAP_LEFT(child);	/* Left child for next loop */
    }
    h->elements--;
    if (father != max) {
	/*
	 * Fill hole with last entry and bubble up, reusing the insert code
	 */
	h->p[father] = h->p[max];
	heap_insert(h, father, NULL);	/* This one cannot fail */
    }
}

/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property.  It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
    int i;

    for (i = 0; i < h->elements; i++)
	heap_insert(h, i, NULL);
}

/*
 * Clean up the heap and free the data structure
 */
static void
heap_free(struct dn_heap *h)
{
    if (h->size > 0)
	kfree(h->p, M_DUMMYNET);
    bzero(h, sizeof(*h));
}

/*
 * --- End of heap management functions ---
 */

/*
 * Scheduler functions:
 *
 * transmit_event() is called when the delay-line needs to enter
 * the scheduler, either because of existing pkts getting ready,
 * or new packets entering the queue.  The event handled is the delivery
 * time of the packet.
 *
 * ready_event() does something similar with fixed-rate queues, and the
 * event handled is the finish time of the head pkt.
 *
 * ready_event_wfq() does something similar with WF2Q queues, and the
 * event handled is the start time of the head pkt.
 *
 * In all cases, we make sure that the data structures are consistent
 * before passing pkts out, because this might trigger recursive
 * invocations of the procedures.
 */
static void
transmit_event(struct dn_pipe *pipe)
{
    struct dn_pkt *pkt;

    while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
    	   DN_KEY_LEQ(pkt->output_time, curr_time)) {
	TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
	ip_dn_packet_redispatch(pkt);
    }

    /*
     * If there are leftover packets, put into the heap for next event
     */
    if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
	/*
	 * XXX should check errors on heap_insert, by draining the
	 * whole pipe and hoping in the future we are more successful
	 */
	heap_insert(&extract_heap, pkt->output_time, pipe);
    }
}

/*
 * The following macro computes how many ticks we have to wait
 * before being able to transmit a packet.  The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing).
 */
#define SET_TICKS(pkt, q, p)	\
    (((pkt)->dn_m->m_pkthdr.len * 8 * dn_hz - (q)->numbytes + \
      (p)->bandwidth - 1) / (p)->bandwidth)
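
/*
 * Editor's worked example (not from the original sources): bandwidth is
 * in bits/s and the credit is scaled by 8*dn_hz, so SET_TICKS is a
 * rounded-up division.  With dn_hz = 1000, a 1500-byte packet on a
 * 1 Mbit/s pipe with no accumulated credit (numbytes == 0):
 *
 *	(1500*8*1000 - 0 + 1000000 - 1) / 1000000 == 12 ticks
 *
 * i.e. 12 ms, exactly the serialization time of the packet.
 */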

/*
 * Extract pkt from queue, compute output time (could be now)
 * and put into delay line (p_queue)
 */
static void
move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
	 struct dn_pipe *p, int len)
{
    TAILQ_REMOVE(&q->queue, pkt, dn_next);
    q->len--;
    q->len_bytes -= len;

    pkt->output_time = curr_time + p->delay;

    TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
}

/*
 * ready_event() is invoked every time the queue must enter the
 * scheduler, either because the first packet arrives, or because
 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then
 * if there are leftover packets reinsert the queue in the scheduler.
 */
static void
ready_event(struct dn_flow_queue *q)
{
    struct dn_pkt *pkt;
    struct dn_pipe *p = q->fs->pipe;
    int p_was_empty;

    if (p == NULL) {
	kprintf("ready_event- pipe is gone\n");
	return;
    }
    p_was_empty = TAILQ_EMPTY(&p->p_queue);

    /*
     * Schedule fixed-rate queues linked to this pipe:
     * Account for the bw accumulated since last scheduling, then
     * drain as many pkts as allowed by q->numbytes and move to
     * the delay line (in p) computing output time.
     * bandwidth==0 (no limit) means we can drain the whole queue,
     * setting len_scaled = 0 does the job.
     */
    q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
    while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
	int len = pkt->dn_m->m_pkthdr.len;
	int len_scaled = p->bandwidth ? len*8*dn_hz : 0;

	if (len_scaled > q->numbytes)
	    break;
	q->numbytes -= len_scaled;
	move_pkt(pkt, q, p, len);
    }

    /*
     * If we have more packets queued, schedule next ready event
     * (can only occur when bandwidth != 0, otherwise we would have
     * flushed the whole queue in the previous loop).
     * To this purpose we record the current time and compute how many
     * ticks to go for the finish time of the packet.
     */
    if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
    	/* This implies bandwidth != 0 */
	dn_key t = SET_TICKS(pkt, q, p); /* ticks we have to wait */

	q->sched_time = curr_time;

	/*
	 * XXX should check errors on heap_insert, and drain the whole
	 * queue on error hoping next time we are luckier.
	 */
	heap_insert(&ready_heap, curr_time + t, q);
    } else {	/* RED needs to know when the queue becomes empty */
	q->q_time = curr_time;
	q->numbytes = 0;
    }

    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty)
	transmit_event(p);
}

/*
 * Called when we can transmit packets on WF2Q queues.  Take pkts out of
 * the queues at their start time, and enqueue into the delay line.
 * Packets are drained while p->numbytes >= 0; each extraction charges
 * len_scaled against the credit, which may go negative.  Every packet
 * enters the delay line with a deadline of p->delay; if the credit went
 * negative, the last packet gets an additional delay to cover the deficit.
 */
static void
ready_event_wfq(struct dn_pipe *p)
{
    int p_was_empty = TAILQ_EMPTY(&p->p_queue);
    struct dn_heap *sch = &p->scheduler_heap;
    struct dn_heap *neh = &p->not_eligible_heap;

    p->numbytes += (curr_time - p->sched_time) * p->bandwidth;

    /*
     * While we have backlogged traffic AND credit, we need to do
     * something on the queue.
     */
    while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
	if (sch->elements > 0) { /* Have some eligible pkts to send out */
	    struct dn_flow_queue *q = sch->p[0].object;
	    struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
	    struct dn_flow_set *fs = q->fs;
	    uint64_t len = pkt->dn_m->m_pkthdr.len;
	    int len_scaled = p->bandwidth ? len*8*dn_hz : 0;

	    heap_extract(sch, NULL);	/* Remove queue from heap */
	    p->numbytes -= len_scaled;
	    move_pkt(pkt, q, p, len);

	    p->V += (len << MY_M) / p->sum;	/* Update V */
	    q->S = q->F;			/* Update start time */

	    if (q->len == 0) {	/* Flow not backlogged any more */
		fs->backlogged--;
		heap_insert(&p->idle_heap, q->F, q);
	    } else {		/* Still backlogged */
		/*
		 * Update F and position in backlogged queue, then
		 * put flow in not_eligible_heap (we will fix this later).
		 */
		len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
		q->F += (len << MY_M) / (uint64_t)fs->weight;
		if (DN_KEY_LEQ(q->S, p->V))
		    heap_insert(neh, q->S, q);
		else
		    heap_insert(sch, q->F, q);
	    }
	}

	/*
	 * Now compute V = max(V, min(S_i)).  Remember that all elements in
	 * sch have by definition S_i <= V so if sch is not empty, V is surely
	 * the max and we must not update it.  Conversely, if sch is empty
	 * we only need to look at neh.
	 */
	if (sch->elements == 0 && neh->elements > 0)
	    p->V = MAX64(p->V, neh->p[0].key);

	/*
	 * Move from neh to sch any packets that have become eligible
	 */
	while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
	    struct dn_flow_queue *q = neh->p[0].object;

	    heap_extract(neh, NULL);
	    heap_insert(sch, q->F, q);
	}
    }

    if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
    	p->idle_heap.elements > 0) {
	/*
	 * No traffic and no events scheduled.  We can get rid of the
	 * idle heap.
	 */
	int i;

	for (i = 0; i < p->idle_heap.elements; i++) {
	    struct dn_flow_queue *q = p->idle_heap.p[i].object;

	    q->F = 0;
	    q->S = q->F + 1;
	}
	p->sum = 0;
	p->V = 0;
	p->idle_heap.elements = 0;
    }

    /*
     * If we are getting clocks from dummynet and if we are under credit,
     * schedule the next ready event.
     * Also fix the delivery time of the last packet.
     */
    if (p->numbytes < 0) { /* This implies bandwidth > 0 */
	dn_key t = 0; /* Number of ticks we have to wait */

	if (p->bandwidth > 0)
	    t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
	TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
	p->sched_time = curr_time;

	/*
	 * XXX should check errors on heap_insert, and drain the whole
	 * queue on error hoping next time we are luckier.
	 */
	heap_insert(&wfq_ready_heap, curr_time + t, p);
    }

    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty)
	transmit_event(p);
}

static void
dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
{
    if (pipe->idle_heap.elements > 0 &&
	DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
	struct dn_flow_queue *q = pipe->idle_heap.p[0].object;

	heap_extract(&pipe->idle_heap, NULL);
	q->S = q->F + 1; /* Mark timestamp as invalid */
	pipe->sum -= q->fs->weight;
    }
}

/*
 * This is called once per tick, or dn_hz times per second.  It is used to
 * increment the current tick counter and schedule expired events.
 */
static void
dummynet(netmsg_t msg)
{
    void *p;
    struct dn_heap *h;
    struct dn_heap *heaps[3];
    int i;

    heaps[0] = &ready_heap;		/* Fixed-rate queues */
    heaps[1] = &wfq_ready_heap;		/* WF2Q queues */
    heaps[2] = &extract_heap;		/* Delay line */

    /* Reply ASAP */
    crit_enter();
    lwkt_replymsg(&msg->lmsg, 0);
    crit_exit();

    curr_time++;
    for (i = 0; i < 3; i++) {
	h = heaps[i];
	while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
	    if (h->p[0].key > curr_time) {
		kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
		    i, (int)(curr_time - h->p[0].key));
	    }

	    p = h->p[0].object;		/* Store a copy before heap_extract */
	    heap_extract(h, NULL);	/* Need to extract before processing */

	    if (i == 0)
		ready_event(p);
	    else if (i == 1)
		ready_event_wfq(p);
	    else
		transmit_event(p);
	}
    }

    /* Sweep pipes trying to expire idle flow_queues */
    dn_iterate_pipe(dn_expire_pipe_cb, NULL);
}

/*
 * Unconditionally expire empty queues in case of shortage.
 * Returns the number of queues freed.
 */
static int
expire_queues(struct dn_flow_set *fs)
{
    int i, initial_elements = fs->rq_elements;

    if (fs->last_expired == time_uptime)
	return 0;

    fs->last_expired = time_uptime;

    for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */
	struct dn_flow_queue *q, *qn;

	LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
	    if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
		continue;

	    /*
	     * Entry is idle, expire it
	     */
	    LIST_REMOVE(q, q_link);
	    kfree(q, M_DUMMYNET);

	    KASSERT(fs->rq_elements > 0,
		    ("invalid rq_elements %d", fs->rq_elements));
	    fs->rq_elements--;
	}
    }
    return initial_elements - fs->rq_elements;
}

/*
 * If room, create a new queue and put at head of slot i;
 * otherwise, create or use the default queue.
 */
static struct dn_flow_queue *
create_queue(struct dn_flow_set *fs, int i)
{
    struct dn_flow_queue *q;

    if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
	expire_queues(fs) == 0) {
	/*
	 * No way to get room, use or create overflow queue.
	 */
	i = fs->rq_size;
	if (!LIST_EMPTY(&fs->rq[i]))
	    return LIST_FIRST(&fs->rq[i]);
    }

    q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
    if (q == NULL)
	return NULL;

    q->fs = fs;
    q->hash_slot = i;
    q->S = q->F + 1;   /* hack - mark timestamp as invalid */
    TAILQ_INIT(&q->queue);

    LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
    fs->rq_elements++;

    return q;
}

/*
 * Given a flow_set and a flow id, find a matching queue
 * after appropriate masking.  The queue is moved to the front
 * so that further searches take less time.
 */
static struct dn_flow_queue *
find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
{
    struct dn_flow_queue *q;
    int i = 0;

    if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
	q = LIST_FIRST(&fs->rq[0]);
    } else {
	struct dn_flow_queue *qn;

	/* First, do the masking */
	id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
	id->fid_src_ip &= fs->flow_mask.fid_src_ip;
	id->fid_dst_port &= fs->flow_mask.fid_dst_port;
	id->fid_src_port &= fs->flow_mask.fid_src_port;
	id->fid_proto &= fs->flow_mask.fid_proto;
	id->fid_flags = 0; /* we don't care about this one */

	/* Then, hash function */
	i = ((id->fid_dst_ip) & 0xffff) ^
	    ((id->fid_dst_ip >> 15) & 0xffff) ^
	    ((id->fid_src_ip << 1) & 0xffff) ^
	    ((id->fid_src_ip >> 16) & 0xffff) ^
	    (id->fid_dst_port << 1) ^ (id->fid_src_port) ^
	    (id->fid_proto);
	i = i % fs->rq_size;

	/*
	 * Finally, scan the current list for a match and
	 * expire idle flow queues
	 */
	searches++;
	LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
	    search_steps++;
	    if (id->fid_dst_ip == q->id.fid_dst_ip &&
		id->fid_src_ip == q->id.fid_src_ip &&
		id->fid_dst_port == q->id.fid_dst_port &&
		id->fid_src_port == q->id.fid_src_port &&
		id->fid_proto == q->id.fid_proto &&
		id->fid_flags == q->id.fid_flags) {
		break; /* Found */
	    } else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
	    	       q->S == q->F + 1) {
		/*
		 * Entry is idle and not in any heap, expire it
		 */
		LIST_REMOVE(q, q_link);
		kfree(q, M_DUMMYNET);

		KASSERT(fs->rq_elements > 0,
			("invalid rq_elements %d", fs->rq_elements));
		fs->rq_elements--;
	    }
	}
	if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */
	    LIST_REMOVE(q, q_link);
	    LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
	}
    }
    if (q == NULL) {	/* No match, need to allocate a new entry */
	q = create_queue(fs, i);
	if (q != NULL)
	    q->id = *id;
    }
    return q;
}

static int
red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
{
    /*
     * RED algorithm
     *
     * RED calculates the average queue size (avg) using a low-pass filter
     * with an exponential weighted (w_q) moving average:
     * 	avg  <-  (1-w_q) * avg + w_q * q_size
     * where q_size is the queue length (measured in bytes or packets).
     *
     * If q_size == 0, we compute the idle time for the link, and set
     *	avg = (1 - w_q)^(idle/s)
     * where s is the time needed for transmitting a medium-sized packet.
     *
     * Now, if avg < min_th the packet is enqueued.
     * If avg > max_th the packet is dropped.  Otherwise, the packet is
     * dropped with a probability that is a function of avg.
     */
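
    /*
     * Editor's worked example (not from the original sources), ignoring
     * the fixed-point SCALE() factors: with min_th = 10, max_th = 30
     * (packets) and max_p = 0.1, the linear region below gives
     *
     *	c_1 = max_p / (max_th - min_th) = 0.005
     *	c_2 = max_p * min_th / (max_th - min_th) = 0.05
     *
     * so an average queue of 20 packets yields
     * p_b = 0.005*20 - 0.05 = 0.05, i.e. a 5% base drop probability.
     */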

    int64_t p_b = 0;
    u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;

    DPRINTF("\n%d q: %2u ", (int)curr_time, q_size);

    /* Average queue size estimation */
    if (q_size != 0) {
	/*
	 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
	 */
	int diff = SCALE(q_size) - q->avg;
	int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

	q->avg += (int)v;
    } else {
	/*
	 * Queue is empty, compute how long the queue has been
	 * empty and use a lookup table for computing
	 * (1 - w_q)^(idle_time/s) where s is the time to send a
	 * (small) packet.
	 * XXX check wraps...
	 */
	if (q->avg) {
	    u_int t = (curr_time - q->q_time) / fs->lookup_step;

	    q->avg = (t < fs->lookup_depth) ?
		     SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
	}
    }
    DPRINTF("avg: %u ", SCALE_VAL(q->avg));

    /* Should we drop? */

    if (q->avg < fs->min_th) {
	/* Accept packet */
	q->count = -1;
	return 0;
    }

    if (q->avg >= fs->max_th) { /* Average queue >= max threshold */
	if (fs->flags_fs & DN_IS_GENTLE_RED) {
	    /*
	     * According to Gentle-RED, if avg is greater than max_th the
	     * packet is dropped with a probability
	     *	p_b = c_3 * avg - c_4
	     * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
	     */
	    p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
	} else {
	    q->count = -1;
	    kprintf("- drop\n");
	    return 1;
	}
    } else if (q->avg > fs->min_th) {
	/*
	 * We compute p_b using the linear dropping function p_b = c_1 *
	 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
	 * max_p * min_th / (max_th - min_th)
	 */
	p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
    }
    if (fs->flags_fs & DN_QSIZE_IS_BYTES)
	p_b = (p_b * len) / fs->max_pkt_size;

    if (++q->count == 0) {
	q->random = krandom() & 0xffff;
    } else {
	/*
	 * q->count counts packets arrived since last drop, so a greater
	 * value of q->count means a greater packet drop probability.
	 */
	if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
	    q->count = 0;
	    DPRINTF("%s", "- red drop");
	    /* After a drop we calculate a new random value */
	    q->random = krandom() & 0xffff;
	    return 1;    /* Drop */
	}
    }
    /* End of RED algorithm */
    return 0; /* Accept */
}

static void
dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
{
    int i;

    for (i = 0; i < DN_NR_HASH_MAX; ++i) {
	struct dn_pipe_head *pipe_hdr = &pipe_table[i];
	struct dn_pipe *pipe, *pipe_next;

	LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
	    func(pipe, arg);
    }
}

static void
dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
{
    int i;

    for (i = 0; i < DN_NR_HASH_MAX; ++i) {
	struct dn_flowset_head *fs_hdr = &flowset_table[i];
	struct dn_flow_set *fs, *fs_next;

	LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
	    func(fs, arg);
    }
}

static struct dn_pipe *
dn_find_pipe(int pipe_nr)
{
    struct dn_pipe_head *pipe_hdr;
    struct dn_pipe *p;

    pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
    LIST_FOREACH(p, pipe_hdr, p_link) {
	if (p->pipe_nr == pipe_nr)
	    break;
    }
    return p;
}

static struct dn_flow_set *
dn_find_flowset(int fs_nr)
{
    struct dn_flowset_head *fs_hdr;
    struct dn_flow_set *fs;

    fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
    LIST_FOREACH(fs, fs_hdr, fs_link) {
	if (fs->fs_nr == fs_nr)
	    break;
    }
    return fs;
}

static struct dn_flow_set *
dn_locate_flowset(int pipe_nr, int is_pipe)
{
    struct dn_flow_set *fs = NULL;

    if (!is_pipe) {
	fs = dn_find_flowset(pipe_nr);
    } else {
	struct dn_pipe *p;

	p = dn_find_pipe(pipe_nr);
	if (p != NULL)
	    fs = &p->fs;
    }
    return fs;
}

/*
 * Dummynet hook for packets.  Below 'pipe' is a pipe or a queue
 * depending on whether WF2Q or fixed bw is used.
 *
 * The packet's dummynet state (matching rule, flags, pipe/queue number
 * and flow id) is carried in the PACKET_TAG_DUMMYNET m_tag attached to
 * the mbuf; see struct dn_pkt in ip_dummynet.h.
 */
static int
dummynet_io(struct mbuf *m)
{
    struct dn_pkt *pkt;
    struct m_tag *tag;
    struct dn_flow_set *fs;
    struct dn_pipe *pipe;
    uint64_t len = m->m_pkthdr.len;
    struct dn_flow_queue *q = NULL;
    int is_pipe, pipe_nr;

    tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
    pkt = m_tag_data(tag);

    is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
    pipe_nr = pkt->pipe_nr;

    /*
     * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule
     */
    fs = dn_locate_flowset(pipe_nr, is_pipe);
    if (fs == NULL)
	goto dropit;	/* This queue/pipe does not exist! */

    pipe = fs->pipe;
    if (pipe == NULL) { /* Must be a queue, try to find a matching pipe */
	pipe = dn_find_pipe(fs->parent_nr);
	if (pipe != NULL) {
	    fs->pipe = pipe;
	} else {
	    kprintf("No pipe %d for queue %d, drop pkt\n",
	    	    fs->parent_nr, fs->fs_nr);
	    goto dropit;
	}
    }

    q = find_queue(fs, &pkt->id);
    if (q == NULL)
	goto dropit;	/* Cannot allocate queue */

    /*
     * Update statistics, then check reasons to drop pkt
     */
    q->tot_bytes += len;
    q->tot_pkts++;

    if (fs->plr && krandom() < fs->plr)
	goto dropit;	/* Random pkt drop */

    if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
	if (q->len_bytes > fs->qsize)
	    goto dropit;	/* Queue size overflow */
    } else {
	if (q->len >= fs->qsize)
	    goto dropit;	/* Queue count overflow */
    }

    if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
	goto dropit;

    TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
    q->len++;
    q->len_bytes += len;

    if (TAILQ_FIRST(&q->queue) != pkt)	/* Flow was not idle, we are done */
	goto done;

    /*
     * If we reach this point the flow was previously idle, so we need
     * to schedule it.  This involves different actions for fixed-rate
     * or WF2Q queues.
     */
    if (is_pipe) {
	/*
	 * Fixed-rate queue: just insert into the ready_heap.
	 */
	dn_key t = 0;

	if (pipe->bandwidth)
	    t = SET_TICKS(pkt, q, pipe);

	q->sched_time = curr_time;
	if (t == 0)	/* Must process it now */
	    ready_event(q);
	else
	    heap_insert(&ready_heap, curr_time + t, q);
    } else {
	/*
	 * WF2Q:
	 * First, compute start time S: if the flow was idle (S=F+1)
	 * set S to the virtual time V for the controlling pipe, and update
	 * the sum of weights for the pipe; otherwise, remove flow from
	 * idle_heap and set S to max(F, V).
	 * Second, compute finish time F = S + len/weight.
	 * Third, if pipe was idle, update V = max(S, V).
	 * Fourth, count one more backlogged flow.
	 */
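	/*
	 * Editor's worked example (not from the original sources): MY_M
	 * is the fixed-point shift used for the virtual time (defined in
	 * ip_dummynet.h).  Ignoring that scaling, a previously idle flow
	 * with weight 2 enqueueing a 1000-byte packet while the pipe's
	 * virtual time is V = 500 gets
	 *
	 *	S = V = 500,	F = S + len/weight = 500 + 1000/2 = 1000
	 *
	 * and is immediately eligible since S <= V.
	 */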
	if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid */
	    q->S = pipe->V;
	    pipe->sum += fs->weight; /* Add weight of new queue */
	} else {
	    heap_extract(&pipe->idle_heap, q);
	    q->S = MAX64(q->F, pipe->V);
	}
	q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;

	if (pipe->not_eligible_heap.elements == 0 &&
	    pipe->scheduler_heap.elements == 0)
	    pipe->V = MAX64(q->S, pipe->V);

	fs->backlogged++;

	/*
	 * Look at eligibility.  A flow is not eligible if S>V (when
	 * this happens, it means that there is some other flow already
	 * scheduled for the same pipe, so the scheduler_heap cannot be
	 * empty).  If the flow is not eligible we just store it in the
	 * not_eligible_heap.  Otherwise, we store in the scheduler_heap
	 * and possibly invoke ready_event_wfq() right now if there is
	 * leftover credit.
	 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
	 * and for all flows in not_eligible_heap (NEH), S_i > V.
	 * So when we need to compute max(V, min(S_i)) for all i in SCH+NEH,
	 * we only need to look into NEH.
	 */
	if (DN_KEY_GT(q->S, pipe->V)) {	/* Not eligible */
	    if (pipe->scheduler_heap.elements == 0)
		kprintf("++ ouch! not eligible but empty scheduler!\n");
	    heap_insert(&pipe->not_eligible_heap, q->S, q);
	} else {
	    heap_insert(&pipe->scheduler_heap, q->F, q);
	    if (pipe->numbytes >= 0) {	/* Pipe is idle */
		if (pipe->scheduler_heap.elements != 1)
		    kprintf("*** OUCH! pipe should have been idle!\n");
		DPRINTF("Waking up pipe %d at %d\n",
			pipe->pipe_nr, (int)(q->F >> MY_M));
		pipe->sched_time = curr_time;
		ready_event_wfq(pipe);
	    }
	}
    }
done:
    return 0;

dropit:
    if (q)
	q->drops++;
    return ENOBUFS;
}

/*
 * Dispose of all packets and flow_queues on a flow_set.
 * If all=1, also remove the RED lookup table and other storage,
 * including the descriptor itself.
 * For the flow_set embedded in a dn_pipe, the caller MUST also clean up
 * the ready_heap...
 */
static void
purge_flow_set(struct dn_flow_set *fs, int all)
{
    int i;
#ifdef INVARIANTS
    int rq_elements = 0;
#endif

    for (i = 0; i <= fs->rq_size; i++) {
	struct dn_flow_queue *q;

	while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
	    struct dn_pkt *pkt;

	    while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
	    	TAILQ_REMOVE(&q->queue, pkt, dn_next);
	    	ip_dn_packet_free(pkt);
	    }

	    LIST_REMOVE(q, q_link);
	    kfree(q, M_DUMMYNET);

#ifdef INVARIANTS
	    rq_elements++;
#endif
	}
    }
    KASSERT(rq_elements == fs->rq_elements,
	    ("# rq elements mismatch, freed %d, total %d",
	     rq_elements, fs->rq_elements));
    fs->rq_elements = 0;

    if (all) {
	/* RED - free lookup table */
	if (fs->w_q_lookup)
	    kfree(fs->w_q_lookup, M_DUMMYNET);

	if (fs->rq)
	    kfree(fs->rq, M_DUMMYNET);

	/*
	 * If this fs is not part of a pipe, free it.
	 *
	 * fs->pipe == NULL can happen if 'fs' is a WF2Q flow_set and
	 * - no packet belonging to that flow_set has ever been delivered
	 *   by dummynet_io(), i.e. the parent pipe was never installed, or
	 * - the parent pipe has been deleted.
	 */
	if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
	    kfree(fs, M_DUMMYNET);
    }
}

/*
 * Dispose of all packets queued on a pipe (not a flow_set).
 * Also free all resources associated with a pipe, which is about
 * to be deleted.
 */
static void
purge_pipe(struct dn_pipe *pipe)
{
    struct dn_pkt *pkt;

    purge_flow_set(&pipe->fs, 1);

    while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
	TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
	ip_dn_packet_free(pkt);
    }

    heap_free(&pipe->scheduler_heap);
    heap_free(&pipe->not_eligible_heap);
    heap_free(&pipe->idle_heap);
}

/*
 * Delete all pipes and heaps, returning memory.
 */
static void
dummynet_flush(void)
{
    struct dn_pipe_head pipe_list;
    struct dn_flowset_head fs_list;
    struct dn_pipe *p;
    struct dn_flow_set *fs;
    int i;

    /*
     * Prevent future matches...
     */
    LIST_INIT(&pipe_list);
    for (i = 0; i < DN_NR_HASH_MAX; ++i) {
	struct dn_pipe_head *pipe_hdr = &pipe_table[i];

    	while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
	    LIST_REMOVE(p, p_link);
	    LIST_INSERT_HEAD(&pipe_list, p, p_link);
	}
    }

    LIST_INIT(&fs_list);
    for (i = 0; i < DN_NR_HASH_MAX; ++i) {
	struct dn_flowset_head *fs_hdr = &flowset_table[i];

	while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
	    LIST_REMOVE(fs, fs_link);
	    LIST_INSERT_HEAD(&fs_list, fs, fs_link);
	}
    }

    /* Free heaps so we don't have unwanted events */
    heap_free(&ready_heap);
    heap_free(&wfq_ready_heap);
    heap_free(&extract_heap);

    /*
     * Now purge all queued pkts and delete all pipes.
     */
    /* Scan and purge all flow_sets. */
    while ((fs = LIST_FIRST(&fs_list)) != NULL) {
	LIST_REMOVE(fs, fs_link);
	purge_flow_set(fs, 1);
    }

    while ((p = LIST_FIRST(&pipe_list)) != NULL) {
	LIST_REMOVE(p, p_link);
	purge_pipe(p);
	kfree(p, M_DUMMYNET);
    }
}

/*
 * Set up RED parameters.
 */
static int
config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
{
    int i;

    x->w_q = ioc_fs->w_q;
    x->min_th = SCALE(ioc_fs->min_th);
    x->max_th = SCALE(ioc_fs->max_th);
    x->max_p = ioc_fs->max_p;

    x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
    x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
    if (x->flags_fs & DN_IS_GENTLE_RED) {
	x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
	x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
    }

    /* If the lookup table already exists, free it and create it again */
    if (x->w_q_lookup) {
	kfree(x->w_q_lookup, M_DUMMYNET);
	x->w_q_lookup = NULL;
    }

    if (red_lookup_depth == 0) {
	kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
	kfree(x, M_DUMMYNET);
	return EINVAL;
    }
    x->lookup_depth = red_lookup_depth;
    x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
    			    M_DUMMYNET, M_WAITOK);

    /* Fill the lookup table with (1 - w_q)^x */
    x->lookup_step = ioc_fs->lookup_step;
    x->lookup_weight = ioc_fs->lookup_weight;

    x->w_q_lookup[0] = SCALE(1) - x->w_q;
    for (i = 1; i < x->lookup_depth; i++)
	x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);

    if (red_avg_pkt_size < 1)
	red_avg_pkt_size = 512;
    x->avg_pkt_size = red_avg_pkt_size;

    if (red_max_pkt_size < 1)
	red_max_pkt_size = 1500;
    x->max_pkt_size = red_max_pkt_size;

    return 0;
}
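
/*
 * Editor's note (illustration, not from the original sources): the table
 * built above turns the idle-time decay in red_drops() into a single
 * multiply.  If the queue has been empty for 'idle' ticks, red_drops()
 * computes t = idle / lookup_step and then
 *
 *	avg = SCALE_MUL(avg, w_q_lookup[t]);
 *
 * which approximates avg * (1 - w_q)^(idle/s); once t reaches
 * lookup_depth the average is simply forced to 0.
 */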

static void
alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
{
    int i, alloc_size;

    if (x->flags_fs & DN_HAVE_FLOW_MASK) {
	int l = ioc_fs->rq_size;

	/* Allocate some slots */
	if (l == 0)
	    l = dn_hash_size;

	if (l < DN_MIN_HASH_SIZE)
	    l = DN_MIN_HASH_SIZE;
	else if (l > DN_MAX_HASH_SIZE)
	    l = DN_MAX_HASH_SIZE;

	x->rq_size = l;
    } else {
	/* One is enough for null mask */
	x->rq_size = 1;
    }
    alloc_size = x->rq_size + 1;

    x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
		    M_DUMMYNET, M_WAITOK | M_ZERO);
    x->rq_elements = 0;

    for (i = 0; i < alloc_size; ++i)
	LIST_INIT(&x->rq[i]);
}

static void
set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
{
    id->fid_dst_ip = ioc_id->u.ip.dst_ip;
    id->fid_src_ip = ioc_id->u.ip.src_ip;
    id->fid_dst_port = ioc_id->u.ip.dst_port;
    id->fid_src_port = ioc_id->u.ip.src_port;
    id->fid_proto = ioc_id->u.ip.proto;
    id->fid_flags = ioc_id->u.ip.flags;
}

static void
set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
{
    x->flags_fs = ioc_fs->flags_fs;
    x->qsize = ioc_fs->qsize;
    x->plr = ioc_fs->plr;
    set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
    if (x->flags_fs & DN_QSIZE_IS_BYTES) {
	if (x->qsize > 1024 * 1024)
	    x->qsize = 1024 * 1024;
    } else {
	if (x->qsize == 0 || x->qsize > 100)
	    x->qsize = 50;
    }

    /* Configuring RED */
    if (x->flags_fs & DN_IS_RED)
	config_red(ioc_fs, x);	/* XXX should check errors */
}

/*
 * Set up pipe or queue parameters.
 */

static int
config_pipe(struct dn_ioc_pipe *ioc_pipe)
{
    struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
    int error;

    /*
     * The config program passes parameters as follows:
     * bw	bits/second (0 means no limits)
     * delay	ms (must be translated into ticks)
     * qsize	slots or bytes
     */
    ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;
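
    /*
     * Editor's example (not from the original sources): with the default
     * dn_hz = 1000 a configured delay of 100 ms becomes
     * (100 * 1000) / 1000 = 100 ticks; at dn_hz = 100 it would be
     * 10 ticks, i.e. the same 100 ms at a coarser resolution.
     */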

    /*
     * We need either a pipe number or a flow_set number
     */
    if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
	return EINVAL;
    if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
	return EINVAL;

    /*
     * Validate pipe number
     */
    if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
	return EINVAL;

    error = EINVAL;
    if (ioc_pipe->pipe_nr != 0) {	/* This is a pipe */
	struct dn_pipe *x, *p;

	/* Locate pipe */
	p = dn_find_pipe(ioc_pipe->pipe_nr);

	if (p == NULL) {	/* New pipe */
	    x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
	    x->pipe_nr = ioc_pipe->pipe_nr;
	    x->fs.pipe = x;
	    TAILQ_INIT(&x->p_queue);

	    /*
	     * idle_heap is the only heap from which we extract from the
	     * middle.
	     */
	    x->idle_heap.size = x->idle_heap.elements = 0;
	    x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
	} else {
	    int i;

	    x = p;

	    /* Flush accumulated credit for all queues */
	    for (i = 0; i <= x->fs.rq_size; i++) {
		struct dn_flow_queue *q;

		LIST_FOREACH(q, &x->fs.rq[i], q_link)
		    q->numbytes = 0;
	    }
	}

	x->bandwidth = ioc_pipe->bandwidth;
	x->numbytes = 0; /* Just in case... */
	x->delay = ioc_pipe->delay;

	set_fs_parms(&x->fs, ioc_fs);

	if (x->fs.rq == NULL) {	/* A new pipe */
	    struct dn_pipe_head *pipe_hdr;

	    alloc_hash(&x->fs, ioc_fs);

	    pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
	    LIST_INSERT_HEAD(pipe_hdr, x, p_link);
	}
    } else {	/* Config flow_set */
	struct dn_flow_set *x, *fs;

	/* Locate flow_set */
	fs = dn_find_flowset(ioc_fs->fs_nr);

	if (fs == NULL) {	/* New flow_set */
	    if (ioc_fs->parent_nr == 0)	/* Need link to a pipe */
		goto back;

	    x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
	    		M_WAITOK | M_ZERO);
	    x->fs_nr = ioc_fs->fs_nr;
	    x->parent_nr = ioc_fs->parent_nr;
	    x->weight = ioc_fs->weight;
	    if (x->weight == 0)
		x->weight = 1;
	    else if (x->weight > 100)
		x->weight = 100;
	} else {
	    /* Changing the parent pipe is not allowed; delete and recreate */
	    if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
		goto back;
	    x = fs;
	}

	set_fs_parms(x, ioc_fs);

	if (x->rq == NULL) {	/* A new flow_set */
	    struct dn_flowset_head *fs_hdr;

	    alloc_hash(x, ioc_fs);

	    fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
	    LIST_INSERT_HEAD(fs_hdr, x, fs_link);
	}
    }
    error = 0;

back:
    return error;
}

/*
 * Helper function to remove from a heap queues which are linked to
 * a flow_set about to be deleted.
 */
static void
fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
{
    int i = 0, found = 0;

    while (i < h->elements) {
	if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
	    h->elements--;
	    h->p[i] = h->p[h->elements];
	    found++;
	} else {
	    i++;
	}
    }
    if (found)
	heapify(h);
}

/*
 * Helper function to remove a pipe from a heap (can be there at most once).
 */
static void
pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
{
    if (h->elements > 0) {
	int i;

	for (i = 0; i < h->elements; i++) {
	    if (h->p[i].object == p) { /* Found it */
		h->elements--;
		h->p[i] = h->p[h->elements];
		heapify(h);
		break;
	    }
	}
    }
}

static void
dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
{
    struct dn_pipe *pipe = pipe0;

    if (fs->pipe == pipe) {
	kprintf("++ ref to pipe %d from fs %d\n",
		pipe->pipe_nr, fs->fs_nr);
	fs->pipe = NULL;
	purge_flow_set(fs, 0);
    }
}

/*
 * Fully delete a pipe or a queue, cleaning up associated info.
 */
static int
delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
{
    struct dn_pipe *p;
    int error;

    if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
	return EINVAL;
    if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
	return EINVAL;

    /* Same range check as in config_pipe() */
    if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
    	return EINVAL;

    error = EINVAL;
    if (ioc_pipe->pipe_nr != 0) {	/* This is an old-style pipe */
	/* Locate pipe */
	p = dn_find_pipe(ioc_pipe->pipe_nr);
	if (p == NULL)
	    goto back; /* Not found */

	/* Unlink from pipe hash table */
	LIST_REMOVE(p, p_link);

	/* Remove all references to this pipe from flow_sets */
	dn_iterate_flowset(dn_unref_pipe_cb, p);

	fs_remove_from_heap(&ready_heap, &p->fs);
	purge_pipe(p);	/* Remove all data associated to this pipe */

	/* Remove references to this pipe from extract_heap and wfq_ready_heap */
	pipe_remove_from_heap(&extract_heap, p);
	pipe_remove_from_heap(&wfq_ready_heap, p);

	kfree(p, M_DUMMYNET);
    } else {	/* This is a WF2Q queue (dn_flow_set) */
	struct dn_flow_set *fs;

	/* Locate flow_set */
	fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
	if (fs == NULL)
	    goto back; /* Not found */

	LIST_REMOVE(fs, fs_link);

	if ((p = fs->pipe) != NULL) {
	    /* Update total weight on parent pipe and cleanup parent heaps */
	    p->sum -= fs->weight * fs->backlogged;
	    fs_remove_from_heap(&p->not_eligible_heap, fs);
	    fs_remove_from_heap(&p->scheduler_heap, fs);
#if 1	/* XXX should we remove from idle_heap as well? */
	    fs_remove_from_heap(&p->idle_heap, fs);
#endif
	}
	purge_flow_set(fs, 1);
    }
    error = 0;

back:
    return error;
}

/*
 * Helper function used to copy data from the kernel in DUMMYNET_GET
 */
static void
dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
{
    ioc_id->type = ETHERTYPE_IP;
    ioc_id->u.ip.dst_ip = id->fid_dst_ip;
    ioc_id->u.ip.src_ip = id->fid_src_ip;
    ioc_id->u.ip.dst_port = id->fid_dst_port;
    ioc_id->u.ip.src_port = id->fid_src_port;
    ioc_id->u.ip.proto = id->fid_proto;
    ioc_id->u.ip.flags = id->fid_flags;
}

static void *
dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
{
    struct dn_ioc_flowqueue *ioc_fq = bp;
    int i, copied = 0;

    for (i = 0; i <= fs->rq_size; i++) {
	const struct dn_flow_queue *q;

	LIST_FOREACH(q, &fs->rq[i], q_link) {
	    if (q->hash_slot != i) {	/* XXX ASSERT */
		kprintf("++ at %d: wrong slot (have %d, "
			"should be %d)\n", copied, q->hash_slot, i);
	    }
	    if (q->fs != fs) {		/* XXX ASSERT */
		kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
			i, q->fs, fs);
	    }

	    copied++;

	    ioc_fq->len = q->len;
	    ioc_fq->len_bytes = q->len_bytes;
	    ioc_fq->tot_pkts = q->tot_pkts;
	    ioc_fq->tot_bytes = q->tot_bytes;
	    ioc_fq->drops = q->drops;
	    ioc_fq->hash_slot = q->hash_slot;
	    ioc_fq->S = q->S;
	    ioc_fq->F = q->F;
	    dn_copy_flowid(&q->id, &ioc_fq->id);

	    ioc_fq++;
	}
    }

    if (copied != fs->rq_elements) {	/* XXX ASSERT */
	kprintf("++ wrong count, have %d should be %d\n",
		copied, fs->rq_elements);
    }
    return ioc_fq;
}

static void
dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
		u_short fs_type)
{
    ioc_fs->fs_type = fs_type;

    ioc_fs->fs_nr = fs->fs_nr;
    ioc_fs->flags_fs = fs->flags_fs;
    ioc_fs->parent_nr = fs->parent_nr;

    ioc_fs->weight = fs->weight;
    ioc_fs->qsize = fs->qsize;
    ioc_fs->plr = fs->plr;

    ioc_fs->rq_size = fs->rq_size;
    ioc_fs->rq_elements = fs->rq_elements;

    ioc_fs->w_q = fs->w_q;
    ioc_fs->max_th = fs->max_th;
    ioc_fs->min_th = fs->min_th;
    ioc_fs->max_p = fs->max_p;

    dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
}

static void
dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
{
    size_t *size = sz;

    *size += sizeof(struct dn_ioc_pipe) +
	     pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
}

static void
dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
{
    size_t *size = sz;

    *size += sizeof(struct dn_ioc_flowset) +
	     fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
}

static void
dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
{
    char **bp = bp0;
    struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);

    /*
     * Copy flow set descriptor associated with this pipe
     */
    dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);

    /*
     * Copy pipe descriptor
     */
    ioc_pipe->bandwidth = pipe->bandwidth;
    ioc_pipe->pipe_nr = pipe->pipe_nr;
    ioc_pipe->V = pipe->V;
    /* Convert delay to milliseconds */
    ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;

    /*
     * Copy flow queue descriptors
     */
    *bp += sizeof(*ioc_pipe);
    *bp = dn_copy_flowqueues(&pipe->fs, *bp);
}

static void
dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
{
    char **bp = bp0;
    struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);

    /*
     * Copy flow set descriptor
     */
    dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);

    /*
     * Copy flow queue descriptors
     */
    *bp += sizeof(*ioc_fs);
    *bp = dn_copy_flowqueues(fs, *bp);
}

static int
dummynet_get(struct dn_sopt *dn_sopt)
{
    char *buf, *bp;
    size_t size = 0;

    /*
     * Compute size of data structures: list of pipes and flow_sets.
     */
    dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
    dn_iterate_flowset(dn_calc_fs_size_cb, &size);

    /*
     * Copyout pipe/flow_set/flow_queue
     */
    bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
    dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
    dn_iterate_flowset(dn_copyout_fs_cb, &bp);

    /* Temp memory will be freed by caller */
    dn_sopt->dn_sopt_arg = buf;
    dn_sopt->dn_sopt_arglen = size;
    return 0;
}

/*
 * Handler for the various dummynet socket options (get, flush, config, del)
 */
static int
dummynet_ctl(struct dn_sopt *dn_sopt)
{
    int error = 0;

    switch (dn_sopt->dn_sopt_name) {
    case IP_DUMMYNET_GET:
	error = dummynet_get(dn_sopt);
	break;

    case IP_DUMMYNET_FLUSH:
	dummynet_flush();
	break;

    case IP_DUMMYNET_CONFIGURE:
	KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
	error = config_pipe(dn_sopt->dn_sopt_arg);
	break;

    case IP_DUMMYNET_DEL:	/* Remove a pipe or flow_set */
	KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
	error = delete_pipe(dn_sopt->dn_sopt_arg);
	break;

    default:
	kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
	error = EINVAL;
	break;
    }
    return error;
}

static void
dummynet_clock(systimer_t info __unused, int in_ipi __unused,
    struct intrframe *frame __unused)
{
    KASSERT(mycpuid == ip_dn_cpu,
	    ("dummynet systimer comes on cpu%d, should be %d!",
	     mycpuid, ip_dn_cpu));

    crit_enter();
    if (DUMMYNET_LOADED && (dn_netmsg.lmsg.ms_flags & MSGF_DONE))
	lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), &dn_netmsg.lmsg);
    crit_exit();
}

static int
sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
{
    int error, val, origcpu;

    val = dn_hz;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || req->newptr == NULL)
	return error;
    if (val <= 0)
	return EINVAL;
    else if (val > DN_CALLOUT_FREQ_MAX)
	val = DN_CALLOUT_FREQ_MAX;

    origcpu = mycpuid;
    lwkt_migratecpu(ip_dn_cpu);

    crit_enter();
    dn_hz = val;
    systimer_adjust_periodic(&dn_clock, val);
    crit_exit();

    lwkt_migratecpu(origcpu);

    return 0;
}

static void
ip_dn_init_dispatch(netmsg_t msg)
{
    int i, error = 0;

    KASSERT(mycpuid == ip_dn_cpu,
    	    ("%s runs on cpu%d, instead of cpu%d", __func__,
	     mycpuid, ip_dn_cpu));

    crit_enter();

    if (DUMMYNET_LOADED) {
	kprintf("DUMMYNET already loaded\n");
	error = EEXIST;
	goto back;
    }

    kprintf("DUMMYNET initialized (011031)\n");

    for (i = 0; i < DN_NR_HASH_MAX; ++i)
    	LIST_INIT(&pipe_table[i]);

    for (i = 0; i < DN_NR_HASH_MAX; ++i)
	LIST_INIT(&flowset_table[i]);

    ready_heap.size = ready_heap.elements = 0;
    ready_heap.offset = 0;

    wfq_ready_heap.size = wfq_ready_heap.elements = 0;
    wfq_ready_heap.offset = 0;

    extract_heap.size = extract_heap.elements = 0;
    extract_heap.offset = 0;

    ip_dn_ctl_ptr = dummynet_ctl;
    ip_dn_io_ptr = dummynet_io;

    netmsg_init(&dn_netmsg, NULL, &netisr_adone_rport,
		0, dummynet);
    systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);

back:
    crit_exit();
    lwkt_replymsg(&msg->lmsg, error);
}

static int
ip_dn_init(void)
{
    struct netmsg_base smsg;

    if (ip_dn_cpu >= ncpus) {
	kprintf("%s: CPU%d does not exist, switching to CPU0\n",
		__func__, ip_dn_cpu);
	ip_dn_cpu = 0;
    }

    netmsg_init(&smsg, NULL, &curthread->td_msgport,
		0, ip_dn_init_dispatch);
    lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
    return smsg.lmsg.ms_error;
}

#ifdef KLD_MODULE

static void
ip_dn_stop_dispatch(netmsg_t msg)
{
    crit_enter();

    dummynet_flush();

    ip_dn_ctl_ptr = NULL;
    ip_dn_io_ptr = NULL;

    systimer_del(&dn_clock);

    crit_exit();
    lwkt_replymsg(&msg->lmsg, 0);
}

static void
ip_dn_stop(void)
{
    struct netmsg_base smsg;

    netmsg_init(&smsg, NULL, &curthread->td_msgport,
		0, ip_dn_stop_dispatch);
    lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);

    netmsg_service_sync();
}

#endif	/* KLD_MODULE */

static int
dummynet_modevent(module_t mod, int type, void *data)
{
    switch (type) {
    case MOD_LOAD:
	return ip_dn_init();

    case MOD_UNLOAD:
#ifndef KLD_MODULE
	kprintf("dummynet statically compiled, cannot unload\n");
	return EINVAL;
#else
	ip_dn_stop();
#endif
	break;

    default:
	break;
    }
    return 0;
}

static moduledata_t dummynet_mod = {
    "dummynet",
    dummynet_modevent,
    NULL
};
DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_VERSION(dummynet, 1);