1 /*
2  * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
3  * Portions Copyright (c) 2000 Akamba Corp.
4  * All rights reserved
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
28  * $DragonFly: src/sys/net/dummynet/ip_dummynet.c,v 1.53 2007/12/08 04:33:58 sephe Exp $
29  */
30 
31 #ifdef DUMMYNET_DEBUG
32 #define DPRINTF(fmt, ...)	kprintf(fmt, __VA_ARGS__)
33 #else
34 #define DPRINTF(fmt, ...)	((void)0)
35 #endif
36 
37 /*
38  * This module implements IP dummynet, a bandwidth limiter/delay emulator.
39  * Description of the data structures used is in ip_dummynet.h
40  * Here you mainly find the following blocks of code:
41  *  + variable declarations;
42  *  + heap management functions;
43  *  + scheduler and dummynet functions;
44  *  + configuration and initialization.
45  *
46  * Most important Changes:
47  *
48  * 011004: KLDable
49  * 010124: Fixed WF2Q behaviour
50  * 010122: Fixed spl protection.
51  * 000601: WF2Q support
52  * 000106: Large rewrite, use heaps to handle very many pipes.
53  * 980513: Initial release
54  */
55 
56 #include <sys/param.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/socketvar.h>
61 #include <sys/sysctl.h>
62 #include <sys/systimer.h>
63 #include <sys/thread2.h>
64 
65 #include <net/ethernet.h>
66 #include <net/netmsg2.h>
67 #include <net/route.h>
68 
69 #include <netinet/in_var.h>
70 #include <netinet/ip_var.h>
71 
72 #include <net/dummynet/ip_dummynet.h>
73 
74 #ifndef DN_CALLOUT_FREQ_MAX
75 #define DN_CALLOUT_FREQ_MAX	10000
76 #endif
77 
78 /*
79  * The maximum/minimum hash table size for queues.
80  * These values must be a power of 2.
81  */
82 #define DN_MIN_HASH_SIZE	4
83 #define DN_MAX_HASH_SIZE	65536
84 
85 /*
86  * Some macros are used to compare key values and handle wraparounds.
87  * MAX64 returns the larger of the two key values.
88  */
89 #define DN_KEY_LT(a, b)		((int64_t)((a) - (b)) < 0)
90 #define DN_KEY_LEQ(a, b)	((int64_t)((a) - (b)) <= 0)
91 #define DN_KEY_GT(a, b)		((int64_t)((a) - (b)) > 0)
92 #define DN_KEY_GEQ(a, b)	((int64_t)((a) - (b)) >= 0)
93 #define MAX64(x, y)		((((int64_t)((y) - (x))) > 0) ? (y) : (x))
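
/*
 * Example (illustrative): with 8-bit keys the same trick would give
 * DN_KEY_LT(0xfe, 0x02) == true, since (int8_t)(0xfe - 0x02) == -4,
 * so the comparisons stay correct across wraparound of the tick
 * counter; the 64-bit dn_key versions above behave the same way.
 */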
94 
95 #define DN_NR_HASH_MAX		16
96 #define DN_NR_HASH_MASK		(DN_NR_HASH_MAX - 1)
97 #define DN_NR_HASH(nr)		\
98 	((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)
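
/*
 * Illustrative: DN_NR_HASH folds a pipe/flow_set number into one of 16
 * buckets by XOR-ing its nibbles, e.g. nr = 0x1234 gives 1^2^3^4 = 4,
 * so that pipe lands in bucket 4 of pipe_table[]/flowset_table[].
 */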
99 
100 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
101 
102 extern int	ip_dn_cpu;
103 
104 static dn_key	curr_time = 0;		/* current simulation time */
105 static int	dn_hash_size = 64;	/* default hash size */
106 static int	pipe_expire = 1;	/* expire queue if empty */
107 static int	dn_max_ratio = 16;	/* max queues/buckets ratio */
108 
109 /*
110  * Statistics on number of queue searches and search steps
111  */
112 static int	searches;
113 static int	search_steps;
114 
115 /*
116  * RED parameters
117  */
118 static int	red_lookup_depth = 256;	/* default lookup table depth */
119 static int	red_avg_pkt_size = 512;	/* default medium packet size */
120 static int	red_max_pkt_size = 1500;/* default max packet size */
121 
122 /*
123  * Three heaps contain queues and pipes that the scheduler handles:
124  *
125  *  + ready_heap	contains all dn_flow_queue related to fixed-rate pipes.
126  *  + wfq_ready_heap	contains the pipes associated with WF2Q flows.
127  *  + extract_heap	contains pipes associated with delay lines.
128  */
129 static struct dn_heap	ready_heap;
130 static struct dn_heap	extract_heap;
131 static struct dn_heap	wfq_ready_heap;
132 
133 static struct dn_pipe_head	pipe_table[DN_NR_HASH_MAX];
134 static struct dn_flowset_head	flowset_table[DN_NR_HASH_MAX];
135 
136 /*
137  * Variables for dummynet systimer
138  */
139 static struct netmsg	dn_netmsg;
140 static struct systimer	dn_clock;
141 static int		dn_hz = 1000;
142 
143 static int	sysctl_dn_hz(SYSCTL_HANDLER_ARGS);
144 
145 SYSCTL_DECL(_net_inet_ip_dummynet);
146 
147 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
148 	   &dn_hash_size, 0, "Default hash table size");
149 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
150 	   &curr_time, 0, "Current tick");
151 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
152 	   &pipe_expire, 0, "Expire queue if empty");
153 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
154 	   &dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");
155 
156 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
157 	   &ready_heap.size, 0, "Size of ready heap");
158 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
159 	   &extract_heap.size, 0, "Size of extract heap");
160 
161 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
162 	   &searches, 0, "Number of queue searches");
163 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
164 	   &search_steps, 0, "Number of queue search steps");
165 
166 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
167 	   &red_lookup_depth, 0, "Depth of RED lookup table");
168 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
169 	   &red_avg_pkt_size, 0, "RED Medium packet size");
170 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
171 	   &red_max_pkt_size, 0, "RED Max packet size");
172 
173 SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
174 	    0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");
175 
176 static int	heap_init(struct dn_heap *, int);
177 static int	heap_insert(struct dn_heap *, dn_key, void *);
178 static void	heap_extract(struct dn_heap *, void *);
179 
180 static void	transmit_event(struct dn_pipe *);
181 static void	ready_event(struct dn_flow_queue *);
182 static void	ready_event_wfq(struct dn_pipe *);
183 
184 static int	config_pipe(struct dn_ioc_pipe *);
185 static void	dummynet_flush(void);
186 
187 static void	dummynet_clock(systimer_t, struct intrframe *);
188 static void	dummynet(struct netmsg *);
189 
190 static struct dn_pipe *dn_find_pipe(int);
191 static struct dn_flow_set *dn_locate_flowset(int, int);
192 
193 typedef void	(*dn_pipe_iter_t)(struct dn_pipe *, void *);
194 static void	dn_iterate_pipe(dn_pipe_iter_t, void *);
195 
196 typedef void	(*dn_flowset_iter_t)(struct dn_flow_set *, void *);
197 static void	dn_iterate_flowset(dn_flowset_iter_t, void *);
198 
199 static ip_dn_io_t	dummynet_io;
200 static ip_dn_ctl_t	dummynet_ctl;
201 
202 /*
203  * Heap management functions.
204  *
205  * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
206  * Some macros help find parent/children and let us optimize the code.
207  *
208  * heap_init() is called to expand the heap when needed.
209  * Increment size in blocks of 16 entries.
210  * XXX failure to allocate a new element is a pretty bad failure
211  * as we basically stall a whole queue forever!!
212  * Returns 1 on error, 0 on success
213  */
214 #define HEAP_FATHER(x)		(((x) - 1) / 2)
215 #define HEAP_LEFT(x)		(2*(x) + 1)
216 #define HEAP_IS_LEFT(x)		((x) & 1)
217 #define HEAP_RIGHT(x)		(2*(x) + 2)
218 #define HEAP_SWAP(a, b, buffer)	{ buffer = a; a = b; b = buffer; }
219 #define HEAP_INCREMENT		15
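
/*
 * Illustrative layout: in the array {k0, k1, k2, k3, k4} node 0 is the
 * root, HEAP_LEFT(0)/HEAP_RIGHT(0) are nodes 1 and 2, and node 4's
 * parent is HEAP_FATHER(4) == 1.  heap_init() below rounds allocations
 * up to a multiple of HEAP_INCREMENT+1 == 16 entries.
 */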
220 
221 static int
222 heap_init(struct dn_heap *h, int new_size)
223 {
224     struct dn_heap_entry *p;
225 
226     if (h->size >= new_size) {
227 	kprintf("%s, Bogus call, have %d want %d\n", __func__,
228 		h->size, new_size);
229 	return 0;
230     }
231 
232     new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
233     p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
234     if (h->size > 0) {
235 	bcopy(h->p, p, h->size * sizeof(*p));
236 	kfree(h->p, M_DUMMYNET);
237     }
238     h->p = p;
239     h->size = new_size;
240     return 0;
241 }
242 
243 /*
244  * Insert element in heap. Normally, p != NULL, we insert p in
245  * a new position and bubble up.  If p == NULL, then the element is
246  * already in place, and key is the position where to start the
247  * bubble-up.
248  * Returns 1 on failure (cannot allocate new heap entry)
249  *
250  * If offset > 0 the position (index, int) of the element in the heap is
251  * also stored in the element itself at the given offset in bytes.
252  */
253 #define SET_OFFSET(heap, node) \
254     if (heap->offset > 0) \
255 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = node;
256 
257 /*
258  * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
259  */
260 #define RESET_OFFSET(heap, node) \
261     if (heap->offset > 0) \
262 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;
263 
264 static int
265 heap_insert(struct dn_heap *h, dn_key key1, void *p)
266 {
267     int son = h->elements;
268 
269     if (p == NULL) {	/* Data already there, set starting point */
270 	son = key1;
271     } else {		/* Insert new element at the end, possibly resize */
272 	son = h->elements;
273 	if (son == h->size) { /* Need resize... */
274 	    if (heap_init(h, h->elements + 1))
275 		return 1; /* Failure... */
276 	}
277 	h->p[son].object = p;
278 	h->p[son].key = key1;
279 	h->elements++;
280     }
281 
282     while (son > 0) {	/* Bubble up */
283 	int father = HEAP_FATHER(son);
284 	struct dn_heap_entry tmp;
285 
286 	if (DN_KEY_LT(h->p[father].key, h->p[son].key))
287 	    break; /* Found right position */
288 
289 	/* 'son' smaller than 'father', swap and repeat */
290 	HEAP_SWAP(h->p[son], h->p[father], tmp);
291 	SET_OFFSET(h, son);
292 	son = father;
293     }
294     SET_OFFSET(h, son);
295     return 0;
296 }
297 
298 /*
299  * Remove top element from heap, or obj if obj != NULL
300  */
301 static void
302 heap_extract(struct dn_heap *h, void *obj)
303 {
304     int child, father, max = h->elements - 1;
305 
306     if (max < 0) {
307 	kprintf("warning, extract from empty heap 0x%p\n", h);
308 	return;
309     }
310 
311     father = 0; /* Default: move up smallest child */
312     if (obj != NULL) { /* Extract specific element, index is at offset */
313 	if (h->offset <= 0)
314 	    panic("%s from middle not supported on this heap!!!\n", __func__);
315 
316 	father = *((int *)((char *)obj + h->offset));
317 	if (father < 0 || father >= h->elements) {
318 	    panic("%s father %d out of bound 0..%d\n", __func__,
319 	    	  father, h->elements);
320 	}
321     }
322     RESET_OFFSET(h, father);
323 
324     child = HEAP_LEFT(father);		/* Left child */
325     while (child <= max) {		/* Valid entry */
326 	if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
327 	    child = child + 1;		/* Take right child, otherwise left */
328 	h->p[father] = h->p[child];
329 	SET_OFFSET(h, father);
330 	father = child;
331 	child = HEAP_LEFT(child);	/* Left child for next loop */
332     }
333     h->elements--;
334     if (father != max) {
335 	/*
336 	 * Fill hole with last entry and bubble up, reusing the insert code
337 	 */
338 	h->p[father] = h->p[max];
339 	heap_insert(h, father, NULL);	/* This one cannot fail */
340     }
341 }
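
/*
 * Usage sketch (illustrative, assumes a zeroed struct dn_heap h):
 *
 *	heap_insert(&h, 10, obj_a);	// schedule obj_a for tick 10
 *	heap_insert(&h, 5, obj_b);	// obj_b becomes the new root
 *	while (h.elements > 0 && DN_KEY_LEQ(h.p[0].key, curr_time)) {
 *		void *obj = h.p[0].object;
 *		heap_extract(&h, NULL);	// pop the earliest deadline
 *		// ... process obj ...
 *	}
 *
 * This is exactly the pattern dummynet() below applies to its three
 * heaps once per tick.
 */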
342 
343 /*
344  * heapify() will reorganize data inside an array to maintain the
345  * heap property.  It is needed when we delete a bunch of entries.
346  */
347 static void
348 heapify(struct dn_heap *h)
349 {
350     int i;
351 
352     for (i = 0; i < h->elements; i++)
353 	heap_insert(h, i, NULL);
354 }
355 
356 /*
357  * Cleanup the heap and free data structure
358  */
359 static void
360 heap_free(struct dn_heap *h)
361 {
362     if (h->size > 0)
363 	kfree(h->p, M_DUMMYNET);
364     bzero(h, sizeof(*h));
365 }
366 
367 /*
368  * --- End of heap management functions ---
369  */
370 
371 /*
372  * Scheduler functions:
373  *
374  * transmit_event() is called when the delay-line needs to enter
375  * the scheduler, either because of existing pkts getting ready,
376  * or new packets entering the queue.  The event handled is the delivery
377  * time of the packet.
378  *
379  * ready_event() does something similar with fixed-rate queues, and the
380  * event handled is the finish time of the head pkt.
381  *
382  * ready_event_wfq() does something similar with WF2Q queues, and the
383  * event handled is the start time of the head pkt.
384  *
385  * In all cases, we make sure that the data structures are consistent
386  * before passing pkts out, because this might trigger recursive
387  * invocations of the procedures.
388  */
389 static void
390 transmit_event(struct dn_pipe *pipe)
391 {
392     struct dn_pkt *pkt;
393 
394     while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
395     	   DN_KEY_LEQ(pkt->output_time, curr_time)) {
396 	TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
397 	ip_dn_packet_redispatch(pkt);
398     }
399 
400     /*
401      * If there are leftover packets, put into the heap for next event
402      */
403     if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
404 	/*
405 	 * XXX should check errors on heap_insert, by draining the
406 	 * whole pipe and hoping in the future we are more successful
407 	 */
408 	heap_insert(&extract_heap, pkt->output_time, pipe);
409     }
410 }
411 
412 /*
413  * The following macro computes how many ticks we have to wait
414  * before being able to transmit a packet. The credit is taken from
415  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
416  */
417 #define SET_TICKS(pkt, q, p)	\
418     (((pkt)->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + \
419       (p)->bandwidth - 1) / (p)->bandwidth)
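
/*
 * Worked example (illustrative): with dn_hz = 1000, a 1500-byte packet
 * on a 1 Mbit/s pipe costs len*8*dn_hz = 12,000,000 credit units, so
 * starting from zero credit SET_TICKS yields ceil(12e6 / 1e6) = 12
 * ticks, i.e. the packet's 12 ms serialization delay.
 */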
420 
421 /*
422  * Extract pkt from queue, compute output time (could be now)
423  * and put into delay line (p_queue)
424  */
425 static void
426 move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
427 	 struct dn_pipe *p, int len)
428 {
429     TAILQ_REMOVE(&q->queue, pkt, dn_next);
430     q->len--;
431     q->len_bytes -= len;
432 
433     pkt->output_time = curr_time + p->delay;
434 
435     TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
436 }
437 
438 /*
439  * ready_event() is invoked every time the queue must enter the
440  * scheduler, either because the first packet arrives, or because
441  * a previously scheduled event fired.
442  * On invocation, drain as many pkts as possible (could be 0) and then,
443  * if there are leftover packets, reinsert the queue in the scheduler.
444  */
445 static void
446 ready_event(struct dn_flow_queue *q)
447 {
448     struct dn_pkt *pkt;
449     struct dn_pipe *p = q->fs->pipe;
450     int p_was_empty;
451 
452     if (p == NULL) {
453 	kprintf("ready_event- pipe is gone\n");
454 	return;
455     }
456     p_was_empty = TAILQ_EMPTY(&p->p_queue);
457 
458     /*
459      * Schedule fixed-rate queues linked to this pipe:
460      * Account for the bw accumulated since last scheduling, then
461      * drain as many pkts as allowed by q->numbytes and move to
462      * the delay line (in p) computing output time.
463      * bandwidth==0 (no limit) means we can drain the whole queue,
464      * setting len_scaled = 0 does the job.
465      */
466     q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
467     while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
468 	int len = pkt->dn_m->m_pkthdr.len;
469 	int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
470 
471 	if (len_scaled > q->numbytes)
472 	    break;
473 	q->numbytes -= len_scaled;
474 	move_pkt(pkt, q, p, len);
475     }
476 
477     /*
478      * If we have more packets queued, schedule next ready event
479      * (can only occur when bandwidth != 0, otherwise we would have
480      * flushed the whole queue in the previous loop).
481      * To this purpose we record the current time and compute how many
482      * ticks to go for the finish time of the packet.
483      */
484     if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
485     	/* This implies bandwidth != 0 */
486 	dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
487 
488 	q->sched_time = curr_time;
489 
490 	/*
491 	 * XXX should check errors on heap_insert, and drain the whole
492 	 * queue on error hoping next time we are luckier.
493 	 */
494 	heap_insert(&ready_heap, curr_time + t, q);
495     } else {	/* RED needs to know when the queue becomes empty */
496 	q->q_time = curr_time;
497 	q->numbytes = 0;
498     }
499 
500     /*
501      * If the delay line was empty call transmit_event(p) now.
502      * Otherwise, the scheduler will take care of it.
503      */
504     if (p_was_empty)
505 	transmit_event(p);
506 }
507 
508 /*
509  * Called when we can transmit packets on WF2Q queues.  Take pkts out of
510  * the queues at their start time, and enqueue into the delay line.
511  * Packets are drained as long as p->numbytes >= 0; each one enters the
512  * delay line with deadline curr_time + p->delay.  The packet that
513  * drives p->numbytes negative is charged the extra ticks needed to
514  * earn back the missing credit (see the p->numbytes < 0 case below).
515  */
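
/*
 * Illustrative: under backlog a flow's finish time advances by
 * (len << MY_M) / weight per packet, so a weight-10 flow pays one
 * tenth as much virtual time per byte as a weight-1 flow and thus
 * receives ten times the share of the pipe, which is the WF2Q idea.
 */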
516 static void
517 ready_event_wfq(struct dn_pipe *p)
518 {
519     int p_was_empty = TAILQ_EMPTY(&p->p_queue);
520     struct dn_heap *sch = &p->scheduler_heap;
521     struct dn_heap *neh = &p->not_eligible_heap;
522 
523     p->numbytes += (curr_time - p->sched_time) * p->bandwidth;
524 
525     /*
526      * While we have backlogged traffic AND credit, we need to do
527      * something on the queue.
528      */
529     while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
530 	if (sch->elements > 0) { /* Have some eligible pkts to send out */
531 	    struct dn_flow_queue *q = sch->p[0].object;
532 	    struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
533 	    struct dn_flow_set *fs = q->fs;
534 	    uint64_t len = pkt->dn_m->m_pkthdr.len;
535 	    int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
536 
537 	    heap_extract(sch, NULL);	/* Remove queue from heap */
538 	    p->numbytes -= len_scaled;
539 	    move_pkt(pkt, q, p, len);
540 
541 	    p->V += (len << MY_M) / p->sum;	/* Update V */
542 	    q->S = q->F;			/* Update start time */
543 
544 	    if (q->len == 0) {	/* Flow not backlogged any more */
545 		fs->backlogged--;
546 		heap_insert(&p->idle_heap, q->F, q);
547 	    } else {		/* Still backlogged */
548 		/*
549 		 * Update F and position in backlogged queue, then
550 		 * put flow in not_eligible_heap (we will fix this later).
551 		 */
552 		len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
553 		q->F += (len << MY_M) / (uint64_t)fs->weight;
554 		if (DN_KEY_LEQ(q->S, p->V))
555 		    heap_insert(neh, q->S, q);
556 		else
557 		    heap_insert(sch, q->F, q);
558 	    }
559 	}
560 
561 	/*
562 	 * Now compute V = max(V, min(S_i)).  Remember that all elements in
563 	 * sch have by definition S_i <= V so if sch is not empty, V is surely
564 	 * the max and we must not update it.  Conversely, if sch is empty
565 	 * we only need to look at neh.
566 	 */
567 	if (sch->elements == 0 && neh->elements > 0)
568 	    p->V = MAX64(p->V, neh->p[0].key);
569 
570 	/*
571 	 * Move from neh to sch any packets that have become eligible
572 	 */
573 	while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
574 	    struct dn_flow_queue *q = neh->p[0].object;
575 
576 	    heap_extract(neh, NULL);
577 	    heap_insert(sch, q->F, q);
578 	}
579     }
580 
581     if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
582     	p->idle_heap.elements > 0) {
583 	/*
584 	 * No traffic and no events scheduled.  We can get rid of idle-heap.
585 	 */
586 	int i;
587 
588 	for (i = 0; i < p->idle_heap.elements; i++) {
589 	    struct dn_flow_queue *q = p->idle_heap.p[i].object;
590 
591 	    q->F = 0;
592 	    q->S = q->F + 1;
593 	}
594 	p->sum = 0;
595 	p->V = 0;
596 	p->idle_heap.elements = 0;
597     }
598 
599     /*
600      * If we are getting clocks from dummynet and if we are under credit,
601      * schedule the next ready event.
602      * Also fix the delivery time of the last packet.
603      */
604     if (p->numbytes < 0) { /* This implies bandwidth>0 */
605 	dn_key t = 0; /* Number of ticks i have to wait */
606 
607 	if (p->bandwidth > 0)
608 	    t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
609 	TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
610 	p->sched_time = curr_time;
611 
612 	/*
613 	 * XXX should check errors on heap_insert, and drain the whole
614 	 * queue on error hoping next time we are luckier.
615 	 */
616 	heap_insert(&wfq_ready_heap, curr_time + t, p);
617     }
618 
619     /*
620      * If the delay line was empty call transmit_event(p) now.
621      * Otherwise, the scheduler will take care of it.
622      */
623     if (p_was_empty)
624 	transmit_event(p);
625 }
626 
627 static void
628 dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
629 {
630     if (pipe->idle_heap.elements > 0 &&
631 	DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
632 	struct dn_flow_queue *q = pipe->idle_heap.p[0].object;
633 
634 	heap_extract(&pipe->idle_heap, NULL);
635 	q->S = q->F + 1; /* Mark timestamp as invalid */
636 	pipe->sum -= q->fs->weight;
637     }
638 }
639 
640 /*
641  * This is called once per tick, or dn_hz times per second.  It is used to
642  * increment the current tick counter and schedule expired events.
643  */
644 static void
645 dummynet(struct netmsg *msg)
646 {
647     void *p;
648     struct dn_heap *h;
649     struct dn_heap *heaps[3];
650     int i;
651 
652     heaps[0] = &ready_heap;		/* Fixed-rate queues */
653     heaps[1] = &wfq_ready_heap;		/* WF2Q queues */
654     heaps[2] = &extract_heap;		/* Delay line */
655 
656     /* Reply ASAP */
657     crit_enter();
658     lwkt_replymsg(&msg->nm_lmsg, 0);
659     crit_exit();
660 
661     curr_time++;
662     for (i = 0; i < 3; i++) {
663 	h = heaps[i];
664 	while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
665 	    if (h->p[0].key > curr_time) {
666 		kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
667 		    i, (int)(curr_time - h->p[0].key));
668 	    }
669 
670 	    p = h->p[0].object;		/* Store a copy before heap_extract */
671 	    heap_extract(h, NULL);	/* Need to extract before processing */
672 
673 	    if (i == 0)
674 		ready_event(p);
675 	    else if (i == 1)
676 		ready_event_wfq(p);
677 	    else
678 		transmit_event(p);
679 	}
680     }
681 
682     /* Sweep pipes trying to expire idle flow_queues */
683     dn_iterate_pipe(dn_expire_pipe_cb, NULL);
684 }
685 
686 /*
687  * Unconditionally expire empty queues in case of shortage.
688  * Returns the number of queues freed.
689  */
690 static int
691 expire_queues(struct dn_flow_set *fs)
692 {
693     int i, initial_elements = fs->rq_elements;
694 
695     if (fs->last_expired == time_second)
696 	return 0;
697 
698     fs->last_expired = time_second;
699 
700     for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */
701 	struct dn_flow_queue *q, *qn;
702 
703 	LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
704 	    if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
705 		continue;
706 
707  	    /*
708 	     * Entry is idle, expire it
709 	     */
710 	    LIST_REMOVE(q, q_link);
711 	    kfree(q, M_DUMMYNET);
712 
713 	    KASSERT(fs->rq_elements > 0,
714 		    ("invalid rq_elements %d\n", fs->rq_elements));
715 	    fs->rq_elements--;
716 	}
717     }
718     return initial_elements - fs->rq_elements;
719 }
720 
721 /*
722  * If room, create a new queue and put at head of slot i;
723  * otherwise, create or use the overflow queue.
724  */
725 static struct dn_flow_queue *
726 create_queue(struct dn_flow_set *fs, int i)
727 {
728     struct dn_flow_queue *q;
729 
730     if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
731 	expire_queues(fs) == 0) {
732 	/*
733 	 * No way to get room, use or create overflow queue.
734 	 */
735 	i = fs->rq_size;
736 	if (!LIST_EMPTY(&fs->rq[i]))
737 	    return LIST_FIRST(&fs->rq[i]);
738     }
739 
740     q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
741     if (q == NULL)
742 	return NULL;
743 
744     q->fs = fs;
745     q->hash_slot = i;
746     q->S = q->F + 1;   /* hack - mark timestamp as invalid */
747     TAILQ_INIT(&q->queue);
748 
749     LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
750     fs->rq_elements++;
751 
752     return q;
753 }
754 
755 /*
756  * Given a flow_set and a flow id, find a matching queue
757  * after appropriate masking. The queue is moved to front
758  * so that further searches take less time.
759  */
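
/*
 * Illustrative: if the mask keeps only the top 24 bits of the
 * destination address (other fields zeroed), all traffic to 10.0.0.x
 * shares one queue; a full 5-tuple mask instead gives each connection
 * its own queue, bucketed by the fold-and-mod hash below.
 */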
760 static struct dn_flow_queue *
761 find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
762 {
763     struct dn_flow_queue *q;
764     int i = 0;
765 
766     if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
767 	q = LIST_FIRST(&fs->rq[0]);
768     } else {
769 	struct dn_flow_queue *qn;
770 
771 	/* First, do the masking */
772 	id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
773 	id->fid_src_ip &= fs->flow_mask.fid_src_ip;
774 	id->fid_dst_port &= fs->flow_mask.fid_dst_port;
775 	id->fid_src_port &= fs->flow_mask.fid_src_port;
776 	id->fid_proto &= fs->flow_mask.fid_proto;
777 	id->fid_flags = 0; /* we don't care about this one */
778 
779 	/* Then, hash function */
780 	i = ((id->fid_dst_ip) & 0xffff) ^
781 	    ((id->fid_dst_ip >> 15) & 0xffff) ^
782 	    ((id->fid_src_ip << 1) & 0xffff) ^
783 	    ((id->fid_src_ip >> 16 ) & 0xffff) ^
784 	    (id->fid_dst_port << 1) ^ (id->fid_src_port) ^
785 	    (id->fid_proto);
786 	i = i % fs->rq_size;
787 
788 	/*
789 	 * Finally, scan the current list for a match and
790 	 * expire idle flow queues
791 	 */
792 	searches++;
793 	LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
794 	    search_steps++;
795 	    if (id->fid_dst_ip == q->id.fid_dst_ip &&
796 		id->fid_src_ip == q->id.fid_src_ip &&
797 		id->fid_dst_port == q->id.fid_dst_port &&
798 		id->fid_src_port == q->id.fid_src_port &&
799 		id->fid_proto == q->id.fid_proto &&
800 		id->fid_flags == q->id.fid_flags) {
801 		break; /* Found */
802 	    } else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
803 	    	       q->S == q->F + 1) {
804 		/*
805 		 * Entry is idle and not in any heap, expire it
806 		 */
807 		LIST_REMOVE(q, q_link);
808 		kfree(q, M_DUMMYNET);
809 
810 		KASSERT(fs->rq_elements > 0,
811 			("invalid rq_elements %d\n", fs->rq_elements));
812 		fs->rq_elements--;
813 	    }
814 	}
815 	if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */
816 	    LIST_REMOVE(q, q_link);
817 	    LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
818 	}
819     }
820     if (q == NULL) {	/* No match, need to allocate a new entry */
821 	q = create_queue(fs, i);
822 	if (q != NULL)
823 	    q->id = *id;
824     }
825     return q;
826 }
827 
828 static int
829 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
830 {
831     /*
832      * RED algorithm
833      *
834      * RED calculates the average queue size (avg) using a low-pass filter
835      * with an exponential weighted (w_q) moving average:
836      * 	avg  <-  (1-w_q) * avg + w_q * q_size
837      * where q_size is the queue length (measured in bytes or packets).
838      *
839      * If q_size == 0, we compute the idle time for the link, and set
840      *	avg = (1 - w_q)^(idle/s)
841      * where s is the time needed for transmitting a medium-sized packet.
842      *
843      * Now, if avg < min_th the packet is enqueued.
844      * If avg > max_th the packet is dropped. Otherwise, the packet is
845      * dropped with probability P function of avg.
846      */
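
    /*
     * Worked example (illustrative): with w_q = 0.002 and an
     * instantaneous queue of 20 against avg = 10, one update moves
     * avg by (20 - 10) * 0.002 = 0.02, so the filter needs many
     * samples to follow a burst; that sluggishness is the point.
     */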
847 
848     int64_t p_b = 0;
849     u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
850 
851     DPRINTF("\n%d q: %2u ", (int)curr_time, q_size);
852 
853     /* Average queue size estimation */
854     if (q_size != 0) {
855 	/*
856 	 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
857 	 */
858 	int diff = SCALE(q_size) - q->avg;
859 	int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
860 
861 	q->avg += (int)v;
862     } else {
863 	/*
864 	 * Queue is empty; compute for how long the queue has been
865 	 * empty and use a lookup table to compute
866 	 * (1 - w_q)^(idle_time/s), where s is the time needed to send a
867 	 * (small) packet.
868 	 * XXX check wraps...
869 	 */
870 	if (q->avg) {
871 	    u_int t = (curr_time - q->q_time) / fs->lookup_step;
872 
873 	    q->avg = (t < fs->lookup_depth) ?
874 		     SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
875 	}
876     }
877     DPRINTF("avg: %u ", SCALE_VAL(q->avg));
878 
879     /* Should i drop? */
880 
881     if (q->avg < fs->min_th) {
882 	/* Accept packet */
883 	q->count = -1;
884 	return 0;
885     }
886 
887     if (q->avg >= fs->max_th) { /* Average queue >=  Max threshold */
888 	if (fs->flags_fs & DN_IS_GENTLE_RED) {
889 	    /*
890 	     * According to Gentle-RED, if avg is greater than max_th the
891 	     * packet is dropped with a probability
892 	     *	p_b = c_3 * avg - c_4
893 	     * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
894 	     */
895 	    p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
896 	} else {
897 	    q->count = -1;
898 	    kprintf("- drop\n");
899 	    return 1;
900 	}
901     } else if (q->avg > fs->min_th) {
902 	/*
903 	 * We compute p_b using the linear dropping function p_b = c_1 *
904 	 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
905 	 * max_p * min_th / (max_th - min_th)
906 	 */
907 	p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
908     }
909     if (fs->flags_fs & DN_QSIZE_IS_BYTES)
910 	p_b = (p_b * len) / fs->max_pkt_size;
911 
912     if (++q->count == 0) {
913 	q->random = krandom() & 0xffff;
914     } else {
915 	/*
916 	 * q->count counts packets arrived since last drop, so a greater
917 	 * value of q->count means a greater packet drop probability.
918 	 */
919 	if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
920 	    q->count = 0;
921 	    DPRINTF("%s", "- red drop");
922 	    /* After a drop we calculate a new random value */
923 	    q->random = krandom() & 0xffff;
924 	    return 1;    /* Drop */
925 	}
926     }
927     /* End of RED algorithm */
928     return 0; /* Accept */
929 }
930 
931 static void
932 dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
933 {
934     int i;
935 
936     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
937 	struct dn_pipe_head *pipe_hdr = &pipe_table[i];
938 	struct dn_pipe *pipe, *pipe_next;
939 
940 	LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
941 	    func(pipe, arg);
942     }
943 }
944 
945 static void
946 dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
947 {
948     int i;
949 
950     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
951 	struct dn_flowset_head *fs_hdr = &flowset_table[i];
952 	struct dn_flow_set *fs, *fs_next;
953 
954 	LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
955 	    func(fs, arg);
956     }
957 }
958 
959 static struct dn_pipe *
960 dn_find_pipe(int pipe_nr)
961 {
962     struct dn_pipe_head *pipe_hdr;
963     struct dn_pipe *p;
964 
965     pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
966     LIST_FOREACH(p, pipe_hdr, p_link) {
967 	if (p->pipe_nr == pipe_nr)
968 	    break;
969     }
970     return p;
971 }
972 
973 static struct dn_flow_set *
974 dn_find_flowset(int fs_nr)
975 {
976     struct dn_flowset_head *fs_hdr;
977     struct dn_flow_set *fs;
978 
979     fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
980     LIST_FOREACH(fs, fs_hdr, fs_link) {
981 	if (fs->fs_nr == fs_nr)
982 	    break;
983     }
984     return fs;
985 }
986 
987 static struct dn_flow_set *
988 dn_locate_flowset(int pipe_nr, int is_pipe)
989 {
990     struct dn_flow_set *fs = NULL;
991 
992     if (!is_pipe) {
993 	fs = dn_find_flowset(pipe_nr);
994     } else {
995 	struct dn_pipe *p;
996 
997 	p = dn_find_pipe(pipe_nr);
998 	if (p != NULL)
999 	    fs = &p->fs;
1000     }
1001     return fs;
1002 }
1003 
1004 /*
1005  * Dummynet hook for packets.  Below 'pipe' is a pipe or a queue
1006  * depending on whether WF2Q or fixed bw is used.
1007  *
1008  * The packet arrives with a PACKET_TAG_DUMMYNET mtag attached; its
1009  * payload (struct dn_pkt) carries everything this hook needs:
1010  *
1011  * pipe_nr	pipe or queue the packet is destined for.
1012  * dn_flags	DN_FLAGS_IS_PIPE tells a pipe apart from a queue.
1013  * id		flow id, used to demultiplex the packet onto a
1014  *		flow_queue after masking (see find_queue()).
1015  * dn_m	the mbuf carrying the packet; only its length is
1016  *		used at this layer.
1017  */
1018 static int
1019 dummynet_io(struct mbuf *m)
1020 {
1021     struct dn_pkt *pkt;
1022     struct m_tag *tag;
1023     struct dn_flow_set *fs;
1024     struct dn_pipe *pipe;
1025     uint64_t len = m->m_pkthdr.len;
1026     struct dn_flow_queue *q = NULL;
1027     int is_pipe, pipe_nr;
1028 
1029     tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1030     pkt = m_tag_data(tag);
1031 
1032     is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
1033     pipe_nr = pkt->pipe_nr;
1034 
1035     /*
1036      * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule
1037      */
1038     fs = dn_locate_flowset(pipe_nr, is_pipe);
1039     if (fs == NULL)
1040 	goto dropit;	/* This queue/pipe does not exist! */
1041 
1042     pipe = fs->pipe;
1043     if (pipe == NULL) { /* Must be a queue, try to find a matching pipe */
1044 	pipe = dn_find_pipe(fs->parent_nr);
1045 	if (pipe != NULL) {
1046 	    fs->pipe = pipe;
1047 	} else {
1048 	    kprintf("No pipe %d for queue %d, drop pkt\n",
1049 	    	    fs->parent_nr, fs->fs_nr);
1050 	    goto dropit;
1051 	}
1052     }
1053 
1054     q = find_queue(fs, &pkt->id);
1055     if (q == NULL)
1056 	goto dropit;	/* Cannot allocate queue */
1057 
1058     /*
1059      * Update statistics, then check reasons to drop pkt
1060      */
1061     q->tot_bytes += len;
1062     q->tot_pkts++;
1063 
1064     if (fs->plr && krandom() < fs->plr)
1065 	goto dropit;	/* Random pkt drop */
1066 
1067     if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1068     	if (q->len_bytes > fs->qsize)
1069 	    goto dropit;	/* Queue size overflow */
1070     } else {
1071 	if (q->len >= fs->qsize)
1072 	    goto dropit;	/* Queue count overflow */
1073     }
1074 
1075     if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
1076 	goto dropit;
1077 
1078     TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
1079     q->len++;
1080     q->len_bytes += len;
1081 
1082     if (TAILQ_FIRST(&q->queue) != pkt)	/* Flow was not idle, we are done */
1083 	goto done;
1084 
1085     /*
1086      * If we reach this point the flow was previously idle, so we need
1087      * to schedule it.  This involves different actions for fixed-rate
1088      * or WF2Q queues.
1089      */
1090     if (is_pipe) {
1091 	/*
1092 	 * Fixed-rate queue: just insert into the ready_heap.
1093 	 */
1094 	dn_key t = 0;
1095 
1096 	if (pipe->bandwidth)
1097 	    t = SET_TICKS(pkt, q, pipe);
1098 
1099 	q->sched_time = curr_time;
1100 	if (t == 0)	/* Must process it now */
1101 	    ready_event(q);
1102 	else
1103 	    heap_insert(&ready_heap, curr_time + t, q);
1104     } else {
1105 	/*
1106 	 * WF2Q:
1107 	 * First, compute start time S: if the flow was idle (S=F+1)
1108 	 * set S to the virtual time V for the controlling pipe, and update
1109 	 * the sum of weights for the pipe; otherwise, remove flow from
1110 	 * idle_heap and set S to max(F, V).
1111 	 * Second, compute finish time F = S + len/weight.
1112 	 * Third, if pipe was idle, update V = max(S, V).
1113 	 * Fourth, count one more backlogged flow.
1114 	 */
1115 	if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid */
1116 	    q->S = pipe->V;
1117 	    pipe->sum += fs->weight; /* Add weight of new queue */
1118 	} else {
1119 	    heap_extract(&pipe->idle_heap, q);
1120 	    q->S = MAX64(q->F, pipe->V);
1121 	}
1122 	q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;
1123 
1124 	if (pipe->not_eligible_heap.elements == 0 &&
1125 	    pipe->scheduler_heap.elements == 0)
1126 	    pipe->V = MAX64(q->S, pipe->V);
1127 
1128 	fs->backlogged++;
1129 
1130 	/*
1131 	 * Look at eligibility.  A flow is not eligible if S>V (when
1132 	 * this happens, it means that there is some other flow already
1133 	 * scheduled for the same pipe, so the scheduler_heap cannot be
1134 	 * empty).  If the flow is not eligible we just store it in the
1135 	 * not_eligible_heap.  Otherwise, we store in the scheduler_heap
1136 	 * and possibly invoke ready_event_wfq() right now if there is
1137 	 * leftover credit.
1138 	 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1139 	 * and for all flows in not_eligible_heap (NEH), S_i > V.
1140 	 * So when we need to compute max(V, min(S_i)) for all i in SCH+NEH,
1141 	 * we only need to look into NEH.
1142 	 */
1143 	if (DN_KEY_GT(q->S, pipe->V)) {	/* Not eligible */
1144 	    if (pipe->scheduler_heap.elements == 0)
1145 		kprintf("++ ouch! not eligible but empty scheduler!\n");
1146 	    heap_insert(&pipe->not_eligible_heap, q->S, q);
1147 	} else {
1148 	    heap_insert(&pipe->scheduler_heap, q->F, q);
1149 	    if (pipe->numbytes >= 0) {	/* Pipe is idle */
1150 		if (pipe->scheduler_heap.elements != 1)
1151 		    kprintf("*** OUCH! pipe should have been idle!\n");
1152 		DPRINTF("Waking up pipe %d at %d\n",
1153 			pipe->pipe_nr, (int)(q->F >> MY_M));
1154 		pipe->sched_time = curr_time;
1155 		ready_event_wfq(pipe);
1156 	    }
1157 	}
1158     }
1159 done:
1160     return 0;
1161 
1162 dropit:
1163     if (q)
1164 	q->drops++;
1165     return ENOBUFS;
1166 }
1167 
1168 /*
1169  * Dispose all packets and flow_queues on a flow_set.
1170  * If all=1, also remove red lookup table and other storage,
1171  * including the descriptor itself.
1172  * For the flow_set embedded in a dn_pipe, the caller must also clean up ready_heap.
1173  */
1174 static void
1175 purge_flow_set(struct dn_flow_set *fs, int all)
1176 {
1177     int i;
1178 #ifdef INVARIANTS
1179     int rq_elements = 0;
1180 #endif
1181 
1182     for (i = 0; i <= fs->rq_size; i++) {
1183 	struct dn_flow_queue *q;
1184 
1185 	while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
1186 	    struct dn_pkt *pkt;
1187 
1188 	    while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
1189 	    	TAILQ_REMOVE(&q->queue, pkt, dn_next);
1190 	    	ip_dn_packet_free(pkt);
1191 	    }
1192 
1193 	    LIST_REMOVE(q, q_link);
1194 	    kfree(q, M_DUMMYNET);
1195 
1196 #ifdef INVARIANTS
1197 	    rq_elements++;
1198 #endif
1199 	}
1200     }
1201     KASSERT(rq_elements == fs->rq_elements,
1202 	    ("# rq elements mismatch, freed %d, total %d\n",
1203 	     rq_elements, fs->rq_elements));
1204     fs->rq_elements = 0;
1205 
1206     if (all) {
1207 	/* RED - free lookup table */
1208 	if (fs->w_q_lookup)
1209 	    kfree(fs->w_q_lookup, M_DUMMYNET);
1210 
1211 	if (fs->rq)
1212 	    kfree(fs->rq, M_DUMMYNET);
1213 
1214 	/*
1215 	 * If this fs is not part of a pipe, free it
1216 	 *
1217 	 * fs->pipe == NULL could happen, if 'fs' is a WF2Q and
1218 	 * - No packet belongs to that flow set is delivered by
1219 	 *   dummynet_io(), i.e. parent pipe is not installed yet.
1220 	 * - Parent pipe is deleted.
1221 	 */
1222 	if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
1223 	    kfree(fs, M_DUMMYNET);
1224     }
1225 }
1226 
1227 /*
1228  * Dispose all packets queued on a pipe (not a flow_set).
1229  * Also free all resources associated to a pipe, which is about
1230  * to be deleted.
1231  */
1232 static void
1233 purge_pipe(struct dn_pipe *pipe)
1234 {
1235     struct dn_pkt *pkt;
1236 
1237     purge_flow_set(&pipe->fs, 1);
1238 
1239     while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
1240 	TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
1241 	ip_dn_packet_free(pkt);
1242     }
1243 
1244     heap_free(&pipe->scheduler_heap);
1245     heap_free(&pipe->not_eligible_heap);
1246     heap_free(&pipe->idle_heap);
1247 }
1248 
1249 /*
1250  * Delete all pipes and heaps returning memory.
1251  */
1252 static void
1253 dummynet_flush(void)
1254 {
1255     struct dn_pipe_head pipe_list;
1256     struct dn_flowset_head fs_list;
1257     struct dn_pipe *p;
1258     struct dn_flow_set *fs;
1259     int i;
1260 
1261     /*
1262      * Prevent future matches...
1263      */
1264     LIST_INIT(&pipe_list);
1265     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1266 	struct dn_pipe_head *pipe_hdr = &pipe_table[i];
1267 
1268     	while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
1269 	    LIST_REMOVE(p, p_link);
1270 	    LIST_INSERT_HEAD(&pipe_list, p, p_link);
1271 	}
1272     }
1273 
1274     LIST_INIT(&fs_list);
1275     for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1276 	struct dn_flowset_head *fs_hdr = &flowset_table[i];
1277 
1278 	while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
1279 	    LIST_REMOVE(fs, fs_link);
1280 	    LIST_INSERT_HEAD(&fs_list, fs, fs_link);
1281 	}
1282     }
1283 
1284     /* Free heaps so we don't have unwanted events */
1285     heap_free(&ready_heap);
1286     heap_free(&wfq_ready_heap);
1287     heap_free(&extract_heap);
1288 
1289     /*
1290      * Now purge all queued pkts and delete all pipes
1291      */
1292     /* Scan and purge all flow_sets. */
1293     while ((fs = LIST_FIRST(&fs_list)) != NULL) {
1294 	LIST_REMOVE(fs, fs_link);
1295 	purge_flow_set(fs, 1);
1296     }
1297 
1298     while ((p = LIST_FIRST(&pipe_list)) != NULL) {
1299 	LIST_REMOVE(p, p_link);
1300 	purge_pipe(p);
1301 	kfree(p, M_DUMMYNET);
1302     }
1303 }
1304 
1305 /*
1306  * setup RED parameters
1307  */
1308 static int
1309 config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
1310 {
1311     int i;
1312 
1313     x->w_q = ioc_fs->w_q;
1314     x->min_th = SCALE(ioc_fs->min_th);
1315     x->max_th = SCALE(ioc_fs->max_th);
1316     x->max_p = ioc_fs->max_p;
1317 
1318     x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
1319     x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
1320     if (x->flags_fs & DN_IS_GENTLE_RED) {
1321 	x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
1322 	x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
1323     }
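
    /*
     * Worked example (illustrative): max_p = 0.1, min_th = 5 and
     * max_th = 15 give c_1 = 0.01 and c_2 = 0.05, so at avg = 10 the
     * drop probability is p_b = 0.01 * 10 - 0.05 = 0.05, i.e. halfway
     * up the RED ramp toward max_p.
     */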
1324 
1325     /* If the lookup table already exists, free and create it again */
1326     if (x->w_q_lookup) {
1327 	kfree(x->w_q_lookup, M_DUMMYNET);
1328 	x->w_q_lookup = NULL;
1329     }
1330 
1331     if (red_lookup_depth == 0) {
1332 	kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1333 	/* 'x' is owned by the caller, so it must not be freed here */
1334 	return EINVAL;
1335     }
1336     x->lookup_depth = red_lookup_depth;
1337     x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
1338     			    M_DUMMYNET, M_WAITOK);
1339 
1340     /* Fill the lookup table with (1 - w_q)^x */
1341     x->lookup_step = ioc_fs->lookup_step;
1342     x->lookup_weight = ioc_fs->lookup_weight;
1343 
1344     x->w_q_lookup[0] = SCALE(1) - x->w_q;
1345     for (i = 1; i < x->lookup_depth; i++)
1346 	x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1347 
1348     if (red_avg_pkt_size < 1)
1349 	red_avg_pkt_size = 512;
1350     x->avg_pkt_size = red_avg_pkt_size;
1351 
1352     if (red_max_pkt_size < 1)
1353 	red_max_pkt_size = 1500;
1354     x->max_pkt_size = red_max_pkt_size;
1355 
1356     return 0;
1357 }
1358 
1359 static void
1360 alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1361 {
1362     int i, alloc_size;
1363 
1364     if (x->flags_fs & DN_HAVE_FLOW_MASK) {
1365 	int l = ioc_fs->rq_size;
1366 
1367 	/* Allocate some slots */
1368 	if (l == 0)
1369 	    l = dn_hash_size;
1370 
1371 	if (l < DN_MIN_HASH_SIZE)
1372 	    l = DN_MIN_HASH_SIZE;
1373 	else if (l > DN_MAX_HASH_SIZE)
1374 	    l = DN_MAX_HASH_SIZE;
1375 
1376 	x->rq_size = l;
1377     } else {
1378 	/* One is enough for null mask */
1379 	x->rq_size = 1;
1380     }
1381     alloc_size = x->rq_size + 1;
1382 
1383     x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
1384 		    M_DUMMYNET, M_WAITOK | M_ZERO);
1385     x->rq_elements = 0;
1386 
1387     for (i = 0; i < alloc_size; ++i)
1388 	LIST_INIT(&x->rq[i]);
1389 }
1390 
1391 static void
1392 set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
1393 {
1394     id->fid_dst_ip = ioc_id->u.ip.dst_ip;
1395     id->fid_src_ip = ioc_id->u.ip.src_ip;
1396     id->fid_dst_port = ioc_id->u.ip.dst_port;
1397     id->fid_src_port = ioc_id->u.ip.src_port;
1398     id->fid_proto = ioc_id->u.ip.proto;
1399     id->fid_flags = ioc_id->u.ip.flags;
1400 }
1401 
1402 static void
1403 set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1404 {
1405     x->flags_fs = ioc_fs->flags_fs;
1406     x->qsize = ioc_fs->qsize;
1407     x->plr = ioc_fs->plr;
1408     set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
1409     if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1410 	if (x->qsize > 1024 * 1024)
1411 	    x->qsize = 1024 * 1024;
1412     } else {
1413 	if (x->qsize == 0 || x->qsize > 100)
1414 	    x->qsize = 50;
1415     }
1416 
1417     /* Configuring RED */
1418     if (x->flags_fs & DN_IS_RED)
1419 	config_red(ioc_fs, x);	/* XXX should check errors */
1420 }
1421 
1422 /*
1423  * setup pipe or queue parameters.
1424  */
1425 
1426 static int
1427 config_pipe(struct dn_ioc_pipe *ioc_pipe)
1428 {
1429     struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
1430     int error;
1431 
1432     /*
1433      * The config program passes parameters as follows:
1434      * bw	bits/second (0 means no limits)
1435      * delay	ms (must be translated into ticks)
1436      * qsize	slots or bytes
1437      */
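
    /*
     * E.g. "ipfw pipe 1 config bw 1Mbit/s delay 20ms" arrives here
     * with bandwidth = 1000000 and delay = 20; with dn_hz = 1000 the
     * conversion below turns the 20 ms delay into 20 ticks.
     */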
1438     ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;
1439 
1440     /*
1441      * We need either a pipe number or a flow_set number
1442      */
1443     if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
1444 	return EINVAL;
1445     if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
1446 	return EINVAL;
1447 
1448     /*
1449      * Validate pipe number
1450      */
1451     if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
1452 	return EINVAL;
1453 
1454     error = EINVAL;
1455     if (ioc_pipe->pipe_nr != 0) {	/* This is a pipe */
1456 	struct dn_pipe *x, *p;
1457 
1458 	/* Locate pipe */
1459 	p = dn_find_pipe(ioc_pipe->pipe_nr);
1460 
1461 	if (p == NULL) {	/* New pipe */
1462 	    x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
1463 	    x->pipe_nr = ioc_pipe->pipe_nr;
1464 	    x->fs.pipe = x;
1465 	    TAILQ_INIT(&x->p_queue);
1466 
1467 	    /*
1468 	     * idle_heap is the only one from which we extract from the middle.
1469 	     */
1470 	    x->idle_heap.size = x->idle_heap.elements = 0;
1471 	    x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
1472 	} else {
1473 	    int i;
1474 
1475 	    x = p;
1476 
1477 	    /* Flush accumulated credit for all queues */
1478 	    for (i = 0; i <= x->fs.rq_size; i++) {
1479 		struct dn_flow_queue *q;
1480 
1481 		LIST_FOREACH(q, &x->fs.rq[i], q_link)
1482 		    q->numbytes = 0;
1483 	    }
1484 	}
1485 
1486 	x->bandwidth = ioc_pipe->bandwidth;
1487 	x->numbytes = 0; /* Just in case... */
1488 	x->delay = ioc_pipe->delay;
1489 
1490 	set_fs_parms(&x->fs, ioc_fs);
1491 
1492 	if (x->fs.rq == NULL) {	/* A new pipe */
1493 	    struct dn_pipe_head *pipe_hdr;
1494 
1495 	    alloc_hash(&x->fs, ioc_fs);
1496 
1497 	    pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
1498 	    LIST_INSERT_HEAD(pipe_hdr, x, p_link);
1499 	}
1500     } else {	/* Config flow_set */
1501 	struct dn_flow_set *x, *fs;
1502 
1503 	/* Locate flow_set */
1504 	fs = dn_find_flowset(ioc_fs->fs_nr);
1505 
1506 	if (fs == NULL) {	/* New flow_set */
1507 	    if (ioc_fs->parent_nr == 0)	/* Need link to a pipe */
1508 		goto back;
1509 
1510 	    x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
1511 	    		M_WAITOK | M_ZERO);
1512 	    x->fs_nr = ioc_fs->fs_nr;
1513 	    x->parent_nr = ioc_fs->parent_nr;
1514 	    x->weight = ioc_fs->weight;
1515 	    if (x->weight == 0)
1516 		x->weight = 1;
1517 	    else if (x->weight > 100)
1518 		x->weight = 100;
1519 	} else {
1520 	    /* Change parent pipe not allowed; must delete and recreate */
1521 	    if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
1522 		goto back;
1523 	    x = fs;
1524 	}
1525 
1526 	set_fs_parms(x, ioc_fs);
1527 
1528 	if (x->rq == NULL) {	/* A new flow_set */
1529 	    struct dn_flowset_head *fs_hdr;
1530 
1531 	    alloc_hash(x, ioc_fs);
1532 
1533 	    fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
1534 	    LIST_INSERT_HEAD(fs_hdr, x, fs_link);
1535 	}
1536     }
1537     error = 0;
1538 
1539 back:
1540     return error;
1541 }
1542 
1543 /*
1544  * Helper function to remove from a heap queues which are linked to
1545  * a flow_set about to be deleted.
1546  */
1547 static void
1548 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
1549 {
1550     int i = 0, found = 0;
1551 
1552     while (i < h->elements) {
1553 	if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
1554 	    h->elements--;
1555 	    h->p[i] = h->p[h->elements];
1556 	    found++;
1557 	} else {
1558 	    i++;
1559 	}
1560     }
1561     if (found)
1562 	heapify(h);
1563 }
1564 
1565 /*
1566  * helper function to remove a pipe from a heap (can be there at most once)
1567  */
1568 static void
1569 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
1570 {
1571     if (h->elements > 0) {
1572 	int i;
1573 
1574 	for (i = 0; i < h->elements; i++) {
1575 	    if (h->p[i].object == p) { /* found it */
1576 		h->elements--;
1577 		h->p[i] = h->p[h->elements];
1578 		heapify(h);
1579 		break;
1580 	    }
1581 	}
1582     }
1583 }
1584 
1585 static void
1586 dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
1587 {
1588     struct dn_pipe *pipe = pipe0;
1589 
1590     if (fs->pipe == pipe) {
1591 	kprintf("++ ref to pipe %d from fs %d\n",
1592 		pipe->pipe_nr, fs->fs_nr);
1593 	fs->pipe = NULL;
1594 	purge_flow_set(fs, 0);
1595     }
1596 }
1597 
1598 /*
1599  * Fully delete a pipe or a queue, cleaning up associated info.
1600  */
1601 static int
1602 delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
1603 {
1604     struct dn_pipe *p;
1605     int error;
1606 
1607     if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
1608 	return EINVAL;
1609     if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
1610 	return EINVAL;
1611 
1612     if (ioc_pipe->pipe_nr > DN_NR_HASH_MAX || ioc_pipe->pipe_nr < 0)
1613     	return EINVAL;
1614 
1615     error = EINVAL;
1616     if (ioc_pipe->pipe_nr != 0) {	/* This is an old-style pipe */
1617 	/* Locate pipe */
1618 	p = dn_find_pipe(ioc_pipe->pipe_nr);
1619 	if (p == NULL)
1620 	    goto back; /* Not found */
1621 
1622 	/* Unlink from pipe hash table */
1623 	LIST_REMOVE(p, p_link);
1624 
1625 	/* Remove all references to this pipe from flow_sets */
1626 	dn_iterate_flowset(dn_unref_pipe_cb, p);
1627 
1628 	fs_remove_from_heap(&ready_heap, &p->fs);
1629 	purge_pipe(p);	/* Remove all data associated to this pipe */
1630 
1631 	/* Remove reference to here from extract_heap and wfq_ready_heap */
1632 	pipe_remove_from_heap(&extract_heap, p);
1633 	pipe_remove_from_heap(&wfq_ready_heap, p);
1634 
1635 	kfree(p, M_DUMMYNET);
1636     } else {	/* This is a WF2Q queue (dn_flow_set) */
1637 	struct dn_flow_set *fs;
1638 
1639 	/* Locate flow_set */
1640 	fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
1641 	if (fs == NULL)
1642 	    goto back; /* Not found */
1643 
1644 	LIST_REMOVE(fs, fs_link);
1645 
1646 	if ((p = fs->pipe) != NULL) {
1647 	    /* Update total weight on parent pipe and cleanup parent heaps */
1648 	    p->sum -= fs->weight * fs->backlogged;
1649 	    fs_remove_from_heap(&p->not_eligible_heap, fs);
1650 	    fs_remove_from_heap(&p->scheduler_heap, fs);
1651 #if 1	/* XXX should i remove from idle_heap as well ? */
1652 	    fs_remove_from_heap(&p->idle_heap, fs);
1653 #endif
1654 	}
1655 	purge_flow_set(fs, 1);
1656     }
1657     error = 0;
1658 
1659 back:
1660     return error;
1661 }
1662 
1663 /*
1664  * helper function used to copy data from kernel in DUMMYNET_GET
1665  */
1666 static void
1667 dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
1668 {
1669     ioc_id->type = ETHERTYPE_IP;
1670     ioc_id->u.ip.dst_ip = id->fid_dst_ip;
1671     ioc_id->u.ip.src_ip = id->fid_src_ip;
1672     ioc_id->u.ip.dst_port = id->fid_dst_port;
1673     ioc_id->u.ip.src_port = id->fid_src_port;
1674     ioc_id->u.ip.proto = id->fid_proto;
1675     ioc_id->u.ip.flags = id->fid_flags;
1676 }
1677 
1678 static void *
1679 dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
1680 {
1681     struct dn_ioc_flowqueue *ioc_fq = bp;
1682     int i, copied = 0;
1683 
1684     for (i = 0; i <= fs->rq_size; i++) {
1685 	const struct dn_flow_queue *q;
1686 
1687 	LIST_FOREACH(q, &fs->rq[i], q_link) {
1688 	    if (q->hash_slot != i) {	/* XXX ASSERT */
1689 		kprintf("++ at %d: wrong slot (have %d, "
1690 			"should be %d)\n", copied, q->hash_slot, i);
1691 	    }
1692 	    if (q->fs != fs) {		/* XXX ASSERT */
1693 		kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
1694 			i, q->fs, fs);
1695 	    }
1696 
1697 	    copied++;
1698 
1699 	    ioc_fq->len = q->len;
1700 	    ioc_fq->len_bytes = q->len_bytes;
1701 	    ioc_fq->tot_pkts = q->tot_pkts;
1702 	    ioc_fq->tot_bytes = q->tot_bytes;
1703 	    ioc_fq->drops = q->drops;
1704 	    ioc_fq->hash_slot = q->hash_slot;
1705 	    ioc_fq->S = q->S;
1706 	    ioc_fq->F = q->F;
1707 	    dn_copy_flowid(&q->id, &ioc_fq->id);
1708 
1709 	    ioc_fq++;
1710 	}
1711     }
1712 
1713     if (copied != fs->rq_elements) {	/* XXX ASSERT */
1714 	kprintf("++ wrong count, have %d should be %d\n",
1715 		copied, fs->rq_elements);
1716     }
1717     return ioc_fq;
1718 }
1719 
1720 static void
1721 dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
1722 		u_short fs_type)
1723 {
1724     ioc_fs->fs_type = fs_type;
1725 
1726     ioc_fs->fs_nr = fs->fs_nr;
1727     ioc_fs->flags_fs = fs->flags_fs;
1728     ioc_fs->parent_nr = fs->parent_nr;
1729 
1730     ioc_fs->weight = fs->weight;
1731     ioc_fs->qsize = fs->qsize;
1732     ioc_fs->plr = fs->plr;
1733 
1734     ioc_fs->rq_size = fs->rq_size;
1735     ioc_fs->rq_elements = fs->rq_elements;
1736 
1737     ioc_fs->w_q = fs->w_q;
1738     ioc_fs->max_th = fs->max_th;
1739     ioc_fs->min_th = fs->min_th;
1740     ioc_fs->max_p = fs->max_p;
1741 
1742     dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
1743 }
1744 
1745 static void
1746 dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
1747 {
1748     size_t *size = sz;
1749 
1750     *size += sizeof(struct dn_ioc_pipe) +
1751 	     pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
1752 }
1753 
1754 static void
1755 dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
1756 {
1757     size_t *size = sz;
1758 
1759     *size += sizeof(struct dn_ioc_flowset) +
1760 	     fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
1761 }
1762 
1763 static void
1764 dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
1765 {
1766     char **bp = bp0;
1767     struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);
1768 
1769     /*
1770      * Copy flow set descriptor associated with this pipe
1771      */
1772     dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);
1773 
1774     /*
1775      * Copy pipe descriptor
1776      */
1777     ioc_pipe->bandwidth = pipe->bandwidth;
1778     ioc_pipe->pipe_nr = pipe->pipe_nr;
1779     ioc_pipe->V = pipe->V;
1780     /* Convert delay to milliseconds */
1781     ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;
1782 
1783     /*
1784      * Copy flow queue descriptors
1785      */
1786     *bp += sizeof(*ioc_pipe);
1787     *bp = dn_copy_flowqueues(&pipe->fs, *bp);
1788 }
1789 
1790 static void
1791 dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
1792 {
1793     char **bp = bp0;
1794     struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);
1795 
1796     /*
1797      * Copy flow set descriptor
1798      */
1799     dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);
1800 
1801     /*
1802      * Copy flow queue descriptors
1803      */
1804     *bp += sizeof(*ioc_fs);
1805     *bp = dn_copy_flowqueues(fs, *bp);
1806 }
1807 
1808 static int
1809 dummynet_get(struct dn_sopt *dn_sopt)
1810 {
1811     char *buf, *bp;
1812     size_t size = 0;
1813 
1814     /*
1815      * Compute size of data structures: list of pipes and flow_sets.
1816      */
1817     dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
1818     dn_iterate_flowset(dn_calc_fs_size_cb, &size);
1819 
1820     /*
1821      * Copyout pipe/flow_set/flow_queue
1822      */
1823     bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
1824     dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
1825     dn_iterate_flowset(dn_copyout_fs_cb, &bp);
1826 
1827     /* Temp memory will be freed by caller */
1828     dn_sopt->dn_sopt_arg = buf;
1829     dn_sopt->dn_sopt_arglen = size;
1830     return 0;
1831 }
1832 
1833 /*
1834  * Handler for the various dummynet socket options (get, flush, config, del)
1835  */
1836 static int
1837 dummynet_ctl(struct dn_sopt *dn_sopt)
1838 {
1839     int error = 0;
1840 
1841     switch (dn_sopt->dn_sopt_name) {
1842     case IP_DUMMYNET_GET:
1843 	error = dummynet_get(dn_sopt);
1844 	break;
1845 
1846     case IP_DUMMYNET_FLUSH:
1847 	dummynet_flush();
1848 	break;
1849 
1850     case IP_DUMMYNET_CONFIGURE:
1851 	KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1852 	error = config_pipe(dn_sopt->dn_sopt_arg);
1853 	break;
1854 
1855     case IP_DUMMYNET_DEL:	/* Remove a pipe or flow_set */
1856 	KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1857 	error = delete_pipe(dn_sopt->dn_sopt_arg);
1858 	break;
1859 
1860     default:
1861 	kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
1862 	error = EINVAL;
1863 	break;
1864     }
1865     return error;
1866 }
1867 
1868 static void
1869 dummynet_clock(systimer_t info __unused, struct intrframe *frame __unused)
1870 {
1871     KASSERT(mycpuid == ip_dn_cpu,
1872     	    ("dummynet systimer comes on cpu%d, should be %d!\n",
1873 	     mycpuid, ip_dn_cpu));
1874 
1875     crit_enter();
1876     if (DUMMYNET_LOADED && (dn_netmsg.nm_lmsg.ms_flags & MSGF_DONE))
1877 	lwkt_sendmsg(cpu_portfn(mycpuid), &dn_netmsg.nm_lmsg);
1878     crit_exit();
1879 }
1880 
1881 static int
1882 sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
1883 {
1884     int error, val;
1885 
1886     val = dn_hz;
1887     error = sysctl_handle_int(oidp, &val, 0, req);
1888     if (error || req->newptr == NULL)
1889 	return error;
1890     if (val <= 0)
1891 	return EINVAL;
1892     else if (val > DN_CALLOUT_FREQ_MAX)
1893 	val = DN_CALLOUT_FREQ_MAX;
1894 
1895     crit_enter();
1896     dn_hz = val;
1897     systimer_adjust_periodic(&dn_clock, val);
1898     crit_exit();
1899 
1900     return 0;
1901 }
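
/*
 * E.g. setting net.inet.ip.dummynet.hz to 2000 via sysctl(8) halves
 * the tick to 0.5 ms for finer-grained shaping at the cost of more
 * timer work; values above DN_CALLOUT_FREQ_MAX (10000) are clamped.
 */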
1902 
1903 static void
1904 ip_dn_init_dispatch(struct netmsg *msg)
1905 {
1906     int i, error = 0;
1907 
1908     KASSERT(mycpuid == ip_dn_cpu,
1909     	    ("%s runs on cpu%d, instead of cpu%d", __func__,
1910 	     mycpuid, ip_dn_cpu));
1911 
1912     crit_enter();
1913 
1914     if (DUMMYNET_LOADED) {
1915 	kprintf("DUMMYNET already loaded\n");
1916 	error = EEXIST;
1917 	goto back;
1918     }
1919 
1920     kprintf("DUMMYNET initialized (011031)\n");
1921 
1922     for (i = 0; i < DN_NR_HASH_MAX; ++i)
1923     	LIST_INIT(&pipe_table[i]);
1924 
1925     for (i = 0; i < DN_NR_HASH_MAX; ++i)
1926 	LIST_INIT(&flowset_table[i]);
1927 
1928     ready_heap.size = ready_heap.elements = 0;
1929     ready_heap.offset = 0;
1930 
1931     wfq_ready_heap.size = wfq_ready_heap.elements = 0;
1932     wfq_ready_heap.offset = 0;
1933 
1934     extract_heap.size = extract_heap.elements = 0;
1935     extract_heap.offset = 0;
1936 
1937     ip_dn_ctl_ptr = dummynet_ctl;
1938     ip_dn_io_ptr = dummynet_io;
1939 
1940     netmsg_init(&dn_netmsg, &netisr_adone_rport, 0, dummynet);
1941     systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
1942 
1943 back:
1944     crit_exit();
1945     lwkt_replymsg(&msg->nm_lmsg, error);
1946 }
1947 
1948 static void
1949 ip_dn_stop_dispatch(struct netmsg *msg)
1950 {
1951     crit_enter();
1952 
1953     dummynet_flush();
1954 
1955     ip_dn_ctl_ptr = NULL;
1956     ip_dn_io_ptr = NULL;
1957 
1958     systimer_del(&dn_clock);
1959 
1960     crit_exit();
1961     lwkt_replymsg(&msg->nm_lmsg, 0);
1962 }
1963 
1964 static int
1965 ip_dn_init(void)
1966 {
1967     struct netmsg smsg;
1968 
1969     if (ip_dn_cpu >= ncpus) {
1970 	kprintf("%s: CPU%d does not exist, switch to CPU0\n",
1971 		__func__, ip_dn_cpu);
1972 	ip_dn_cpu = 0;
1973     }
1974 
1975     netmsg_init(&smsg, &curthread->td_msgport, 0, ip_dn_init_dispatch);
1976     lwkt_domsg(cpu_portfn(ip_dn_cpu), &smsg.nm_lmsg, 0);
1977     return smsg.nm_lmsg.ms_error;
1978 }
1979 
1980 static void
1981 ip_dn_stop(void)
1982 {
1983     struct netmsg smsg;
1984 
1985     netmsg_init(&smsg, &curthread->td_msgport, 0, ip_dn_stop_dispatch);
1986     lwkt_domsg(cpu_portfn(ip_dn_cpu), &smsg.nm_lmsg, 0);
1987 
1988     netmsg_service_sync();
1989 }
1990 
1991 static int
1992 dummynet_modevent(module_t mod, int type, void *data)
1993 {
1994     switch (type) {
1995     case MOD_LOAD:
1996 	return ip_dn_init();
1997 
1998     case MOD_UNLOAD:
1999 #ifndef KLD_MODULE
2000 	kprintf("dummynet statically compiled, cannot unload\n");
2001 	return EINVAL;
2002 #else
2003 	ip_dn_stop();
2004 #endif
2005 	break;
2006 
2007     default:
2008 	break;
2009     }
2010     return 0;
2011 }
2012 
2013 static moduledata_t dummynet_mod = {
2014     "dummynet",
2015     dummynet_modevent,
2016     NULL
2017 };
2018 DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2019 MODULE_VERSION(dummynet, 1);
2020