xref: /dragonfly/sys/net/dummynet3/ip_dummynet3.c (revision bb8c85ff)
1 /*
2  * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
3  * Portions Copyright (c) 2000 Akamba Corp.
4  * All rights reserved
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
28  */
29 
30 #include "opt_ipdn.h"
31 
32 /*
33  * This module implements IP dummynet, a bandwidth limiter/delay emulator.
34  * Description of the data structures used is in ip_dummynet.h
35  * Here you mainly find the following blocks of code:
36  *  + variable declarations;
37  *  + heap management functions;
38  *  + scheduler and dummynet functions;
39  *  + configuration and initialization.
40  *
41  * Most important Changes:
42  *
43  * 011004: KLDable
44  * 010124: Fixed WF2Q behaviour
45  * 010122: Fixed spl protection.
46  * 000601: WF2Q support
47  * 000106: Large rewrite, use heaps to handle very many pipes.
48  * 980513: Initial release
49  */
50 
51 #include <sys/param.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54 #include <sys/mbuf.h>
55 #include <sys/socketvar.h>
56 #include <sys/sysctl.h>
57 #include <sys/systimer.h>
58 #include <sys/thread2.h>
59 
60 #include <net/ethernet.h>
61 #include <net/netmsg2.h>
62 #include <net/netisr2.h>
63 #include <net/route.h>
64 
65 #include <netinet/in_var.h>
66 #include <netinet/ip_var.h>
67 
68 #include <net/dummynet3/ip_dummynet3.h>
69 #include <net/ipfw3/ip_fw.h>
70 
71 void check_pipe(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
72 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
73 void check_queue(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
74 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
75 
76 void
77 check_pipe(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
78 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
79 {
80 	(*args)->rule = *f;
81 	(*args)->cookie = cmd->arg1;
82 	*cmd_val = IP_FW_DUMMYNET;
83 	*cmd_ctl = IP_FW_CTL_DONE;
84 }
85 
86 void
87 check_queue(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
88 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
89 {
90 	(*args)->rule = *f;
91 	(*args)->cookie = cmd->arg1;
92 	*cmd_val = IP_FW_DUMMYNET;
93 	*cmd_ctl = IP_FW_CTL_DONE;
94 }
95 
96 #ifdef DUMMYNET_DEBUG
97 #define DPRINTF(fmt, ...)	kprintf(fmt, __VA_ARGS__)
98 #else
99 #define DPRINTF(fmt, ...)	((void)0)
100 #endif
101 
102 #ifndef DN_CALLOUT_FREQ_MAX
103 #define DN_CALLOUT_FREQ_MAX	10000
104 #endif
105 
106 /*
107  * The maximum/minimum hash table size for queues.
108  * These values must be a power of 2.
109  */
110 #define DN_MIN_HASH_SIZE	4
111 #define DN_MAX_HASH_SIZE	65536
112 
113 /*
114  * Some macros are used to compare key values and handle wraparounds.
115  * MAX64 returns the largest of two key values.
116  */
117 #define DN_KEY_LT(a, b)		((int64_t)((a) - (b)) < 0)
118 #define DN_KEY_LEQ(a, b)	((int64_t)((a) - (b)) <= 0)
119 #define DN_KEY_GT(a, b)		((int64_t)((a) - (b)) > 0)
120 #define DN_KEY_GEQ(a, b)	((int64_t)((a) - (b)) >= 0)
121 #define MAX64(x, y)		((((int64_t)((y) - (x))) > 0) ? (y) : (x))
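/*
 * Illustrative example of the wraparound-safe comparisons: with 64-bit
 * keys, take b = 0xfffffffffffffff0 (just before the counter wraps) and
 * a = 5 (just after the wrap).  Numerically a < b, but (int64_t)(a - b)
 * is 21 > 0, so DN_KEY_GT(a, b) holds and 'a' is correctly treated as
 * the later key.
 */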
122 
123 #define DN_NR_HASH_MAX		16
124 #define DN_NR_HASH_MASK		(DN_NR_HASH_MAX - 1)
125 #define DN_NR_HASH(nr)		\
126 	((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)
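/*
 * For pipe/flow_set numbers that fit in 16 bits, DN_NR_HASH() simply
 * XOR-folds the four nibbles into a 4-bit bucket index, e.g.
 * DN_NR_HASH(0x1234) = 1 ^ 2 ^ 3 ^ 4 = 4.
 */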
127 
128 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
129 
130 extern int	ip_dn_cpu;
131 
132 static dn_key	curr_time = 0;		/* current simulation time */
133 static int	dn_hash_size = 64;	/* default hash size */
134 static int	pipe_expire = 1;	/* expire queue if empty */
135 static int	dn_max_ratio = 16;	/* max queues/buckets ratio */
136 
137 /*
138  * Statistics on number of queue searches and search steps
139  */
140 static int	searches;
141 static int	search_steps;
142 
143 /*
144  * RED parameters
145  */
146 static int	red_lookup_depth = 256;	/* default lookup table depth */
147 static int	red_avg_pkt_size = 512;	/* default medium packet size */
148 static int	red_max_pkt_size = 1500;/* default max packet size */
149 
150 /*
151  * Three heaps contain queues and pipes that the scheduler handles:
152  *
153  *  + ready_heap	contains all dn_flow_queues related to fixed-rate pipes;
154  *  + wfq_ready_heap	contains the pipes associated with WF2Q flows;
155  *  + extract_heap	contains the pipes associated with delay lines.
156  */
157 static struct dn_heap	ready_heap;
158 static struct dn_heap	extract_heap;
159 static struct dn_heap	wfq_ready_heap;
160 
161 static struct dn_pipe_head	pipe_table[DN_NR_HASH_MAX];
162 static struct dn_flowset_head	flowset_table[DN_NR_HASH_MAX];
163 
164 /*
165  * Variables for dummynet systimer
166  */
167 static struct netmsg_base dn_netmsg;
168 static struct systimer	dn_clock;
169 static int		dn_hz = 1000;
170 
171 static int	sysctl_dn_hz(SYSCTL_HANDLER_ARGS);
172 
173 SYSCTL_DECL(_net_inet_ip_dummynet);
174 
175 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
176 		&dn_hash_size, 0, "Default hash table size");
177 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
178 		&curr_time, 0, "Current tick");
179 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
180 		&pipe_expire, 0, "Expire queue if empty");
181 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
182 		&dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");
183 
184 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
185 		&ready_heap.size, 0, "Size of ready heap");
186 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
187 		&extract_heap.size, 0, "Size of extract heap");
188 
189 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
190 		&searches, 0, "Number of queue searches");
191 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
192 		&search_steps, 0, "Number of queue search steps");
193 
194 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
195 		&red_lookup_depth, 0, "Depth of RED lookup table");
196 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
197 		&red_avg_pkt_size, 0, "RED Medium packet size");
198 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
199 		&red_max_pkt_size, 0, "RED Max packet size");
200 
201 SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
202 		0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");
203 
204 static int	heap_init(struct dn_heap *, int);
205 static int	heap_insert(struct dn_heap *, dn_key, void *);
206 static void	heap_extract(struct dn_heap *, void *);
207 
208 static void	transmit_event(struct dn_pipe *);
209 static void	ready_event(struct dn_flow_queue *);
210 static void	ready_event_wfq(struct dn_pipe *);
211 
212 static int	config_pipe(struct dn_ioc_pipe *);
213 static void	dummynet_flush(void);
214 
215 static void	dummynet_clock(systimer_t, int, struct intrframe *);
216 static void	dummynet(netmsg_t);
217 
218 static struct dn_pipe *dn_find_pipe(int);
219 static struct dn_flow_set *dn_locate_flowset(int, int);
220 
221 typedef void	(*dn_pipe_iter_t)(struct dn_pipe *, void *);
222 static void	dn_iterate_pipe(dn_pipe_iter_t, void *);
223 
224 typedef void	(*dn_flowset_iter_t)(struct dn_flow_set *, void *);
225 static void	dn_iterate_flowset(dn_flowset_iter_t, void *);
226 
227 static ip_dn_io_t	dummynet_io;
228 static ip_dn_ctl_t	dummynet_ctl;
229 
230 /*
231  * Heap management functions.
232  *
233  * In the heap, the first node is element 0.  Children of i are 2i+1 and 2i+2.
234  * Some macros help find parents/children so we can optimize the code.
235  *
236  * heap_init() is called to expand the heap when needed.
237  * Increment size in blocks of 16 entries.
238  * XXX failure to allocate a new element is a pretty bad failure
239  * as we basically stall a whole queue forever!!
240  * Returns 1 on error, 0 on success
241  */
242 #define HEAP_FATHER(x)		(((x) - 1) / 2)
243 #define HEAP_LEFT(x)		(2*(x) + 1)
244 #define HEAP_IS_LEFT(x)		((x) & 1)
245 #define HEAP_RIGHT(x)		(2*(x) + 2)
246 #define HEAP_SWAP(a, b, buffer)	{ buffer = a; a = b; b = buffer; }
247 #define HEAP_INCREMENT		15
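/*
 * Index arithmetic example for the implicit binary heap: node 5 has
 * children HEAP_LEFT(5) = 11 and HEAP_RIGHT(5) = 12, and its parent is
 * HEAP_FATHER(5) = 2; HEAP_IS_LEFT(5) is true because 5 is the left
 * child of node 2.
 */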
248 
249 static int
250 heap_init(struct dn_heap *h, int new_size)
251 {
252 	struct dn_heap_entry *p;
253 
254 	if (h->size >= new_size) {
255 		kprintf("%s, Bogus call, have %d want %d\n", __func__,
256 				h->size, new_size);
257 		return 0;
258 	}
259 
260 	new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
261 	p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
262 	if (h->size > 0) {
263 		bcopy(h->p, p, h->size * sizeof(*p));
264 		kfree(h->p, M_DUMMYNET);
265 	}
266 	h->p = p;
267 	h->size = new_size;
268 	return 0;
269 }
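/*
 * The rounding above grows the array in multiples of 16 entries: e.g. a
 * request for 17 slots becomes (17 + 15) & ~15 = 32 entries, while a
 * request for exactly 16 stays at 16.
 */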
270 
271 /*
272  * Insert element in heap. Normally, p != NULL, we insert p in
273  * a new position and bubble up.  If p == NULL, then the element is
274  * already in place, and key is the position where to start the
275  * bubble-up.
276  * Returns 1 on failure (cannot allocate new heap entry)
277  *
278  * If offset > 0 the position (index, int) of the element in the heap is
279  * also stored in the element itself at the given offset in bytes.
280  */
281 #define SET_OFFSET(heap, node) \
282 	if (heap->offset > 0) \
283 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = node;
284 
285 /*
286  * RESET_OFFSET is used for sanity checks. It sets the stored heap index to an invalid value.
287  */
288 #define RESET_OFFSET(heap, node) \
289 	if (heap->offset > 0) \
290 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;
291 
292 static int
293 heap_insert(struct dn_heap *h, dn_key key1, void *p)
294 {
295 	int son;
296 
297 	if (p == NULL) {	/* Data already there, set starting point */
298 		son = key1;
299 	} else {		/* Insert new element at the end, possibly resize */
300 		son = h->elements;
301 		if (son == h->size) { /* Need resize... */
302 			if (heap_init(h, h->elements + 1))
303 				return 1; /* Failure... */
304 		}
305 		h->p[son].object = p;
306 		h->p[son].key = key1;
307 		h->elements++;
308 	}
309 
310 	while (son > 0) {	/* Bubble up */
311 		int father = HEAP_FATHER(son);
312 		struct dn_heap_entry tmp;
313 
314 		if (DN_KEY_LT(h->p[father].key, h->p[son].key))
315 			break; /* Found right position */
316 
317 		/* 'son' smaller than 'father', swap and repeat */
318 		HEAP_SWAP(h->p[son], h->p[father], tmp);
319 		SET_OFFSET(h, son);
320 		son = father;
321 	}
322 	SET_OFFSET(h, son);
323 	return 0;
324 }
325 
326 /*
327  * Remove top element from heap, or obj if obj != NULL
328  */
329 static void
330 heap_extract(struct dn_heap *h, void *obj)
331 {
332 	int child, father, max = h->elements - 1;
333 
334 	if (max < 0) {
335 		kprintf("warning, extract from empty heap 0x%p\n", h);
336 		return;
337 	}
338 
339 	father = 0; /* Default: move up smallest child */
340 	if (obj != NULL) { /* Extract specific element, index is at offset */
341 		if (h->offset <= 0)
342 			panic("%s from middle not supported on this heap!!!", __func__);
343 
344 		father = *((int *)((char *)obj + h->offset));
345 		if (father < 0 || father >= h->elements) {
346 			panic("%s father %d out of bound 0..%d", __func__,
347 					father, h->elements);
348 		}
349 	}
350 	RESET_OFFSET(h, father);
351 
352 	child = HEAP_LEFT(father);		/* Left child */
353 	while (child <= max) {		/* Valid entry */
354 		if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
355 			child = child + 1;		/* Take right child, otherwise left */
356 		h->p[father] = h->p[child];
357 		SET_OFFSET(h, father);
358 		father = child;
359 		child = HEAP_LEFT(child);	/* Left child for next loop */
360 	}
361 	h->elements--;
362 	if (father != max) {
363 		/*
364 		 * Fill hole with last entry and bubble up, reusing the insert code
365 		 */
366 		h->p[father] = h->p[max];
367 		heap_insert(h, father, NULL);	/* This one cannot fail */
368 	}
369 }
370 
371 /*
372  * heapify() will reorganize data inside an array to maintain the
373  * heap property.  It is needed when we delete a bunch of entries.
374  */
375 static void
376 heapify(struct dn_heap *h)
377 {
378 	int i;
379 
380 	for (i = 0; i < h->elements; i++)
381 		heap_insert(h, i, NULL);
382 }
383 
384 /*
385  * Cleanup the heap and free data structure
386  */
387 static void
388 heap_free(struct dn_heap *h)
389 {
390 	if (h->size > 0)
391 		kfree(h->p, M_DUMMYNET);
392 	bzero(h, sizeof(*h));
393 }
394 
395 /*
396  * --- End of heap management functions ---
397  */
398 
399 /*
400  * Scheduler functions:
401  *
402  * transmit_event() is called when the delay-line needs to enter
403  * the scheduler, either because of existing pkts getting ready,
404  * or new packets entering the queue.  The event handled is the delivery
405  * time of the packet.
406  *
407  * ready_event() does something similar with fixed-rate queues, and the
408  * event handled is the finish time of the head pkt.
409  *
410  * ready_event_wfq() does something similar with WF2Q queues, and the
411  * event handled is the start time of the head pkt.
412  *
413  * In all cases, we make sure that the data structures are consistent
414  * before passing pkts out, because this might trigger recursive
415  * invocations of the procedures.
416  */
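/*
 * Rough timeline for a fixed-rate pipe, assuming the default dn_hz of
 * 1000: a packet queued at curr_time = 1000 that needs 12 ticks worth
 * of bandwidth credit on a pipe configured with a 50-tick delay is
 * drained by ready_event() around tick 1012, enters the delay line with
 * output_time = 1062, and is redispatched by transmit_event() at that
 * tick.  The exact numbers depend on the credit already accumulated.
 */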
417 static void
418 transmit_event(struct dn_pipe *pipe)
419 {
420 	struct dn_pkt *pkt;
421 
422 	while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
423 			DN_KEY_LEQ(pkt->output_time, curr_time)) {
424 		TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
425 		ip_dn_packet_redispatch(pkt);
426 	}
427 
428 	/*
429 	 * If there are leftover packets, put into the heap for next event
430 	 */
431 	if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
432 		/*
433 		 * XXX should check errors on heap_insert, by draining the
434 		 * whole pipe and hoping in the future we are more successful
435 		 */
436 		heap_insert(&extract_heap, pkt->output_time, pipe);
437 	}
438 }
439 
440 /*
441  * The following macro computes how many ticks we have to wait
442  * before being able to transmit a packet. The credit is taken from
443  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
444  */
445 #define SET_TICKS(pkt, q, p)	\
446 	(pkt->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + p->bandwidth - 1 ) / \
447 		p->bandwidth;
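/*
 * Credit (numbytes) is kept in units of bits * dn_hz, so a packet
 * "costs" len * 8 * dn_hz and a queue earns 'bandwidth' units per tick.
 * For instance, with dn_hz = 1000 and bandwidth = 1,000,000 bit/s, a
 * 1500-byte packet costs 12,000,000 units; starting from zero credit
 * SET_TICKS() yields 12 ticks, i.e. the 12 ms needed to push 1500 bytes
 * through a 1 Mbit/s link.
 */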
448 
449 /*
450  * Extract pkt from queue, compute output time (could be now)
451  * and put into delay line (p_queue)
452  */
453 static void
454 move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
455 	 struct dn_pipe *p, int len)
456 {
457 	TAILQ_REMOVE(&q->queue, pkt, dn_next);
458 	q->len--;
459 	q->len_bytes -= len;
460 
461 	pkt->output_time = curr_time + p->delay;
462 
463 	TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
464 }
465 
466 /*
467  * ready_event() is invoked every time the queue must enter the
468  * scheduler, either because the first packet arrives, or because
469  * a previously scheduled event fired.
470  * On invocation, drain as many pkts as possible (could be 0) and then
471  * if there are leftover packets reinsert the pkt in the scheduler.
472  */
473 static void
474 ready_event(struct dn_flow_queue *q)
475 {
476 	struct dn_pkt *pkt;
477 	struct dn_pipe *p = q->fs->pipe;
478 	int p_was_empty;
479 
480 	if (p == NULL) {
481 		kprintf("ready_event- pipe is gone\n");
482 		return;
483 	}
484 	p_was_empty = TAILQ_EMPTY(&p->p_queue);
485 
486 	/*
487 	 * Schedule fixed-rate queues linked to this pipe:
488 	 * Account for the bw accumulated since last scheduling, then
489 	 * drain as many pkts as allowed by q->numbytes and move to
490 	 * the delay line (in p) computing output time.
491 	 * bandwidth==0 (no limit) means we can drain the whole queue,
492 	 * setting len_scaled = 0 does the job.
493 	 */
494 	q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
495 	while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
496 		int len = pkt->dn_m->m_pkthdr.len;
497 		int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
498 
499 		if (len_scaled > q->numbytes)
500 			break;
501 		q->numbytes -= len_scaled;
502 		move_pkt(pkt, q, p, len);
503 	}
504 
505 	/*
506 	 * If we have more packets queued, schedule next ready event
507 	 * (can only occur when bandwidth != 0, otherwise we would have
508 	 * flushed the whole queue in the previous loop).
509 	 * To this purpose we record the current time and compute how many
510 	 * ticks to go for the finish time of the packet.
511 	 */
512 	if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
513 		/* This implies bandwidth != 0 */
514 		dn_key t = SET_TICKS(pkt, q, p); /* ticks I have to wait */
515 
516 		q->sched_time = curr_time;
517 
518 		/*
519 		 * XXX should check errors on heap_insert, and drain the whole
520 		 * queue on error hoping next time we are luckier.
521 		 */
522 		heap_insert(&ready_heap, curr_time + t, q);
523 	} else {	/* RED needs to know when the queue becomes empty */
524 		q->q_time = curr_time;
525 		q->numbytes = 0;
526 	}
527 
528 	/*
529 	 * If the delay line was empty call transmit_event(p) now.
530 	 * Otherwise, the scheduler will take care of it.
531 	 */
532 	if (p_was_empty)
533 		transmit_event(p);
534 }
535 
536 /*
537  * Called when we can transmit packets on WF2Q queues.  Take pkts out of
538  * the queues at their start time, and enqueue into the delay line.
539  * Packets are drained until p->numbytes < 0.  As long as
540  * len_scaled >= p->numbytes, the packet goes into the delay line
541  * with a deadline p->delay.  For the last packet, if p->numbytes < 0,
542  * there is an additional delay.
543  */
544 static void
545 ready_event_wfq(struct dn_pipe *p)
546 {
547 	int p_was_empty = TAILQ_EMPTY(&p->p_queue);
548 	struct dn_heap *sch = &p->scheduler_heap;
549 	struct dn_heap *neh = &p->not_eligible_heap;
550 
551 	p->numbytes += (curr_time - p->sched_time) * p->bandwidth;
552 
553 	/*
554 	 * While we have backlogged traffic AND credit, we need to do
555 	 * something on the queue.
556 	 */
557 	while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
558 		if (sch->elements > 0) { /* Have some eligible pkts to send out */
559 			struct dn_flow_queue *q = sch->p[0].object;
560 			struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
561 			struct dn_flow_set *fs = q->fs;
562 			uint64_t len = pkt->dn_m->m_pkthdr.len;
563 			int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
564 
565 			heap_extract(sch, NULL);	/* Remove queue from heap */
566 			p->numbytes -= len_scaled;
567 			move_pkt(pkt, q, p, len);
568 
569 			p->V += (len << MY_M) / p->sum;	/* Update V */
570 			q->S = q->F;			/* Update start time */
571 
572 			if (q->len == 0) {	/* Flow not backlogged any more */
573 				fs->backlogged--;
574 				heap_insert(&p->idle_heap, q->F, q);
575 			} else {		/* Still backlogged */
576 				/*
577 				 * Update F and position in backlogged queue, then
578 				 * put flow in not_eligible_heap (we will fix this later).
579 				 */
580 				len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
581 				q->F += (len << MY_M) / (uint64_t)fs->weight;
582 				if (DN_KEY_LEQ(q->S, p->V))
583 					heap_insert(neh, q->S, q);
584 				else
585 					heap_insert(sch, q->F, q);
586 			}
587 		}
588 
589 		/*
590 		 * Now compute V = max(V, min(S_i)).  Remember that all elements in
591 		 * sch have by definition S_i <= V so if sch is not empty, V is surely
592 		 * the max and we must not update it.  Conversely, if sch is empty
593 		 * we only need to look at neh.
594 		 */
595 		if (sch->elements == 0 && neh->elements > 0)
596 			p->V = MAX64(p->V, neh->p[0].key);
597 
598 		/*
599 		 * Move from neh to sch any packets that have become eligible
600 		 */
601 		while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
602 			struct dn_flow_queue *q = neh->p[0].object;
603 
604 			heap_extract(neh, NULL);
605 			heap_insert(sch, q->F, q);
606 		}
607 	}
608 
609 	if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
610 			p->idle_heap.elements > 0) {
611 		/*
612 		 * No traffic and no events scheduled.  We can get rid of idle-heap.
613 		 */
614 		int i;
615 
616 		for (i = 0; i < p->idle_heap.elements; i++) {
617 			struct dn_flow_queue *q = p->idle_heap.p[i].object;
618 
619 			q->F = 0;
620 			q->S = q->F + 1;
621 		}
622 		p->sum = 0;
623 		p->V = 0;
624 		p->idle_heap.elements = 0;
625 	}
626 
627 	/*
628 	 * If we are getting clocks from dummynet and if we are under credit,
629 	 * schedule the next ready event.
630 	 * Also fix the delivery time of the last packet.
631 	 */
632 	if (p->numbytes < 0) { /* This implies bandwidth>0 */
633 		dn_key t = 0; /* Number of ticks I have to wait */
634 
635 		if (p->bandwidth > 0)
636 			t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
637 		TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
638 		p->sched_time = curr_time;
639 
640 		/*
641 		 * XXX should check errors on heap_insert, and drain the whole
642 		 * queue on error hoping next time we are luckier.
643 		 */
644 		heap_insert(&wfq_ready_heap, curr_time + t, p);
645 	}
646 
647 	/*
648 	 * If the delay line was empty call transmit_event(p) now.
649 	 * Otherwise, the scheduler will take care of it.
650 	 */
651 	if (p_was_empty)
652 		transmit_event(p);
653 }
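/*
 * WF2Q arithmetic sketch (MY_M is the fixed-point shift used for the
 * virtual time): with two backlogged flows of weights 1 and 3 sending
 * 1000-byte packets, p->sum = 4 and every transmission advances V by
 * (1000 << MY_M) / 4, while the weight-1 flow's finish time grows by
 * (1000 << MY_M) per packet and the weight-3 flow's by (1000 << MY_M) / 3.
 * The weight-3 flow therefore becomes eligible about three times as
 * often and receives roughly 3/4 of the pipe's bandwidth.
 */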
654 
655 static void
656 dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
657 {
658 	if (pipe->idle_heap.elements > 0 &&
659 			DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
660 		struct dn_flow_queue *q = pipe->idle_heap.p[0].object;
661 
662 		heap_extract(&pipe->idle_heap, NULL);
663 		q->S = q->F + 1; /* Mark timestamp as invalid */
664 		pipe->sum -= q->fs->weight;
665 	}
666 }
667 
668 /*
669  * This is called once per tick, or dn_hz times per second.  It is used to
670  * increment the current tick counter and schedule expired events.
671  */
672 static void
673 dummynet(netmsg_t msg)
674 {
675 	void *p;
676 	struct dn_heap *h;
677 	struct dn_heap *heaps[3];
678 	int i;
679 
680 	heaps[0] = &ready_heap;		/* Fixed-rate queues */
681 	heaps[1] = &wfq_ready_heap;		/* WF2Q queues */
682 	heaps[2] = &extract_heap;		/* Delay line */
683 
684 	/* Reply ASAP */
685 	crit_enter();
686 	lwkt_replymsg(&msg->lmsg, 0);
687 	crit_exit();
688 
689 	curr_time++;
690 	for (i = 0; i < 3; i++) {
691 		h = heaps[i];
692 		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
693 			if (h->p[0].key > curr_time) {
694 				kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
695 						i, (int)(curr_time - h->p[0].key));
696 			}
697 
698 			p = h->p[0].object;		/* Store a copy before heap_extract */
699 			heap_extract(h, NULL);	/* Need to extract before processing */
700 
701 			if (i == 0)
702 				ready_event(p);
703 			else if (i == 1)
704 				ready_event_wfq(p);
705 			else
706 				transmit_event(p);
707 		}
708 	}
709 
710 	/* Sweep pipes trying to expire idle flow_queues */
711 	dn_iterate_pipe(dn_expire_pipe_cb, NULL);
712 }
713 
714 /*
715  * Unconditionally expire empty queues in case of shortage.
716  * Returns the number of queues freed.
717  */
718 static int
719 expire_queues(struct dn_flow_set *fs)
720 {
721 	int i, initial_elements = fs->rq_elements;
722 
723 	if (fs->last_expired == time_uptime)
724 		return 0;
725 
726 	fs->last_expired = time_uptime;
727 
728 	for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */
729 		struct dn_flow_queue *q, *qn;
730 
731 		LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
732 			if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
733 				continue;
734 
735 			/*
736 			 * Entry is idle, expire it
737 			 */
738 			LIST_REMOVE(q, q_link);
739 			kfree(q, M_DUMMYNET);
740 
741 			KASSERT(fs->rq_elements > 0,
742 					("invalid rq_elements %d", fs->rq_elements));
743 			fs->rq_elements--;
744 		}
745 	}
746 	return initial_elements - fs->rq_elements;
747 }
748 
749 /*
750  * If room, create a new queue and put at head of slot i;
751  * otherwise, create or use the default queue.
752  */
753 static struct dn_flow_queue *
754 create_queue(struct dn_flow_set *fs, int i)
755 {
756 	struct dn_flow_queue *q;
757 
758 	if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
759 			expire_queues(fs) == 0) {
760 		/*
761 		 * No way to get room, use or create overflow queue.
762 		 */
763 		i = fs->rq_size;
764 		if (!LIST_EMPTY(&fs->rq[i]))
765 			return LIST_FIRST(&fs->rq[i]);
766 	}
767 
768 	q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
769 	if (q == NULL)
770 		return NULL;
771 
772 	q->fs = fs;
773 	q->hash_slot = i;
774 	q->S = q->F + 1;   /* hack - mark timestamp as invalid */
775 	TAILQ_INIT(&q->queue);
776 
777 	LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
778 	fs->rq_elements++;
779 
780 	return q;
781 }
782 
783 /*
784  * Given a flow_set and a pkt in last_pkt, find a matching queue
785  * after appropriate masking. The queue is moved to front
786  * so that further searches take less time.
787  */
788 static struct dn_flow_queue *
789 find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
790 {
791 	struct dn_flow_queue *q;
792 	int i = 0;
793 
794 	if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
795 		q = LIST_FIRST(&fs->rq[0]);
796 	} else {
797 		struct dn_flow_queue *qn;
798 
799 		/* First, do the masking */
800 		id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
801 		id->fid_src_ip &= fs->flow_mask.fid_src_ip;
802 		id->fid_dst_port &= fs->flow_mask.fid_dst_port;
803 		id->fid_src_port &= fs->flow_mask.fid_src_port;
804 		id->fid_proto &= fs->flow_mask.fid_proto;
805 		id->fid_flags = 0; /* we don't care about this one */
806 
807 		/* Then, hash function */
808 		i = ((id->fid_dst_ip) & 0xffff) ^
809 			((id->fid_dst_ip >> 15) & 0xffff) ^
810 			((id->fid_src_ip << 1) & 0xffff) ^
811 			((id->fid_src_ip >> 16 ) & 0xffff) ^
812 			(id->fid_dst_port << 1) ^ (id->fid_src_port) ^
813 			(id->fid_proto);
814 		i = i % fs->rq_size;
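		/*
		 * Example: a flow mask with fid_dst_ip = 0xffffffff and all
		 * other fields zero yields one queue per destination address;
		 * every packet to the same destination hashes to the same slot
		 * and shares that queue regardless of ports or protocol.
		 */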
815 
816 		/*
817 		 * Finally, scan the current list for a match and
818 		 * expire idle flow queues
819 		 */
820 		searches++;
821 		LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
822 			search_steps++;
823 			if (id->fid_dst_ip == q->id.fid_dst_ip &&
824 					id->fid_src_ip == q->id.fid_src_ip &&
825 					id->fid_dst_port == q->id.fid_dst_port &&
826 					id->fid_src_port == q->id.fid_src_port &&
827 					id->fid_proto == q->id.fid_proto &&
828 					id->fid_flags == q->id.fid_flags) {
829 				break; /* Found */
830 			} else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
831 					q->S == q->F + 1) {
832 				/*
833 				 * Entry is idle and not in any heap, expire it
834 				 */
835 				LIST_REMOVE(q, q_link);
836 				kfree(q, M_DUMMYNET);
837 
838 				KASSERT(fs->rq_elements > 0,
839 						("invalid rq_elements %d", fs->rq_elements));
840 				fs->rq_elements--;
841 			}
842 		}
843 		if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */
844 			LIST_REMOVE(q, q_link);
845 			LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
846 		}
847 	}
848 	if (q == NULL) {	/* No match, need to allocate a new entry */
849 		q = create_queue(fs, i);
850 		if (q != NULL)
851 			q->id = *id;
852 	}
853 	return q;
854 }
855 
856 static int
857 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
858 {
859 	/*
860 	 * RED algorithm
861 	 *
862 	 * RED calculates the average queue size (avg) using a low-pass filter
863  * with an exponentially weighted (w_q) moving average:
864  * 	avg  <-  (1-w_q) * avg + w_q * q_size
865  * where q_size is the queue length (measured in bytes or packets).
866 	 *
867 	 * If q_size == 0, we compute the idle time for the link, and set
868  *	avg <- avg * (1 - w_q)^(idle/s)
869 	 * where s is the time needed for transmitting a medium-sized packet.
870 	 *
871 	 * Now, if avg < min_th the packet is enqueued.
872 	 * If avg > max_th the packet is dropped. Otherwise, the packet is
873 	 * dropped with probability P function of avg.
874 	 */
875 
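	/*
	 * Worked example for the linear region: with min_th = 5 packets,
	 * max_th = 15 and max_p = 0.1, an average queue of 10 packets gives
	 * p_b = max_p * (avg - min_th) / (max_th - min_th) = 0.05.  The
	 * values below are kept in SCALE() fixed point, and the final test
	 * further scales p_b by the number of packets accepted since the
	 * last drop (q->count).
	 */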
876 	int64_t p_b = 0;
877 	u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
878 
879 	DPRINTF("\n%d q: %2u ", (int)curr_time, q_size);
880 
881 	/* Average queue size estimation */
882 	if (q_size != 0) {
883 		/*
884 		 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
885 		 */
886 		int diff = SCALE(q_size) - q->avg;
887 		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
888 
889 		q->avg += (int)v;
890 	} else {
891 		/*
892 		 * Queue is empty, find for how long the queue has been
893 		 * empty and use a lookup table for computing
894 		 * (1 - w_q)^(idle_time/s) where s is the time to send a
895 		 * (small) packet.
896 		 * XXX check wraps...
897 		 */
898 		if (q->avg) {
899 			u_int t = (curr_time - q->q_time) / fs->lookup_step;
900 
901 			q->avg = (t < fs->lookup_depth) ?
902 				SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
903 		}
904 	}
905 	DPRINTF("avg: %u ", SCALE_VAL(q->avg));
906 
907 	/* Should I drop? */
908 
909 	if (q->avg < fs->min_th) {
910 		/* Accept packet */
911 		q->count = -1;
912 		return 0;
913 	}
914 
915 	if (q->avg >= fs->max_th) { /* Average queue >=  Max threshold */
916 		if (fs->flags_fs & DN_IS_GENTLE_RED) {
917 			/*
918 			 * According to Gentle-RED, if avg is greater than max_th the
919 			 * packet is dropped with a probability
920 			 *	p_b = c_3 * avg - c_4
921 			 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
922 			 */
923 			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
924 		} else {
925 			q->count = -1;
926 			kprintf("- drop\n");
927 			return 1;
928 		}
929 	} else if (q->avg > fs->min_th) {
930 		/*
931 		 * We compute p_b using the linear dropping function p_b = c_1 *
932 		 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
933 		 * max_p * min_th / (max_th - min_th)
934 		 */
935 		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
936 	}
937 	if (fs->flags_fs & DN_QSIZE_IS_BYTES)
938 		p_b = (p_b * len) / fs->max_pkt_size;
939 
940 	if (++q->count == 0) {
941 		q->random = krandom() & 0xffff;
942 	} else {
943 		/*
944 		 * q->count counts packets arrived since last drop, so a greater
945 		 * value of q->count means a greater packet drop probability.
946 		 */
947 		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
948 			q->count = 0;
949 			DPRINTF("%s", "- red drop");
950 			/* After a drop we calculate a new random value */
951 			q->random = krandom() & 0xffff;
952 			return 1;	/* Drop */
953 		}
954 	}
955 	/* End of RED algorithm */
956 	return 0; /* Accept */
957 }
958 
959 static void
960 dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
961 {
962 	int i;
963 
964 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
965 		struct dn_pipe_head *pipe_hdr = &pipe_table[i];
966 		struct dn_pipe *pipe, *pipe_next;
967 
968 		LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
969 			func(pipe, arg);
970 	}
971 }
972 
973 static void
974 dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
975 {
976 	int i;
977 
978 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
979 		struct dn_flowset_head *fs_hdr = &flowset_table[i];
980 		struct dn_flow_set *fs, *fs_next;
981 
982 		LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
983 			func(fs, arg);
984 	}
985 }
986 
987 static struct dn_pipe *
988 dn_find_pipe(int pipe_nr)
989 {
990 	struct dn_pipe_head *pipe_hdr;
991 	struct dn_pipe *p;
992 
993 	pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
994 	LIST_FOREACH(p, pipe_hdr, p_link) {
995 		if (p->pipe_nr == pipe_nr)
996 			break;
997 	}
998 	return p;
999 }
1000 
1001 static struct dn_flow_set *
1002 dn_find_flowset(int fs_nr)
1003 {
1004 	struct dn_flowset_head *fs_hdr;
1005 	struct dn_flow_set *fs;
1006 
1007 	fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
1008 	LIST_FOREACH(fs, fs_hdr, fs_link) {
1009 		if (fs->fs_nr == fs_nr)
1010 			break;
1011 	}
1012 	return fs;
1013 }
1014 
1015 static struct dn_flow_set *
1016 dn_locate_flowset(int pipe_nr, int is_pipe)
1017 {
1018 	struct dn_flow_set *fs = NULL;
1019 
1020 	if (!is_pipe) {
1021 		fs = dn_find_flowset(pipe_nr);
1022 	} else {
1023 		struct dn_pipe *p;
1024 
1025 		p = dn_find_pipe(pipe_nr);
1026 		if (p != NULL)
1027 			fs = &p->fs;
1028 	}
1029 	return fs;
1030 }
1031 
1032 /*
1033  * Dummynet hook for packets.  Below 'pipe' is a pipe or a queue
1034  * depending on whether WF2Q or fixed bw is used.
1035  *
1036  * The per-packet parameters travel with the mbuf in its
1037  * PACKET_TAG_DUMMYNET tag (struct dn_pkt) rather than as separate
1038  * arguments:
1039  *
1040  * pipe_nr	pipe or queue the packet is destined for.
1041  * dn_flags	DN_FLAGS_IS_PIPE distinguishes a pipe (fixed bw) from a queue (WF2Q).
1042  * id		flow id used to select or create the matching flow queue.
1043  * dn_m		the mbuf carrying the packet.
1044  */
1046 static int
1047 dummynet_io(struct mbuf *m)
1048 {
1049 	struct dn_pkt *pkt;
1050 	struct m_tag *tag;
1051 	struct dn_flow_set *fs;
1052 	struct dn_pipe *pipe;
1053 	uint64_t len = m->m_pkthdr.len;
1054 	struct dn_flow_queue *q = NULL;
1055 	int is_pipe, pipe_nr;
1056 
1057 	tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1058 	pkt = m_tag_data(tag);
1059 
1060 	is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
1061 	pipe_nr = pkt->pipe_nr;
1062 
1063 	/*
1064 	 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule
1065 	 */
1066 	fs = dn_locate_flowset(pipe_nr, is_pipe);
1067 	if (fs == NULL)
1068 		goto dropit;	/* This queue/pipe does not exist! */
1069 
1070 	pipe = fs->pipe;
1071 	if (pipe == NULL) { /* Must be a queue, try to find a matching pipe */
1072 		pipe = dn_find_pipe(fs->parent_nr);
1073 		if (pipe != NULL) {
1074 			fs->pipe = pipe;
1075 		} else {
1076 			kprintf("No pipe %d for queue %d, drop pkt\n",
1077 					fs->parent_nr, fs->fs_nr);
1078 			goto dropit;
1079 		}
1080 	}
1081 
1082 	q = find_queue(fs, &pkt->id);
1083 	if (q == NULL)
1084 		goto dropit;	/* Cannot allocate queue */
1085 
1086 	/*
1087 	 * Update statistics, then check reasons to drop pkt
1088 	 */
1089 	q->tot_bytes += len;
1090 	q->tot_pkts++;
1091 
1092 	if (fs->plr && krandom() < fs->plr)
1093 		goto dropit;	/* Random pkt drop */
1094 
1095 	if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1096 		if (q->len_bytes > fs->qsize)
1097 			goto dropit;	/* Queue size overflow */
1098 	} else {
1099 		if (q->len >= fs->qsize)
1100 			goto dropit;	/* Queue count overflow */
1101 	}
1102 
1103 	if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
1104 		goto dropit;
1105 
1106 	TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
1107 	q->len++;
1108 	q->len_bytes += len;
1109 
1110 	if (TAILQ_FIRST(&q->queue) != pkt)	/* Flow was not idle, we are done */
1111 		goto done;
1112 
1113 	/*
1114 	 * If we reach this point the flow was previously idle, so we need
1115 	 * to schedule it.  This involves different actions for fixed-rate
1116 	 * or WF2Q queues.
1117 	 */
1118 	if (is_pipe) {
1119 		/*
1120 		 * Fixed-rate queue: just insert into the ready_heap.
1121 		 */
1122 		dn_key t = 0;
1123 
1124 		if (pipe->bandwidth)
1125 			t = SET_TICKS(pkt, q, pipe);
1126 
1127 		q->sched_time = curr_time;
1128 		if (t == 0)	/* Must process it now */
1129 			ready_event(q);
1130 		else
1131 			heap_insert(&ready_heap, curr_time + t, q);
1132 	} else {
1133 		/*
1134 		 * WF2Q:
1135 		 * First, compute start time S: if the flow was idle (S=F+1)
1136 		 * set S to the virtual time V for the controlling pipe, and update
1137 		 * the sum of weights for the pipe; otherwise, remove flow from
1138 		 * idle_heap and set S to max(F, V).
1139 		 * Second, compute finish time F = S + len/weight.
1140 		 * Third, if pipe was idle, update V = max(S, V).
1141 		 * Fourth, count one more backlogged flow.
1142 		 */
1143 		if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid */
1144 			q->S = pipe->V;
1145 			pipe->sum += fs->weight; /* Add weight of new queue */
1146 		} else {
1147 			heap_extract(&pipe->idle_heap, q);
1148 			q->S = MAX64(q->F, pipe->V);
1149 		}
1150 		q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;
1151 
1152 		if (pipe->not_eligible_heap.elements == 0 &&
1153 				pipe->scheduler_heap.elements == 0)
1154 			pipe->V = MAX64(q->S, pipe->V);
1155 
1156 		fs->backlogged++;
1157 
1158 		/*
1159 		 * Look at eligibility.  A flow is not eligible if S > V (when
1160 		 * this happens, it means that there is some other flow already
1161 		 * scheduled for the same pipe, so the scheduler_heap cannot be
1162 		 * empty).  If the flow is not eligible we just store it in the
1163 		 * not_eligible_heap.  Otherwise, we store in the scheduler_heap
1164 		 * and possibly invoke ready_event_wfq() right now if there is
1165 		 * leftover credit.
1166 		 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1167 		 * and for all flows in not_eligible_heap (NEH), S_i > V.
1168 		 * So when we need to compute max(V, min(S_i)) forall i in SCH+NEH,
1169 		 * we only need to look into NEH.
1170 		 */
1171 		if (DN_KEY_GT(q->S, pipe->V)) {	/* Not eligible */
1172 			if (pipe->scheduler_heap.elements == 0)
1173 				kprintf("++ ouch! not eligible but empty scheduler!\n");
1174 			heap_insert(&pipe->not_eligible_heap, q->S, q);
1175 		} else {
1176 			heap_insert(&pipe->scheduler_heap, q->F, q);
1177 			if (pipe->numbytes >= 0) {	/* Pipe is idle */
1178 				if (pipe->scheduler_heap.elements != 1)
1179 					kprintf("*** OUCH! pipe should have been idle!\n");
1180 				DPRINTF("Waking up pipe %d at %d\n",
1181 						pipe->pipe_nr, (int)(q->F >> MY_M));
1182 				pipe->sched_time = curr_time;
1183 				ready_event_wfq(pipe);
1184 			}
1185 		}
1186 	}
1187 done:
1188 	return 0;
1189 
1190 dropit:
1191 	if (q)
1192 		q->drops++;
1193 	return ENOBUFS;
1194 }
1195 
1196 /*
1197  * Dispose all packets and flow_queues on a flow_set.
1198  * If all=1, also remove red lookup table and other storage,
1199  * including the descriptor itself.
1200  * For the flow_set embedded in a dn_pipe, the caller MUST also clean up ready_heap.
1201  */
1202 static void
1203 purge_flow_set(struct dn_flow_set *fs, int all)
1204 {
1205 	int i;
1206 #ifdef INVARIANTS
1207 	int rq_elements = 0;
1208 #endif
1209 
1210 	for (i = 0; i <= fs->rq_size; i++) {
1211 		struct dn_flow_queue *q;
1212 
1213 		while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
1214 			struct dn_pkt *pkt;
1215 
1216 			while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
1217 				TAILQ_REMOVE(&q->queue, pkt, dn_next);
1218 				ip_dn_packet_free(pkt);
1219 			}
1220 
1221 			LIST_REMOVE(q, q_link);
1222 			kfree(q, M_DUMMYNET);
1223 
1224 #ifdef INVARIANTS
1225 			rq_elements++;
1226 #endif
1227 		}
1228 	}
1229 	KASSERT(rq_elements == fs->rq_elements,
1230 			("# rq elements mismatch, freed %d, total %d",
1231 			 rq_elements, fs->rq_elements));
1232 	fs->rq_elements = 0;
1233 
1234 	if (all) {
1235 		/* RED - free lookup table */
1236 		if (fs->w_q_lookup)
1237 			kfree(fs->w_q_lookup, M_DUMMYNET);
1238 
1239 		if (fs->rq)
1240 			kfree(fs->rq, M_DUMMYNET);
1241 
1242 		/*
1243 		 * If this fs is not part of a pipe, free it
1244 		 *
1245 		 * fs->pipe == NULL could happen, if 'fs' is a WF2Q and
1246 		 * - No packet belonging to that flow set has been delivered by
1247 		 *   dummynet_io(), i.e. parent pipe is not installed yet.
1248 		 * - Parent pipe is deleted.
1249 		 */
1250 		if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
1251 			kfree(fs, M_DUMMYNET);
1252 	}
1253 }
1254 
1255 /*
1256  * Dispose all packets queued on a pipe (not a flow_set).
1257  * Also free all resources associated to a pipe, which is about
1258  * to be deleted.
1259  */
1260 static void
1261 purge_pipe(struct dn_pipe *pipe)
1262 {
1263 	struct dn_pkt *pkt;
1264 
1265 	purge_flow_set(&pipe->fs, 1);
1266 
1267 	while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
1268 		TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
1269 		ip_dn_packet_free(pkt);
1270 	}
1271 
1272 	heap_free(&pipe->scheduler_heap);
1273 	heap_free(&pipe->not_eligible_heap);
1274 	heap_free(&pipe->idle_heap);
1275 }
1276 
1277 /*
1278  * Delete all pipes and heaps returning memory.
1279  */
1280 static void
1281 dummynet_flush(void)
1282 {
1283 	struct dn_pipe_head pipe_list;
1284 	struct dn_flowset_head fs_list;
1285 	struct dn_pipe *p;
1286 	struct dn_flow_set *fs;
1287 	int i;
1288 
1289 	/*
1290 	 * Prevent future matches...
1291 	 */
1292 	LIST_INIT(&pipe_list);
1293 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1294 		struct dn_pipe_head *pipe_hdr = &pipe_table[i];
1295 
1296 		while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
1297 			LIST_REMOVE(p, p_link);
1298 			LIST_INSERT_HEAD(&pipe_list, p, p_link);
1299 		}
1300 	}
1301 
1302 	LIST_INIT(&fs_list);
1303 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1304 		struct dn_flowset_head *fs_hdr = &flowset_table[i];
1305 
1306 		while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
1307 			LIST_REMOVE(fs, fs_link);
1308 			LIST_INSERT_HEAD(&fs_list, fs, fs_link);
1309 		}
1310 	}
1311 
1312 	/* Free heaps so we don't have unwanted events */
1313 	heap_free(&ready_heap);
1314 	heap_free(&wfq_ready_heap);
1315 	heap_free(&extract_heap);
1316 
1317 	/*
1318 	 * Now purge all queued pkts and delete all pipes
1319 	 */
1320 	/* Scan and purge all flow_sets. */
1321 	while ((fs = LIST_FIRST(&fs_list)) != NULL) {
1322 		LIST_REMOVE(fs, fs_link);
1323 		purge_flow_set(fs, 1);
1324 	}
1325 
1326 	while ((p = LIST_FIRST(&pipe_list)) != NULL) {
1327 		LIST_REMOVE(p, p_link);
1328 		purge_pipe(p);
1329 		kfree(p, M_DUMMYNET);
1330 	}
1331 }
1332 
1333 /*
1334  * setup RED parameters
1335  */
1336 static int
1337 config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
1338 {
1339 	int i;
1340 
1341 	x->w_q = ioc_fs->w_q;
1342 	x->min_th = SCALE(ioc_fs->min_th);
1343 	x->max_th = SCALE(ioc_fs->max_th);
1344 	x->max_p = ioc_fs->max_p;
1345 
1346 	x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
1347 	x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
1348 	if (x->flags_fs & DN_IS_GENTLE_RED) {
1349 		x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
1350 		x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
1351 	}
1352 
1353 	/* If the lookup table already exists, free and create it again */
1354 	if (x->w_q_lookup) {
1355 		kfree(x->w_q_lookup, M_DUMMYNET);
1356 		x->w_q_lookup = NULL;
1357 	}
1358 
1359 	if (red_lookup_depth == 0) {
1360 		kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1361 		kfree(x, M_DUMMYNET);
1362 		return EINVAL;
1363 	}
1364 	x->lookup_depth = red_lookup_depth;
1365 	x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
1366 			M_DUMMYNET, M_WAITOK);
1367 
1368 	/* Fill the lookup table with (1 - w_q)^x */
1369 	x->lookup_step = ioc_fs->lookup_step;
1370 	x->lookup_weight = ioc_fs->lookup_weight;
1371 
1372 	x->w_q_lookup[0] = SCALE(1) - x->w_q;
1373 	for (i = 1; i < x->lookup_depth; i++)
1374 		x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
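	/*
	 * Assuming the userland-supplied lookup_weight is (1 - w_q) in
	 * SCALE() units, the table holds (1 - w_q)^(t + 1), so an idle
	 * queue has its avg multiplied by w_q_lookup[t] with t the idle
	 * time in lookup_step units; e.g. with w_q = 0.002, w_q_lookup[0]
	 * is about 0.998 and w_q_lookup[249] about 0.998^250, roughly 0.61.
	 */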
1375 
1376 	if (red_avg_pkt_size < 1)
1377 		red_avg_pkt_size = 512;
1378 	x->avg_pkt_size = red_avg_pkt_size;
1379 
1380 	if (red_max_pkt_size < 1)
1381 		red_max_pkt_size = 1500;
1382 	x->max_pkt_size = red_max_pkt_size;
1383 
1384 	return 0;
1385 }
1386 
1387 static void
1388 alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1389 {
1390 	int i, alloc_size;
1391 
1392 	if (x->flags_fs & DN_HAVE_FLOW_MASK) {
1393 		int l = ioc_fs->rq_size;
1394 
1395 		/* Allocate some slots */
1396 		if (l == 0)
1397 			l = dn_hash_size;
1398 
1399 		if (l < DN_MIN_HASH_SIZE)
1400 			l = DN_MIN_HASH_SIZE;
1401 		else if (l > DN_MAX_HASH_SIZE)
1402 			l = DN_MAX_HASH_SIZE;
1403 
1404 		x->rq_size = l;
1405 	} else {
1406 		/* One is enough for null mask */
1407 		x->rq_size = 1;
1408 	}
1409 	alloc_size = x->rq_size + 1;
1410 
1411 	x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
1412 			M_DUMMYNET, M_WAITOK | M_ZERO);
1413 	x->rq_elements = 0;
1414 
1415 	for (i = 0; i < alloc_size; ++i)
1416 		LIST_INIT(&x->rq[i]);
1417 }
1418 
1419 static void
1420 set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
1421 {
1422 	id->fid_dst_ip = ioc_id->u.ip.dst_ip;
1423 	id->fid_src_ip = ioc_id->u.ip.src_ip;
1424 	id->fid_dst_port = ioc_id->u.ip.dst_port;
1425 	id->fid_src_port = ioc_id->u.ip.src_port;
1426 	id->fid_proto = ioc_id->u.ip.proto;
1427 	id->fid_flags = ioc_id->u.ip.flags;
1428 }
1429 
1430 static void
1431 set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1432 {
1433 	x->flags_fs = ioc_fs->flags_fs;
1434 	x->qsize = ioc_fs->qsize;
1435 	x->plr = ioc_fs->plr;
1436 	set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
1437 	if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1438 		if (x->qsize > 1024 * 1024)
1439 			x->qsize = 1024 * 1024;
1440 	} else {
1441 		if (x->qsize == 0 || x->qsize > 100)
1442 			x->qsize = 50;
1443 	}
1444 
1445 	/* Configuring RED */
1446 	if (x->flags_fs & DN_IS_RED)
1447 		config_red(ioc_fs, x);	/* XXX should check errors */
1448 }
1449 
1450 /*
1451  * setup pipe or queue parameters.
1452  */
1453 
1454 static int
1455 config_pipe(struct dn_ioc_pipe *ioc_pipe)
1456 {
1457 	struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
1458 	int error;
1459 
1460 	/*
1461 	 * The config program passes parameters as follows:
1462 	 * bw	bits/second (0 means no limits)
1463 	 * delay	ms (must be translated into ticks)
1464 	 * qsize	slots or bytes
1465 	 */
1466 	ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;
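	/*
	 * Example: with the default dn_hz of 1000 a 100 ms delay becomes
	 * 100 ticks, while with dn_hz = 100 it becomes 10 ticks.  The
	 * integer division truncates, so a 5 ms delay at dn_hz = 100
	 * rounds down to 0 ticks (no added delay).
	 */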
1467 
1468 	/*
1469 	 * We need either a pipe number or a flow_set number
1470 	 */
1471 	if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
1472 		return EINVAL;
1473 	if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
1474 		return EINVAL;
1475 
1476 	/*
1477 	 * Validate pipe number
1478 	 */
1479 	if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
1480 		return EINVAL;
1481 
1482 	error = EINVAL;
1483 	if (ioc_pipe->pipe_nr != 0) {	/* This is a pipe */
1484 		struct dn_pipe *x, *p;
1485 
1486 		/* Locate pipe */
1487 		p = dn_find_pipe(ioc_pipe->pipe_nr);
1488 
1489 		if (p == NULL) {	/* New pipe */
1490 			x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
1491 			x->pipe_nr = ioc_pipe->pipe_nr;
1492 			x->fs.pipe = x;
1493 			TAILQ_INIT(&x->p_queue);
1494 
1495 			/*
1496 			 * idle_heap is the only one from which we extract from the middle.
1497 			 */
1498 			x->idle_heap.size = x->idle_heap.elements = 0;
1499 			x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
1500 		} else {
1501 			int i;
1502 
1503 			x = p;
1504 
1505 			/* Flush accumulated credit for all queues */
1506 			for (i = 0; i <= x->fs.rq_size; i++) {
1507 				struct dn_flow_queue *q;
1508 
1509 				LIST_FOREACH(q, &x->fs.rq[i], q_link)
1510 					q->numbytes = 0;
1511 			}
1512 		}
1513 
1514 		x->bandwidth = ioc_pipe->bandwidth;
1515 		x->numbytes = 0; /* Just in case... */
1516 		x->delay = ioc_pipe->delay;
1517 
1518 		set_fs_parms(&x->fs, ioc_fs);
1519 
1520 		if (x->fs.rq == NULL) {	/* A new pipe */
1521 			struct dn_pipe_head *pipe_hdr;
1522 
1523 			alloc_hash(&x->fs, ioc_fs);
1524 
1525 			pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
1526 			LIST_INSERT_HEAD(pipe_hdr, x, p_link);
1527 		}
1528 	} else {	/* Config flow_set */
1529 		struct dn_flow_set *x, *fs;
1530 
1531 		/* Locate flow_set */
1532 		fs = dn_find_flowset(ioc_fs->fs_nr);
1533 
1534 		if (fs == NULL) {	/* New flow_set */
1535 			if (ioc_fs->parent_nr == 0)	/* Need link to a pipe */
1536 				goto back;
1537 
1538 			x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
1539 					M_WAITOK | M_ZERO);
1540 			x->fs_nr = ioc_fs->fs_nr;
1541 			x->parent_nr = ioc_fs->parent_nr;
1542 			x->weight = ioc_fs->weight;
1543 			if (x->weight == 0)
1544 				x->weight = 1;
1545 			else if (x->weight > 100)
1546 				x->weight = 100;
1547 		} else {
1548 			/* Change parent pipe not allowed; must delete and recreate */
1549 			if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
1550 				goto back;
1551 			x = fs;
1552 		}
1553 
1554 		set_fs_parms(x, ioc_fs);
1555 
1556 		if (x->rq == NULL) {	/* A new flow_set */
1557 			struct dn_flowset_head *fs_hdr;
1558 
1559 			alloc_hash(x, ioc_fs);
1560 
1561 			fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
1562 			LIST_INSERT_HEAD(fs_hdr, x, fs_link);
1563 		}
1564 	}
1565 	error = 0;
1566 
1567 back:
1568 	return error;
1569 }
1570 
1571 /*
1572  * Helper function to remove from a heap queues which are linked to
1573  * a flow_set about to be deleted.
1574  */
1575 static void
1576 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
1577 {
1578 	int i = 0, found = 0;
1579 
1580 	while (i < h->elements) {
1581 		if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
1582 			h->elements--;
1583 			h->p[i] = h->p[h->elements];
1584 			found++;
1585 		} else {
1586 			i++;
1587 		}
1588 	}
1589 	if (found)
1590 		heapify(h);
1591 }
1592 
1593 /*
1594  * helper function to remove a pipe from a heap (can be there at most once)
1595  */
1596 static void
1597 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
1598 {
1599 	if (h->elements > 0) {
1600 		int i;
1601 
1602 		for (i = 0; i < h->elements; i++) {
1603 			if (h->p[i].object == p) { /* found it */
1604 				h->elements--;
1605 				h->p[i] = h->p[h->elements];
1606 				heapify(h);
1607 				break;
1608 			}
1609 		}
1610 	}
1611 }
1612 
1613 static void
1614 dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
1615 {
1616 	struct dn_pipe *pipe = pipe0;
1617 
1618 	if (fs->pipe == pipe) {
1619 		kprintf("++ ref to pipe %d from fs %d\n",
1620 				pipe->pipe_nr, fs->fs_nr);
1621 		fs->pipe = NULL;
1622 		purge_flow_set(fs, 0);
1623 	}
1624 }
1625 
1626 /*
1627  * Fully delete a pipe or a queue, cleaning up associated info.
1628  */
1629 static int
1630 delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
1631 {
1632 	struct dn_pipe *p;
1633 	int error;
1634 
1635 	if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
1636 		return EINVAL;
1637 	if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
1638 		return EINVAL;
1639 
1640 	if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
1641 		return EINVAL;
1642 
1643 	error = EINVAL;
1644 	if (ioc_pipe->pipe_nr != 0) {	/* This is an old-style pipe */
1645 		/* Locate pipe */
1646 		p = dn_find_pipe(ioc_pipe->pipe_nr);
1647 		if (p == NULL)
1648 			goto back; /* Not found */
1649 
1650 		/* Unlink from pipe hash table */
1651 		LIST_REMOVE(p, p_link);
1652 
1653 		/* Remove all references to this pipe from flow_sets */
1654 		dn_iterate_flowset(dn_unref_pipe_cb, p);
1655 
1656 		fs_remove_from_heap(&ready_heap, &p->fs);
1657 		purge_pipe(p);	/* Remove all data associated to this pipe */
1658 
1659 		/* Remove reference to here from extract_heap and wfq_ready_heap */
1660 		pipe_remove_from_heap(&extract_heap, p);
1661 		pipe_remove_from_heap(&wfq_ready_heap, p);
1662 
1663 		kfree(p, M_DUMMYNET);
1664 	} else {	/* This is a WF2Q queue (dn_flow_set) */
1665 		struct dn_flow_set *fs;
1666 
1667 		/* Locate flow_set */
1668 		fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
1669 		if (fs == NULL)
1670 			goto back; /* Not found */
1671 
1672 		LIST_REMOVE(fs, fs_link);
1673 
1674 		if ((p = fs->pipe) != NULL) {
1675 			/* Update total weight on parent pipe and cleanup parent heaps */
1676 			p->sum -= fs->weight * fs->backlogged;
1677 			fs_remove_from_heap(&p->not_eligible_heap, fs);
1678 			fs_remove_from_heap(&p->scheduler_heap, fs);
1679 #if 1	/* XXX should I remove from idle_heap as well? */
1680 			fs_remove_from_heap(&p->idle_heap, fs);
1681 #endif
1682 		}
1683 		purge_flow_set(fs, 1);
1684 	}
1685 	error = 0;
1686 
1687 back:
1688 	return error;
1689 }
1690 
1691 /*
1692  * helper function used to copy data from kernel in DUMMYNET_GET
1693  */
1694 static void
1695 dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
1696 {
1697 	ioc_id->type = ETHERTYPE_IP;
1698 	ioc_id->u.ip.dst_ip = id->fid_dst_ip;
1699 	ioc_id->u.ip.src_ip = id->fid_src_ip;
1700 	ioc_id->u.ip.dst_port = id->fid_dst_port;
1701 	ioc_id->u.ip.src_port = id->fid_src_port;
1702 	ioc_id->u.ip.proto = id->fid_proto;
1703 	ioc_id->u.ip.flags = id->fid_flags;
1704 }
1705 
1706 static void *
1707 dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
1708 {
1709 	struct dn_ioc_flowqueue *ioc_fq = bp;
1710 	int i, copied = 0;
1711 
1712 	for (i = 0; i <= fs->rq_size; i++) {
1713 		const struct dn_flow_queue *q;
1714 
1715 		LIST_FOREACH(q, &fs->rq[i], q_link) {
1716 			if (q->hash_slot != i) {	/* XXX ASSERT */
1717 				kprintf("++ at %d: wrong slot (have %d, "
1718 						"should be %d)\n",
1719 						copied, q->hash_slot, i);
1720 			}
1721 			if (q->fs != fs) {		/* XXX ASSERT */
1722 				kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
1723 						i, q->fs, fs);
1724 			}
1725 
1726 			copied++;
1727 
1728 			ioc_fq->len = q->len;
1729 			ioc_fq->len_bytes = q->len_bytes;
1730 			ioc_fq->tot_pkts = q->tot_pkts;
1731 			ioc_fq->tot_bytes = q->tot_bytes;
1732 			ioc_fq->drops = q->drops;
1733 			ioc_fq->hash_slot = q->hash_slot;
1734 			ioc_fq->S = q->S;
1735 			ioc_fq->F = q->F;
1736 			dn_copy_flowid(&q->id, &ioc_fq->id);
1737 
1738 			ioc_fq++;
1739 		}
1740 	}
1741 
1742 	if (copied != fs->rq_elements) {	/* XXX ASSERT */
1743 		kprintf("++ wrong count, have %d should be %d\n",
1744 				copied, fs->rq_elements);
1745 	}
1746 	return ioc_fq;
1747 }
1748 
1749 static void
1750 dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
1751 		u_short fs_type)
1752 {
1753 	ioc_fs->fs_type = fs_type;
1754 
1755 	ioc_fs->fs_nr = fs->fs_nr;
1756 	ioc_fs->flags_fs = fs->flags_fs;
1757 	ioc_fs->parent_nr = fs->parent_nr;
1758 
1759 	ioc_fs->weight = fs->weight;
1760 	ioc_fs->qsize = fs->qsize;
1761 	ioc_fs->plr = fs->plr;
1762 
1763 	ioc_fs->rq_size = fs->rq_size;
1764 	ioc_fs->rq_elements = fs->rq_elements;
1765 
1766 	ioc_fs->w_q = fs->w_q;
1767 	ioc_fs->max_th = fs->max_th;
1768 	ioc_fs->min_th = fs->min_th;
1769 	ioc_fs->max_p = fs->max_p;
1770 
1771 	dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
1772 }
1773 
1774 static void
1775 dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
1776 {
1777 	size_t *size = sz;
1778 
1779 	*size += sizeof(struct dn_ioc_pipe) +
1780 		pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
1781 }
1782 
1783 static void
1784 dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
1785 {
1786 	size_t *size = sz;
1787 
1788 	*size += sizeof(struct dn_ioc_flowset) +
1789 		fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
1790 }
1791 
1792 static void
1793 dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
1794 {
1795 	char **bp = bp0;
1796 	struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);
1797 
1798 	/*
1799 	 * Copy flow set descriptor associated with this pipe
1800 	 */
1801 	dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);
1802 
1803 	/*
1804 	 * Copy pipe descriptor
1805 	 */
1806 	ioc_pipe->bandwidth = pipe->bandwidth;
1807 	ioc_pipe->pipe_nr = pipe->pipe_nr;
1808 	ioc_pipe->V = pipe->V;
1809 	/* Convert delay to milliseconds */
1810 	ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;
1811 
1812 	/*
1813 	 * Copy flow queue descriptors
1814 	 */
1815 	*bp += sizeof(*ioc_pipe);
1816 	*bp = dn_copy_flowqueues(&pipe->fs, *bp);
1817 }
1818 
1819 static void
1820 dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
1821 {
1822 	char **bp = bp0;
1823 	struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);
1824 
1825 	/*
1826 	 * Copy flow set descriptor
1827 	 */
1828 	dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);
1829 
1830 	/*
1831 	 * Copy flow queue descriptors
1832 	 */
1833 	*bp += sizeof(*ioc_fs);
1834 	*bp = dn_copy_flowqueues(fs, *bp);
1835 }
1836 
1837 static int
1838 dummynet_get(struct dn_sopt *dn_sopt)
1839 {
1840 	char *buf, *bp;
1841 	size_t size = 0;
1842 
1843 	/*
1844 	 * Compute size of data structures: list of pipes and flow_sets.
1845 	 */
1846 	dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
1847 	dn_iterate_flowset(dn_calc_fs_size_cb, &size);
1848 
1849 	/*
1850 	 * Copyout pipe/flow_set/flow_queue
1851 	 */
1852 	bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
1853 	dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
1854 	dn_iterate_flowset(dn_copyout_fs_cb, &bp);
1855 
1856 	/* Temp memory will be freed by caller */
1857 	dn_sopt->dn_sopt_arg = buf;
1858 	dn_sopt->dn_sopt_arglen = size;
1859 	return 0;
1860 }
1861 
1862 /*
1863  * Handler for the various dummynet socket options (get, flush, config, del)
1864  */
1865 static int
1866 dummynet_ctl(struct dn_sopt *dn_sopt)
1867 {
1868 	int error = 0;
1869 
1870 	switch (dn_sopt->dn_sopt_name) {
1871 		case IP_DUMMYNET_GET:
1872 			error = dummynet_get(dn_sopt);
1873 			break;
1874 
1875 		case IP_DUMMYNET_FLUSH:
1876 			dummynet_flush();
1877 			break;
1878 
1879 		case IP_DUMMYNET_CONFIGURE:
1880 			KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1881 			error = config_pipe(dn_sopt->dn_sopt_arg);
1882 			break;
1883 
1884 		case IP_DUMMYNET_DEL:	/* Remove a pipe or flow_set */
1885 			KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1886 			error = delete_pipe(dn_sopt->dn_sopt_arg);
1887 			break;
1888 
1889 		default:
1890 			kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
1891 			error = EINVAL;
1892 			break;
1893 	}
1894 	return error;
1895 }
1896 
1897 static void
1898 dummynet_clock(systimer_t info __unused, int in_ipi __unused,
1899 	struct intrframe *frame __unused)
1900 {
1901 	KASSERT(mycpuid == ip_dn_cpu,
1902 			("dummynet systimer comes on cpu%d, should be %d!",
1903 			 mycpuid, ip_dn_cpu));
1904 
1905 	crit_enter();
1906 	if (DUMMYNET_LOADED && (dn_netmsg.lmsg.ms_flags & MSGF_DONE))
1907 		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), &dn_netmsg.lmsg);
1908 	crit_exit();
1909 }
1910 
1911 static int
1912 sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
1913 {
1914 	int error, val;
1915 
1916 	val = dn_hz;
1917 	error = sysctl_handle_int(oidp, &val, 0, req);
1918 	if (error || req->newptr == NULL)
1919 		return error;
1920 	if (val <= 0)
1921 		return EINVAL;
1922 	else if (val > DN_CALLOUT_FREQ_MAX)
1923 		val = DN_CALLOUT_FREQ_MAX;
1924 
1925 	crit_enter();
1926 	dn_hz = val;
1927 	systimer_adjust_periodic(&dn_clock, val);
1928 	crit_exit();
1929 
1930 	return 0;
1931 }
1932 
1933 static void
1934 ip_dn_init_dispatch(netmsg_t msg)
1935 {
1936 	int i, error = 0;
1937 
1938 	KASSERT(mycpuid == ip_dn_cpu,
1939 			("%s runs on cpu%d, instead of cpu%d", __func__,
1940 			 mycpuid, ip_dn_cpu));
1941 
1942 	crit_enter();
1943 
1944 	if (DUMMYNET_LOADED) {
1945 		kprintf("DUMMYNET already loaded\n");
1946 		error = EEXIST;
1947 		goto back;
1948 	}
1949 
1950 	kprintf("DUMMYNET initialized (011031)\n");
1951 
1952 	for (i = 0; i < DN_NR_HASH_MAX; ++i)
1953 		LIST_INIT(&pipe_table[i]);
1954 
1955 	for (i = 0; i < DN_NR_HASH_MAX; ++i)
1956 		LIST_INIT(&flowset_table[i]);
1957 
1958 	ready_heap.size = ready_heap.elements = 0;
1959 	ready_heap.offset = 0;
1960 
1961 	wfq_ready_heap.size = wfq_ready_heap.elements = 0;
1962 	wfq_ready_heap.offset = 0;
1963 
1964 	extract_heap.size = extract_heap.elements = 0;
1965 	extract_heap.offset = 0;
1966 
1967 	ip_dn_ctl_ptr = dummynet_ctl;
1968 	ip_dn_io_ptr = dummynet_io;
1969 
1970 	netmsg_init(&dn_netmsg, NULL, &netisr_adone_rport,
1971 			0, dummynet);
1972 	systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
1973 
1974 back:
1975 	crit_exit();
1976 	lwkt_replymsg(&msg->lmsg, error);
1977 }
1978 
1979 static int
1980 ip_dn_init(void)
1981 {
1982 	struct netmsg_base smsg;
1983 
1984 	if (ip_dn_cpu >= ncpus) {
1985 		kprintf("%s: CPU%d does not exist, switch to CPU0\n",
1986 				__func__, ip_dn_cpu);
1987 		ip_dn_cpu = 0;
1988 	}
1989 
1990 	register_ipfw_module(MODULE_DUMMYNET_ID, MODULE_DUMMYNET_NAME);
1991 	register_ipfw_filter_funcs(MODULE_DUMMYNET_ID, O_DUMMYNET_PIPE,
1992 			(filter_func)check_pipe);
1993 	register_ipfw_filter_funcs(MODULE_DUMMYNET_ID, O_DUMMYNET_QUEUE,
1994 			(filter_func)check_queue);
1995 
1996 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
1997 			0, ip_dn_init_dispatch);
1998 	lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
1999 	return smsg.lmsg.ms_error;
2000 }
2001 
2002 #ifdef KLD_MODULE
2003 
2004 static void
2005 ip_dn_stop_dispatch(netmsg_t msg)
2006 {
2007 	crit_enter();
2008 
2009 	dummynet_flush();
2010 
2011 	ip_dn_ctl_ptr = NULL;
2012 	ip_dn_io_ptr = NULL;
2013 
2014 	systimer_del(&dn_clock);
2015 
2016 	crit_exit();
2017 	lwkt_replymsg(&msg->lmsg, 0);
2018 }
2019 
2020 static void
2021 ip_dn_stop(void)
2022 {
2023 	struct netmsg_base smsg;
2024 
2025 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2026 			0, ip_dn_stop_dispatch);
2027 	lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
2028 
2029 	netmsg_service_sync();
2030 }
2031 
2032 #endif	/* KLD_MODULE */
2033 
2034 static int
2035 dummynet_modevent(module_t mod, int type, void *data)
2036 {
2037 	switch (type) {
2038 		case MOD_LOAD:
2039 			return ip_dn_init();
2040 
2041 		case MOD_UNLOAD:
2042 #ifndef KLD_MODULE
2043 			kprintf("dummynet statically compiled, cannot unload\n");
2044 			return EINVAL;
2045 #else
2046 			ip_dn_stop();
2047 #endif
2048 			break;
2049 
2050 		default:
2051 			break;
2052 	}
2053 	return 0;
2054 }
2055 
2056 static moduledata_t dummynet_mod = {
2057 	"dummynet2",
2058 	dummynet_modevent,
2059 	NULL
2060 };
2061 DECLARE_MODULE(dummynet3, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2062 MODULE_DEPEND(dummynet3, ipfw3_basic, 1, 1, 1);
2063 MODULE_VERSION(dummynet3, 1);
2064