xref: /dragonfly/sys/net/dummynet3/ip_dummynet3.c (revision eea5ad68)
1 /*
2  * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
3  * Portions Copyright (c) 2000 Akamba Corp.
4  * All rights reserved
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
28  */
29 
30 #include "opt_ipfw.h"
31 #include "opt_inet.h"
32 #ifndef INET
33 #error IPFIREWALL3 requires INET.
34 #endif /* INET */
35 
36 /*
37  * This module implements IP dummynet, a bandwidth limiter/delay emulator.
38  * Description of the data structures used is in ip_dummynet.h
39  * Here you mainly find the following blocks of code:
40  *  + variable declarations;
41  *  + heap management functions;
42  *  + scheduler and dummynet functions;
43  *  + configuration and initialization.
44  *
45  * Most important Changes:
46  *
47  * 011004: KLDable
48  * 010124: Fixed WF2Q behaviour
49  * 010122: Fixed spl protection.
50  * 000601: WF2Q support
51  * 000106: Large rewrite, use heaps to handle very many pipes.
52  * 980513: Initial release
53  */
54 
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/socketvar.h>
60 #include <sys/sysctl.h>
61 #include <sys/systimer.h>
62 #include <sys/thread2.h>
63 
64 #include <net/ethernet.h>
65 #include <net/netmsg2.h>
66 #include <net/netisr2.h>
67 #include <net/route.h>
68 
69 #include <netinet/in_var.h>
70 #include <netinet/ip_var.h>
71 
72 #include <net/dummynet3/ip_dummynet3.h>
73 #include <net/ipfw3/ip_fw.h>
74 
75 void check_pipe(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
76 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
77 void check_queue(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
78 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
79 
80 void
81 check_pipe(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
82 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
83 {
84 	(*args)->rule = *f;
85 	(*args)->cookie = cmd->arg1;
86 	*cmd_val = IP_FW_DUMMYNET;
87 	*cmd_ctl = IP_FW_CTL_DONE;
88 }
89 
90 void
91 check_queue(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
92 		struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
93 {
94 	(*args)->rule = *f;
95 	(*args)->cookie = cmd->arg1;
96 	*cmd_val = IP_FW_DUMMYNET;
97 	*cmd_ctl = IP_FW_CTL_DONE;
98 }
99 
100 #ifdef DUMMYNET_DEBUG
101 #define DPRINTF(fmt, ...)	kprintf(fmt, __VA_ARGS__)
102 #else
103 #define DPRINTF(fmt, ...)	((void)0)
104 #endif
105 
106 #ifndef DN_CALLOUT_FREQ_MAX
107 #define DN_CALLOUT_FREQ_MAX	10000
108 #endif
109 
110 /*
111  * The maximum/minimum hash table size for queues.
112  * These values must be a power of 2.
113  */
114 #define DN_MIN_HASH_SIZE	4
115 #define DN_MAX_HASH_SIZE	65536
116 
117 /*
118  * Some macros are used to compare key values and handle wraparounds.
119  * MAX64 returns the largest of two key values.
120  */
121 #define DN_KEY_LT(a, b)		((int64_t)((a) - (b)) < 0)
122 #define DN_KEY_LEQ(a, b)	((int64_t)((a) - (b)) <= 0)
123 #define DN_KEY_GT(a, b)		((int64_t)((a) - (b)) > 0)
124 #define DN_KEY_GEQ(a, b)	((int64_t)((a) - (b)) >= 0)
125 #define MAX64(x, y)		((((int64_t)((y) - (x))) > 0) ? (y) : (x))
126 
127 #define DN_NR_HASH_MAX		16
128 #define DN_NR_HASH_MASK		(DN_NR_HASH_MAX - 1)
129 #define DN_NR_HASH(nr)		\
130 	((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)
131 
132 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
133 
134 extern int	ip_dn_cpu;
135 
136 static dn_key	curr_time = 0;		/* current simulation time */
137 static int	dn_hash_size = 64;	/* default hash size */
138 static int	pipe_expire = 1;	/* expire queue if empty */
139 static int	dn_max_ratio = 16;	/* max queues/buckets ratio */
140 
141 /*
142  * Statistics on number of queue searches and search steps
143  */
144 static int	searches;
145 static int	search_steps;
146 
147 /*
148  * RED parameters
149  */
150 static int	red_lookup_depth = 256;	/* default lookup table depth */
151 static int	red_avg_pkt_size = 512;	/* default medium packet size */
152 static int	red_max_pkt_size = 1500;/* default max packet size */
153 
154 /*
155  * Three heaps contain queues and pipes that the scheduler handles:
156  *
157  *  + ready_heap	contains all dn_flow_queue related to fixed-rate pipes.
158  *  + wfq_ready_heap	contains the pipes associated with WF2Q flows
159  *  + extract_heap	contains pipes associated with delay lines.
160  */
161 static struct dn_heap	ready_heap;
162 static struct dn_heap	extract_heap;
163 static struct dn_heap	wfq_ready_heap;
164 
165 static struct dn_pipe_head	pipe_table[DN_NR_HASH_MAX];
166 static struct dn_flowset_head	flowset_table[DN_NR_HASH_MAX];
167 
168 /*
169  * Variables for dummynet systimer
170  */
171 static struct netmsg_base dn_netmsg;
172 static struct systimer	dn_clock;
173 static int		dn_hz = 1000;
174 
175 static int	sysctl_dn_hz(SYSCTL_HANDLER_ARGS);
176 
177 SYSCTL_DECL(_net_inet_ip_dummynet);
178 
179 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
180 		&dn_hash_size, 0, "Default hash table size");
181 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
182 		&curr_time, 0, "Current tick");
183 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
184 		&pipe_expire, 0, "Expire queue if empty");
185 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
186 		&dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");
187 
188 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
189 		&ready_heap.size, 0, "Size of ready heap");
190 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
191 		&extract_heap.size, 0, "Size of extract heap");
192 
193 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
194 		&searches, 0, "Number of queue searches");
195 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
196 		&search_steps, 0, "Number of queue search steps");
197 
198 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
199 		&red_lookup_depth, 0, "Depth of RED lookup table");
200 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
201 		&red_avg_pkt_size, 0, "RED Medium packet size");
202 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
203 		&red_max_pkt_size, 0, "RED Max packet size");
204 
205 SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
206 		0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");
207 
208 static int	heap_init(struct dn_heap *, int);
209 static int	heap_insert(struct dn_heap *, dn_key, void *);
210 static void	heap_extract(struct dn_heap *, void *);
211 
212 static void	transmit_event(struct dn_pipe *);
213 static void	ready_event(struct dn_flow_queue *);
214 static void	ready_event_wfq(struct dn_pipe *);
215 
216 static int	config_pipe(struct dn_ioc_pipe *);
217 static void	dummynet_flush(void);
218 
219 static void	dummynet_clock(systimer_t, int, struct intrframe *);
220 static void	dummynet(netmsg_t);
221 
222 static struct dn_pipe *dn_find_pipe(int);
223 static struct dn_flow_set *dn_locate_flowset(int, int);
224 
225 typedef void	(*dn_pipe_iter_t)(struct dn_pipe *, void *);
226 static void	dn_iterate_pipe(dn_pipe_iter_t, void *);
227 
228 typedef void	(*dn_flowset_iter_t)(struct dn_flow_set *, void *);
229 static void	dn_iterate_flowset(dn_flowset_iter_t, void *);
230 
231 static ip_dn_io_t	dummynet_io;
232 static ip_dn_ctl_t	dummynet_ctl;
233 
234 /*
235  * Heap management functions.
236  *
237  * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
238  * Some macros help finding parent/children so we can optimize them.
239  *
240  * heap_init() is called to expand the heap when needed.
241  * Increment size in blocks of 16 entries.
242  * XXX failure to allocate a new element is a pretty bad failure
243  * as we basically stall a whole queue forever!!
244  * Returns 1 on error, 0 on success
245  */
246 #define HEAP_FATHER(x)		(((x) - 1) / 2)
247 #define HEAP_LEFT(x)		(2*(x) + 1)
248 #define HEAP_IS_LEFT(x)		((x) & 1)
249 #define HEAP_RIGHT(x)		(2*(x) + 2)
250 #define HEAP_SWAP(a, b, buffer)	{ buffer = a; a = b; b = buffer; }
251 #define HEAP_INCREMENT		15
252 
253 static int
254 heap_init(struct dn_heap *h, int new_size)
255 {
256 	struct dn_heap_entry *p;
257 
258 	if (h->size >= new_size) {
259 		kprintf("%s, Bogus call, have %d want %d\n", __func__,
260 				h->size, new_size);
261 		return 0;
262 	}
263 
264 	new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
265 	p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
266 	if (h->size > 0) {
267 		bcopy(h->p, p, h->size * sizeof(*p));
268 		kfree(h->p, M_DUMMYNET);
269 	}
270 	h->p = p;
271 	h->size = new_size;
272 	return 0;
273 }
274 
275 /*
276  * Insert element in heap. Normally, p != NULL, we insert p in
277  * a new position and bubble up.  If p == NULL, then the element is
278  * already in place, and key is the position where to start the
279  * bubble-up.
280  * Returns 1 on failure (cannot allocate new heap entry)
281  *
282  * If offset > 0 the position (index, int) of the element in the heap is
283  * also stored in the element itself at the given offset in bytes.
284  */
285 #define SET_OFFSET(heap, node) \
286 	if (heap->offset > 0) \
287 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = node;
288 
289 /*
290  * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
291  */
292 #define RESET_OFFSET(heap, node) \
293 	if (heap->offset > 0) \
294 	*((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;
295 
296 static int
297 heap_insert(struct dn_heap *h, dn_key key1, void *p)
298 {
299 	int son;
300 
301 	if (p == NULL) {	/* Data already there, set starting point */
302 		son = key1;
303 	} else {		/* Insert new element at the end, possibly resize */
304 		son = h->elements;
305 		if (son == h->size) { /* Need resize... */
306 			if (heap_init(h, h->elements + 1))
307 				return 1; /* Failure... */
308 		}
309 		h->p[son].object = p;
310 		h->p[son].key = key1;
311 		h->elements++;
312 	}
313 
314 	while (son > 0) {	/* Bubble up */
315 		int father = HEAP_FATHER(son);
316 		struct dn_heap_entry tmp;
317 
318 		if (DN_KEY_LT(h->p[father].key, h->p[son].key))
319 			break; /* Found right position */
320 
321 		/* 'son' smaller than 'father', swap and repeat */
322 		HEAP_SWAP(h->p[son], h->p[father], tmp);
323 		SET_OFFSET(h, son);
324 		son = father;
325 	}
326 	SET_OFFSET(h, son);
327 	return 0;
328 }
329 
330 /*
331  * Remove top element from heap, or obj if obj != NULL
332  */
333 static void
334 heap_extract(struct dn_heap *h, void *obj)
335 {
336 	int child, father, max = h->elements - 1;
337 
338 	if (max < 0) {
339 		kprintf("warning, extract from empty heap 0x%p\n", h);
340 		return;
341 	}
342 
343 	father = 0; /* Default: move up smallest child */
344 	if (obj != NULL) { /* Extract specific element, index is at offset */
345 		if (h->offset <= 0)
346 			panic("%s from middle not supported on this heap!!!", __func__);
347 
348 		father = *((int *)((char *)obj + h->offset));
349 		if (father < 0 || father >= h->elements) {
350 			panic("%s father %d out of bound 0..%d", __func__,
351 					father, h->elements);
352 		}
353 	}
354 	RESET_OFFSET(h, father);
355 
356 	child = HEAP_LEFT(father);		/* Left child */
357 	while (child <= max) {		/* Valid entry */
358 		if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
359 			child = child + 1;		/* Take right child, otherwise left */
360 		h->p[father] = h->p[child];
361 		SET_OFFSET(h, father);
362 		father = child;
363 		child = HEAP_LEFT(child);	/* Left child for next loop */
364 	}
365 	h->elements--;
366 	if (father != max) {
367 		/*
368 		 * Fill hole with last entry and bubble up, reusing the insert code
369 		 */
370 		h->p[father] = h->p[max];
371 		heap_insert(h, father, NULL);	/* This one cannot fail */
372 	}
373 }
374 
375 /*
376  * heapify() will reorganize data inside an array to maintain the
377  * heap property.  It is needed when we delete a bunch of entries.
378  */
379 static void
380 heapify(struct dn_heap *h)
381 {
382 	int i;
383 
384 	for (i = 0; i < h->elements; i++)
385 		heap_insert(h, i , NULL);
386 }
387 
388 /*
389  * Cleanup the heap and free data structure
390  */
391 static void
392 heap_free(struct dn_heap *h)
393 {
394 	if (h->size > 0)
395 		kfree(h->p, M_DUMMYNET);
396 	bzero(h, sizeof(*h));
397 }
398 
399 /*
400  * --- End of heap management functions ---
401  */
402 
403 /*
404  * Scheduler functions:
405  *
406  * transmit_event() is called when the delay-line needs to enter
407  * the scheduler, either because of existing pkts getting ready,
408  * or new packets entering the queue.  The event handled is the delivery
409  * time of the packet.
410  *
411  * ready_event() does something similar with fixed-rate queues, and the
412  * event handled is the finish time of the head pkt.
413  *
414  * ready_event_wfq() does something similar with WF2Q queues, and the
415  * event handled is the start time of the head pkt.
416  *
417  * In all cases, we make sure that the data structures are consistent
418  * before passing pkts out, because this might trigger recursive
419  * invocations of the procedures.
420  */
421 static void
422 transmit_event(struct dn_pipe *pipe)
423 {
424 	struct dn_pkt *pkt;
425 
426 	while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
427 			DN_KEY_LEQ(pkt->output_time, curr_time)) {
428 		TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
429 		ip_dn_packet_redispatch(pkt);
430 	}
431 
432 	/*
433 	 * If there are leftover packets, put into the heap for next event
434 	 */
435 	if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
436 		/*
437 		 * XXX should check errors on heap_insert, by draining the
438 		 * whole pipe and hoping in the future we are more successful
439 		 */
440 		heap_insert(&extract_heap, pkt->output_time, pipe);
441 	}
442 }
443 
444 /*
445  * The following macro computes how many ticks we have to wait
446  * before being able to transmit a packet. The credit is taken from
447  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
448  */
449 #define SET_TICKS(pkt, q, p)	\
450 	(pkt->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + p->bandwidth - 1 ) / \
451 		p->bandwidth;
452 
453 /*
454  * Extract pkt from queue, compute output time (could be now)
455  * and put into delay line (p_queue)
456  */
457 static void
458 move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
459 	 struct dn_pipe *p, int len)
460 {
461 	TAILQ_REMOVE(&q->queue, pkt, dn_next);
462 	q->len--;
463 	q->len_bytes -= len;
464 
465 	pkt->output_time = curr_time + p->delay;
466 
467 	TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
468 }
469 
470 /*
471  * ready_event() is invoked every time the queue must enter the
472  * scheduler, either because the first packet arrives, or because
473  * a previously scheduled event fired.
474  * On invokation, drain as many pkts as possible (could be 0) and then
475  * if there are leftover packets reinsert the pkt in the scheduler.
476  */
477 static void
478 ready_event(struct dn_flow_queue *q)
479 {
480 	struct dn_pkt *pkt;
481 	struct dn_pipe *p = q->fs->pipe;
482 	int p_was_empty;
483 
484 	if (p == NULL) {
485 		kprintf("ready_event- pipe is gone\n");
486 		return;
487 	}
488 	p_was_empty = TAILQ_EMPTY(&p->p_queue);
489 
490 	/*
491 	 * Schedule fixed-rate queues linked to this pipe:
492 	 * Account for the bw accumulated since last scheduling, then
493 	 * drain as many pkts as allowed by q->numbytes and move to
494 	 * the delay line (in p) computing output time.
495 	 * bandwidth==0 (no limit) means we can drain the whole queue,
496 	 * setting len_scaled = 0 does the job.
497 	 */
498 	q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
499 	while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
500 		int len = pkt->dn_m->m_pkthdr.len;
501 		int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
502 
503 		if (len_scaled > q->numbytes)
504 			break;
505 		q->numbytes -= len_scaled;
506 		move_pkt(pkt, q, p, len);
507 	}
508 
509 	/*
510 	 * If we have more packets queued, schedule next ready event
511 	 * (can only occur when bandwidth != 0, otherwise we would have
512 	 * flushed the whole queue in the previous loop).
513 	 * To this purpose we record the current time and compute how many
514 	 * ticks to go for the finish time of the packet.
515 	 */
516 	if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
517 		/* This implies bandwidth != 0 */
518 		dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
519 
520 		q->sched_time = curr_time;
521 
522 		/*
523 		 * XXX should check errors on heap_insert, and drain the whole
524 		 * queue on error hoping next time we are luckier.
525 		 */
526 		heap_insert(&ready_heap, curr_time + t, q);
527 	} else {	/* RED needs to know when the queue becomes empty */
528 		q->q_time = curr_time;
529 		q->numbytes = 0;
530 	}
531 
532 	/*
533 	 * If the delay line was empty call transmit_event(p) now.
534 	 * Otherwise, the scheduler will take care of it.
535 	 */
536 	if (p_was_empty)
537 		transmit_event(p);
538 }
539 
540 /*
541  * Called when we can transmit packets on WF2Q queues.  Take pkts out of
542  * the queues at their start time, and enqueue into the delay line.
543  * Packets are drained until p->numbytes < 0.  As long as
544  * len_scaled >= p->numbytes, the packet goes into the delay line
545  * with a deadline p->delay.  For the last packet, if p->numbytes < 0,
546  * there is an additional delay.
547  */
548 static void
549 ready_event_wfq(struct dn_pipe *p)
550 {
551 	int p_was_empty = TAILQ_EMPTY(&p->p_queue);
552 	struct dn_heap *sch = &p->scheduler_heap;
553 	struct dn_heap *neh = &p->not_eligible_heap;
554 
555 	p->numbytes += (curr_time - p->sched_time) * p->bandwidth;
556 
557 	/*
558 	 * While we have backlogged traffic AND credit, we need to do
559 	 * something on the queue.
560 	 */
561 	while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
562 		if (sch->elements > 0) { /* Have some eligible pkts to send out */
563 			struct dn_flow_queue *q = sch->p[0].object;
564 			struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
565 			struct dn_flow_set *fs = q->fs;
566 			uint64_t len = pkt->dn_m->m_pkthdr.len;
567 			int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
568 
569 			heap_extract(sch, NULL);	/* Remove queue from heap */
570 			p->numbytes -= len_scaled;
571 			move_pkt(pkt, q, p, len);
572 
573 			p->V += (len << MY_M) / p->sum;	/* Update V */
574 			q->S = q->F;			/* Update start time */
575 
576 			if (q->len == 0) {	/* Flow not backlogged any more */
577 				fs->backlogged--;
578 				heap_insert(&p->idle_heap, q->F, q);
579 			} else {		/* Still backlogged */
580 				/*
581 				 * Update F and position in backlogged queue, then
582 				 * put flow in not_eligible_heap (we will fix this later).
583 				 */
584 				len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
585 				q->F += (len << MY_M) / (uint64_t)fs->weight;
586 				if (DN_KEY_LEQ(q->S, p->V))
587 					heap_insert(neh, q->S, q);
588 				else
589 					heap_insert(sch, q->F, q);
590 			}
591 		}
592 
593 		/*
594 		 * Now compute V = max(V, min(S_i)).  Remember that all elements in
595 		 * sch have by definition S_i <= V so if sch is not empty, V is surely
596 		 * the max and we must not update it.  Conversely, if sch is empty
597 		 * we only need to look at neh.
598 		 */
599 		if (sch->elements == 0 && neh->elements > 0)
600 			p->V = MAX64(p->V, neh->p[0].key);
601 
602 		/*
603 		 * Move from neh to sch any packets that have become eligible
604 		 */
605 		while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
606 			struct dn_flow_queue *q = neh->p[0].object;
607 
608 			heap_extract(neh, NULL);
609 			heap_insert(sch, q->F, q);
610 		}
611 	}
612 
613 	if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
614 			p->idle_heap.elements > 0) {
615 		/*
616 		 * No traffic and no events scheduled.  We can get rid of idle-heap.
617 		 */
618 		int i;
619 
620 		for (i = 0; i < p->idle_heap.elements; i++) {
621 			struct dn_flow_queue *q = p->idle_heap.p[i].object;
622 
623 			q->F = 0;
624 			q->S = q->F + 1;
625 		}
626 		p->sum = 0;
627 		p->V = 0;
628 		p->idle_heap.elements = 0;
629 	}
630 
631 	/*
632 	 * If we are getting clocks from dummynet and if we are under credit,
633 	 * schedule the next ready event.
634 	 * Also fix the delivery time of the last packet.
635 	 */
636 	if (p->numbytes < 0) { /* This implies bandwidth>0 */
637 		dn_key t = 0; /* Number of ticks i have to wait */
638 
639 		if (p->bandwidth > 0)
640 			t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
641 		TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
642 		p->sched_time = curr_time;
643 
644 		/*
645 		 * XXX should check errors on heap_insert, and drain the whole
646 		 * queue on error hoping next time we are luckier.
647 		 */
648 		heap_insert(&wfq_ready_heap, curr_time + t, p);
649 	}
650 
651 	/*
652 	 * If the delay line was empty call transmit_event(p) now.
653 	 * Otherwise, the scheduler will take care of it.
654 	 */
655 	if (p_was_empty)
656 		transmit_event(p);
657 }
658 
659 static void
660 dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
661 {
662 	if (pipe->idle_heap.elements > 0 &&
663 			DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
664 		struct dn_flow_queue *q = pipe->idle_heap.p[0].object;
665 
666 		heap_extract(&pipe->idle_heap, NULL);
667 		q->S = q->F + 1; /* Mark timestamp as invalid */
668 		pipe->sum -= q->fs->weight;
669 	}
670 }
671 
672 /*
673  * This is called once per tick, or dn_hz times per second.  It is used to
674  * increment the current tick counter and schedule expired events.
675  */
676 static void
677 dummynet(netmsg_t msg)
678 {
679 	void *p;
680 	struct dn_heap *h;
681 	struct dn_heap *heaps[3];
682 	int i;
683 
684 	heaps[0] = &ready_heap;		/* Fixed-rate queues */
685 	heaps[1] = &wfq_ready_heap;		/* WF2Q queues */
686 	heaps[2] = &extract_heap;		/* Delay line */
687 
688 	/* Reply ASAP */
689 	crit_enter();
690 	lwkt_replymsg(&msg->lmsg, 0);
691 	crit_exit();
692 
693 	curr_time++;
694 	for (i = 0; i < 3; i++) {
695 		h = heaps[i];
696 		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
697 			if (h->p[0].key > curr_time) {
698 				kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
699 						i, (int)(curr_time - h->p[0].key));
700 			}
701 
702 			p = h->p[0].object;		/* Store a copy before heap_extract */
703 			heap_extract(h, NULL);	/* Need to extract before processing */
704 
705 			if (i == 0)
706 				ready_event(p);
707 			else if (i == 1)
708 				ready_event_wfq(p);
709 			else
710 				transmit_event(p);
711 		}
712 	}
713 
714 	/* Sweep pipes trying to expire idle flow_queues */
715 	dn_iterate_pipe(dn_expire_pipe_cb, NULL);
716 }
717 
718 /*
719  * Unconditionally expire empty queues in case of shortage.
720  * Returns the number of queues freed.
721  */
722 static int
723 expire_queues(struct dn_flow_set *fs)
724 {
725 	int i, initial_elements = fs->rq_elements;
726 
727 	if (fs->last_expired == time_uptime)
728 		return 0;
729 
730 	fs->last_expired = time_uptime;
731 
732 	for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */
733 		struct dn_flow_queue *q, *qn;
734 
735 		LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
736 			if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
737 				continue;
738 
739 			/*
740 			 * Entry is idle, expire it
741 			 */
742 			LIST_REMOVE(q, q_link);
743 			kfree(q, M_DUMMYNET);
744 
745 			KASSERT(fs->rq_elements > 0,
746 					("invalid rq_elements %d", fs->rq_elements));
747 			fs->rq_elements--;
748 		}
749 	}
750 	return initial_elements - fs->rq_elements;
751 }
752 
753 /*
754  * If room, create a new queue and put at head of slot i;
755  * otherwise, create or use the default queue.
756  */
757 static struct dn_flow_queue *
758 create_queue(struct dn_flow_set *fs, int i)
759 {
760 	struct dn_flow_queue *q;
761 
762 	if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
763 			expire_queues(fs) == 0) {
764 		/*
765 		 * No way to get room, use or create overflow queue.
766 		 */
767 		i = fs->rq_size;
768 		if (!LIST_EMPTY(&fs->rq[i]))
769 			return LIST_FIRST(&fs->rq[i]);
770 	}
771 
772 	q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
773 	if (q == NULL)
774 		return NULL;
775 
776 	q->fs = fs;
777 	q->hash_slot = i;
778 	q->S = q->F + 1;   /* hack - mark timestamp as invalid */
779 	TAILQ_INIT(&q->queue);
780 
781 	LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
782 	fs->rq_elements++;
783 
784 	return q;
785 }
786 
787 /*
788  * Given a flow_set and a pkt in last_pkt, find a matching queue
789  * after appropriate masking. The queue is moved to front
790  * so that further searches take less time.
791  */
792 static struct dn_flow_queue *
793 find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
794 {
795 	struct dn_flow_queue *q;
796 	int i = 0;
797 
798 	if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
799 		q = LIST_FIRST(&fs->rq[0]);
800 	} else {
801 		struct dn_flow_queue *qn;
802 
803 		/* First, do the masking */
804 		id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
805 		id->fid_src_ip &= fs->flow_mask.fid_src_ip;
806 		id->fid_dst_port &= fs->flow_mask.fid_dst_port;
807 		id->fid_src_port &= fs->flow_mask.fid_src_port;
808 		id->fid_proto &= fs->flow_mask.fid_proto;
809 		id->fid_flags = 0; /* we don't care about this one */
810 
811 		/* Then, hash function */
812 		i = ((id->fid_dst_ip) & 0xffff) ^
813 			((id->fid_dst_ip >> 15) & 0xffff) ^
814 			((id->fid_src_ip << 1) & 0xffff) ^
815 			((id->fid_src_ip >> 16 ) & 0xffff) ^
816 			(id->fid_dst_port << 1) ^ (id->fid_src_port) ^
817 			(id->fid_proto);
818 		i = i % fs->rq_size;
819 
820 		/*
821 		 * Finally, scan the current list for a match and
822 		 * expire idle flow queues
823 		 */
824 		searches++;
825 		LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
826 			search_steps++;
827 			if (id->fid_dst_ip == q->id.fid_dst_ip &&
828 					id->fid_src_ip == q->id.fid_src_ip &&
829 					id->fid_dst_port == q->id.fid_dst_port &&
830 					id->fid_src_port == q->id.fid_src_port &&
831 					id->fid_proto == q->id.fid_proto &&
832 					id->fid_flags == q->id.fid_flags) {
833 				break; /* Found */
834 			} else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
835 					q->S == q->F + 1) {
836 				/*
837 				 * Entry is idle and not in any heap, expire it
838 				 */
839 				LIST_REMOVE(q, q_link);
840 				kfree(q, M_DUMMYNET);
841 
842 				KASSERT(fs->rq_elements > 0,
843 						("invalid rq_elements %d", fs->rq_elements));
844 				fs->rq_elements--;
845 			}
846 		}
847 		if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */
848 			LIST_REMOVE(q, q_link);
849 			LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
850 		}
851 	}
852 	if (q == NULL) {	/* No match, need to allocate a new entry */
853 		q = create_queue(fs, i);
854 		if (q != NULL)
855 			q->id = *id;
856 	}
857 	return q;
858 }
859 
860 static int
861 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
862 {
863 	/*
864 	 * RED algorithm
865 	 *
866 	 * RED calculates the average queue size (avg) using a low-pass filter
867 	 * with an exponential weighted (w_q) moving average:
868 	 * 	avg  <-  (1-w_q) * avg + w_q * q_size
869 	 * where q_size is the queue length (measured in bytes or * packets).
870 	 *
871 	 * If q_size == 0, we compute the idle time for the link, and set
872 	 *	avg = (1 - w_q)^(idle/s)
873 	 * where s is the time needed for transmitting a medium-sized packet.
874 	 *
875 	 * Now, if avg < min_th the packet is enqueued.
876 	 * If avg > max_th the packet is dropped. Otherwise, the packet is
877 	 * dropped with probability P function of avg.
878 	 */
879 
880 	int64_t p_b = 0;
881 	u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
882 
883 	IPFW3_DEBUG("\n%d q: %2u ", (int)curr_time, q_size);
884 
885 	/* Average queue size estimation */
886 	if (q_size != 0) {
887 		/*
888 		 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
889 		 */
890 		int diff = SCALE(q_size) - q->avg;
891 		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
892 
893 		q->avg += (int)v;
894 	} else {
895 		/*
896 		 * Queue is empty, find for how long the queue has been
897 		 * empty and use a lookup table for computing
898 		 * (1 - * w_q)^(idle_time/s) where s is the time to send a
899 		 * (small) packet.
900 		 * XXX check wraps...
901 		 */
902 		if (q->avg) {
903 			u_int t = (curr_time - q->q_time) / fs->lookup_step;
904 
905 			q->avg = (t < fs->lookup_depth) ?
906 				SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
907 		}
908 	}
909 	IPFW3_DEBUG("avg: %u ", SCALE_VAL(q->avg));
910 
911 	/* Should i drop? */
912 
913 	if (q->avg < fs->min_th) {
914 		/* Accept packet */
915 		q->count = -1;
916 		return 0;
917 	}
918 
919 	if (q->avg >= fs->max_th) { /* Average queue >=  Max threshold */
920 		if (fs->flags_fs & DN_IS_GENTLE_RED) {
921 			/*
922 			 * According to Gentle-RED, if avg is greater than max_th the
923 			 * packet is dropped with a probability
924 			 *	p_b = c_3 * avg - c_4
925 			 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
926 			 */
927 			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
928 		} else {
929 			q->count = -1;
930 			kprintf("- drop\n");
931 			return 1;
932 		}
933 	} else if (q->avg > fs->min_th) {
934 		/*
935 		 * We compute p_b using the linear dropping function p_b = c_1 *
936 		 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
937 		 * max_p * min_th / (max_th - min_th)
938 		 */
939 		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
940 	}
941 	if (fs->flags_fs & DN_QSIZE_IS_BYTES)
942 		p_b = (p_b * len) / fs->max_pkt_size;
943 
944 	if (++q->count == 0) {
945 		q->random = krandom() & 0xffff;
946 	} else {
947 		/*
948 		 * q->count counts packets arrived since last drop, so a greater
949 		 * value of q->count means a greater packet drop probability.
950 		 */
951 		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
952 			q->count = 0;
953 			IPFW3_DEBUG("%s", "- red drop");
954 			/* After a drop we calculate a new random value */
955 			q->random = krandom() & 0xffff;
956 			return 1;	/* Drop */
957 		}
958 	}
959 	/* End of RED algorithm */
960 	return 0; /* Accept */
961 }
962 
963 static void
964 dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
965 {
966 	int i;
967 
968 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
969 		struct dn_pipe_head *pipe_hdr = &pipe_table[i];
970 		struct dn_pipe *pipe, *pipe_next;
971 
972 		LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
973 			func(pipe, arg);
974 	}
975 }
976 
977 static void
978 dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
979 {
980 	int i;
981 
982 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
983 		struct dn_flowset_head *fs_hdr = &flowset_table[i];
984 		struct dn_flow_set *fs, *fs_next;
985 
986 		LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
987 			func(fs, arg);
988 	}
989 }
990 
991 static struct dn_pipe *
992 dn_find_pipe(int pipe_nr)
993 {
994 	struct dn_pipe_head *pipe_hdr;
995 	struct dn_pipe *p;
996 
997 	pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
998 	LIST_FOREACH(p, pipe_hdr, p_link) {
999 		if (p->pipe_nr == pipe_nr)
1000 			break;
1001 	}
1002 	return p;
1003 }
1004 
1005 static struct dn_flow_set *
1006 dn_find_flowset(int fs_nr)
1007 {
1008 	struct dn_flowset_head *fs_hdr;
1009 	struct dn_flow_set *fs;
1010 
1011 	fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
1012 	LIST_FOREACH(fs, fs_hdr, fs_link) {
1013 		if (fs->fs_nr == fs_nr)
1014 			break;
1015 	}
1016 	return fs;
1017 }
1018 
1019 static struct dn_flow_set *
1020 dn_locate_flowset(int pipe_nr, int is_pipe)
1021 {
1022 	struct dn_flow_set *fs = NULL;
1023 
1024 	if (!is_pipe) {
1025 		fs = dn_find_flowset(pipe_nr);
1026 	} else {
1027 		struct dn_pipe *p;
1028 
1029 		p = dn_find_pipe(pipe_nr);
1030 		if (p != NULL)
1031 			fs = &p->fs;
1032 	}
1033 	return fs;
1034 }
1035 
1036 /*
1037  * Dummynet hook for packets.  Below 'pipe' is a pipe or a queue
1038  * depending on whether WF2Q or fixed bw is used.
1039  *
1040  * pipe_nr	pipe or queue the packet is destined for.
1041  * dir		where shall we send the packet after dummynet.
1042  * m		the mbuf with the packet
1043  * fwa->oif	the 'ifp' parameter from the caller.
1044  *		NULL in ip_input, destination interface in ip_output
1045  * fwa->ro	route parameter (only used in ip_output, NULL otherwise)
1046  * fwa->dst	destination address, only used by ip_output
1047  * fwa->rule	matching rule, in case of multiple passes
1048  * fwa->flags	flags from the caller, only used in ip_output
1049  */
1050 static int
1051 dummynet_io(struct mbuf *m)
1052 {
1053 	struct dn_pkt *pkt;
1054 	struct m_tag *tag;
1055 	struct dn_flow_set *fs;
1056 	struct dn_pipe *pipe;
1057 	uint64_t len = m->m_pkthdr.len;
1058 	struct dn_flow_queue *q = NULL;
1059 	int is_pipe, pipe_nr;
1060 
1061 	tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1062 	pkt = m_tag_data(tag);
1063 
1064 	is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
1065 	pipe_nr = pkt->pipe_nr;
1066 
1067 	/*
1068 	 * This is a dummynet rule, so we expect a O_PIPE or O_QUEUE rule
1069 	 */
1070 	fs = dn_locate_flowset(pipe_nr, is_pipe);
1071 	if (fs == NULL)
1072 		goto dropit;	/* This queue/pipe does not exist! */
1073 
1074 	pipe = fs->pipe;
1075 	if (pipe == NULL) { /* Must be a queue, try find a matching pipe */
1076 		pipe = dn_find_pipe(fs->parent_nr);
1077 		if (pipe != NULL) {
1078 			fs->pipe = pipe;
1079 		} else {
1080 			kprintf("No pipe %d for queue %d, drop pkt\n",
1081 					fs->parent_nr, fs->fs_nr);
1082 			goto dropit;
1083 		}
1084 	}
1085 
1086 	q = find_queue(fs, &pkt->id);
1087 	if (q == NULL)
1088 		goto dropit;	/* Cannot allocate queue */
1089 
1090 	/*
1091 	 * Update statistics, then check reasons to drop pkt
1092 	 */
1093 	q->tot_bytes += len;
1094 	q->tot_pkts++;
1095 
1096 	if (fs->plr && krandom() < fs->plr)
1097 		goto dropit;	/* Random pkt drop */
1098 
1099 	if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1100 		if (q->len_bytes > fs->qsize)
1101 			goto dropit;	/* Queue size overflow */
1102 	} else {
1103 		if (q->len >= fs->qsize)
1104 			goto dropit;	/* Queue count overflow */
1105 	}
1106 
1107 	if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
1108 		goto dropit;
1109 
1110 	TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
1111 	q->len++;
1112 	q->len_bytes += len;
1113 
1114 	if (TAILQ_FIRST(&q->queue) != pkt)	/* Flow was not idle, we are done */
1115 		goto done;
1116 
1117 	/*
1118 	 * If we reach this point the flow was previously idle, so we need
1119 	 * to schedule it.  This involves different actions for fixed-rate
1120 	 * or WF2Q queues.
1121 	 */
1122 	if (is_pipe) {
1123 		/*
1124 		 * Fixed-rate queue: just insert into the ready_heap.
1125 		 */
1126 		dn_key t = 0;
1127 
1128 		if (pipe->bandwidth)
1129 			t = SET_TICKS(pkt, q, pipe);
1130 
1131 		q->sched_time = curr_time;
1132 		if (t == 0)	/* Must process it now */
1133 			ready_event(q);
1134 		else
1135 			heap_insert(&ready_heap, curr_time + t, q);
1136 	} else {
1137 		/*
1138 		 * WF2Q:
1139 		 * First, compute start time S: if the flow was idle (S=F+1)
1140 		 * set S to the virtual time V for the controlling pipe, and update
1141 		 * the sum of weights for the pipe; otherwise, remove flow from
1142 		 * idle_heap and set S to max(F, V).
1143 		 * Second, compute finish time F = S + len/weight.
1144 		 * Third, if pipe was idle, update V = max(S, V).
1145 		 * Fourth, count one more backlogged flow.
1146 		 */
1147 		if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid */
1148 			q->S = pipe->V;
1149 			pipe->sum += fs->weight; /* Add weight of new queue */
1150 		} else {
1151 			heap_extract(&pipe->idle_heap, q);
1152 			q->S = MAX64(q->F, pipe->V);
1153 		}
1154 		q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;
1155 
1156 		if (pipe->not_eligible_heap.elements == 0 &&
1157 				pipe->scheduler_heap.elements == 0)
1158 			pipe->V = MAX64(q->S, pipe->V);
1159 
1160 		fs->backlogged++;
1161 
1162 		/*
1163 		 * Look at eligibility.  A flow is not eligibile if S>V (when
1164 		 * this happens, it means that there is some other flow already
1165 		 * scheduled for the same pipe, so the scheduler_heap cannot be
1166 		 * empty).  If the flow is not eligible we just store it in the
1167 		 * not_eligible_heap.  Otherwise, we store in the scheduler_heap
1168 		 * and possibly invoke ready_event_wfq() right now if there is
1169 		 * leftover credit.
1170 		 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1171 		 * and for all flows in not_eligible_heap (NEH), S_i > V.
1172 		 * So when we need to compute max(V, min(S_i)) forall i in SCH+NEH,
1173 		 * we only need to look into NEH.
1174 		 */
1175 		if (DN_KEY_GT(q->S, pipe->V)) {	/* Not eligible */
1176 			if (pipe->scheduler_heap.elements == 0)
1177 				kprintf("++ ouch! not eligible but empty scheduler!\n");
1178 			heap_insert(&pipe->not_eligible_heap, q->S, q);
1179 		} else {
1180 			heap_insert(&pipe->scheduler_heap, q->F, q);
1181 			if (pipe->numbytes >= 0) {	/* Pipe is idle */
1182 				if (pipe->scheduler_heap.elements != 1)
1183 					kprintf("*** OUCH! pipe should have been idle!\n");
1184 				IPFW3_DEBUG("Waking up pipe %d at %d\n",
1185 						pipe->pipe_nr, (int)(q->F >> MY_M));
1186 				pipe->sched_time = curr_time;
1187 				ready_event_wfq(pipe);
1188 			}
1189 		}
1190 	}
1191 done:
1192 	return 0;
1193 
1194 dropit:
1195 	if (q)
1196 		q->drops++;
1197 	return ENOBUFS;
1198 }
1199 
1200 /*
1201  * Dispose all packets and flow_queues on a flow_set.
1202  * If all=1, also remove red lookup table and other storage,
1203  * including the descriptor itself.
1204  * For the one in dn_pipe MUST also cleanup ready_heap...
1205  */
1206 static void
1207 purge_flow_set(struct dn_flow_set *fs, int all)
1208 {
1209 	int i;
1210 #ifdef INVARIANTS
1211 	int rq_elements = 0;
1212 #endif
1213 
1214 	for (i = 0; i <= fs->rq_size; i++) {
1215 		struct dn_flow_queue *q;
1216 
1217 		while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
1218 			struct dn_pkt *pkt;
1219 
1220 			while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
1221 				TAILQ_REMOVE(&q->queue, pkt, dn_next);
1222 				ip_dn_packet_free(pkt);
1223 			}
1224 
1225 			LIST_REMOVE(q, q_link);
1226 			kfree(q, M_DUMMYNET);
1227 
1228 #ifdef INVARIANTS
1229 			rq_elements++;
1230 #endif
1231 		}
1232 	}
1233 	KASSERT(rq_elements == fs->rq_elements,
1234 			("# rq elements mismatch, freed %d, total %d",
1235 			 rq_elements, fs->rq_elements));
1236 	fs->rq_elements = 0;
1237 
1238 	if (all) {
1239 		/* RED - free lookup table */
1240 		if (fs->w_q_lookup)
1241 			kfree(fs->w_q_lookup, M_DUMMYNET);
1242 
1243 		if (fs->rq)
1244 			kfree(fs->rq, M_DUMMYNET);
1245 
1246 		/*
1247 		 * If this fs is not part of a pipe, free it
1248 		 *
1249 		 * fs->pipe == NULL could happen, if 'fs' is a WF2Q and
1250 		 * - No packet belongs to that flow set is delivered by
1251 		 *   dummynet_io(), i.e. parent pipe is not installed yet.
1252 		 * - Parent pipe is deleted.
1253 		 */
1254 		if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
1255 			kfree(fs, M_DUMMYNET);
1256 	}
1257 }
1258 
1259 /*
1260  * Dispose all packets queued on a pipe (not a flow_set).
1261  * Also free all resources associated to a pipe, which is about
1262  * to be deleted.
1263  */
1264 static void
1265 purge_pipe(struct dn_pipe *pipe)
1266 {
1267 	struct dn_pkt *pkt;
1268 
1269 	purge_flow_set(&pipe->fs, 1);
1270 
1271 	while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
1272 		TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
1273 		ip_dn_packet_free(pkt);
1274 	}
1275 
1276 	heap_free(&pipe->scheduler_heap);
1277 	heap_free(&pipe->not_eligible_heap);
1278 	heap_free(&pipe->idle_heap);
1279 }
1280 
1281 /*
1282  * Delete all pipes and heaps returning memory.
1283  */
1284 static void
1285 dummynet_flush(void)
1286 {
1287 	struct dn_pipe_head pipe_list;
1288 	struct dn_flowset_head fs_list;
1289 	struct dn_pipe *p;
1290 	struct dn_flow_set *fs;
1291 	int i;
1292 
1293 	/*
1294 	 * Prevent future matches...
1295 	 */
1296 	LIST_INIT(&pipe_list);
1297 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1298 		struct dn_pipe_head *pipe_hdr = &pipe_table[i];
1299 
1300 		while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
1301 			LIST_REMOVE(p, p_link);
1302 			LIST_INSERT_HEAD(&pipe_list, p, p_link);
1303 		}
1304 	}
1305 
1306 	LIST_INIT(&fs_list);
1307 	for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1308 		struct dn_flowset_head *fs_hdr = &flowset_table[i];
1309 
1310 		while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
1311 			LIST_REMOVE(fs, fs_link);
1312 			LIST_INSERT_HEAD(&fs_list, fs, fs_link);
1313 		}
1314 	}
1315 
1316 	/* Free heaps so we don't have unwanted events */
1317 	heap_free(&ready_heap);
1318 	heap_free(&wfq_ready_heap);
1319 	heap_free(&extract_heap);
1320 
1321 	/*
1322 	 * Now purge all queued pkts and delete all pipes
1323 	 */
1324 	/* Scan and purge all flow_sets. */
1325 	while ((fs = LIST_FIRST(&fs_list)) != NULL) {
1326 		LIST_REMOVE(fs, fs_link);
1327 		purge_flow_set(fs, 1);
1328 	}
1329 
1330 	while ((p = LIST_FIRST(&pipe_list)) != NULL) {
1331 		LIST_REMOVE(p, p_link);
1332 		purge_pipe(p);
1333 		kfree(p, M_DUMMYNET);
1334 	}
1335 }
1336 
1337 /*
1338  * setup RED parameters
1339  */
1340 static int
1341 config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
1342 {
1343 	int i;
1344 
1345 	x->w_q = ioc_fs->w_q;
1346 	x->min_th = SCALE(ioc_fs->min_th);
1347 	x->max_th = SCALE(ioc_fs->max_th);
1348 	x->max_p = ioc_fs->max_p;
1349 
1350 	x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
1351 	x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
1352 	if (x->flags_fs & DN_IS_GENTLE_RED) {
1353 		x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
1354 		x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
1355 	}
1356 
1357 	/* If the lookup table already exist, free and create it again */
1358 	if (x->w_q_lookup) {
1359 		kfree(x->w_q_lookup, M_DUMMYNET);
1360 		x->w_q_lookup = NULL ;
1361 	}
1362 
1363 	if (red_lookup_depth == 0) {
1364 		kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1365 		kfree(x, M_DUMMYNET);
1366 		return EINVAL;
1367 	}
1368 	x->lookup_depth = red_lookup_depth;
1369 	x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
1370 			M_DUMMYNET, M_WAITOK);
1371 
1372 	/* Fill the lookup table with (1 - w_q)^x */
1373 	x->lookup_step = ioc_fs->lookup_step;
1374 	x->lookup_weight = ioc_fs->lookup_weight;
1375 
1376 	x->w_q_lookup[0] = SCALE(1) - x->w_q;
1377 	for (i = 1; i < x->lookup_depth; i++)
1378 		x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1379 
1380 	if (red_avg_pkt_size < 1)
1381 		red_avg_pkt_size = 512;
1382 	x->avg_pkt_size = red_avg_pkt_size;
1383 
1384 	if (red_max_pkt_size < 1)
1385 		red_max_pkt_size = 1500;
1386 	x->max_pkt_size = red_max_pkt_size;
1387 
1388 	return 0;
1389 }
1390 
1391 static void
1392 alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1393 {
1394 	int i, alloc_size;
1395 
1396 	if (x->flags_fs & DN_HAVE_FLOW_MASK) {
1397 		int l = ioc_fs->rq_size;
1398 
1399 		/* Allocate some slots */
1400 		if (l == 0)
1401 			l = dn_hash_size;
1402 
1403 		if (l < DN_MIN_HASH_SIZE)
1404 			l = DN_MIN_HASH_SIZE;
1405 		else if (l > DN_MAX_HASH_SIZE)
1406 			l = DN_MAX_HASH_SIZE;
1407 
1408 		x->rq_size = l;
1409 	} else {
1410 		/* One is enough for null mask */
1411 		x->rq_size = 1;
1412 	}
1413 	alloc_size = x->rq_size + 1;
1414 
1415 	x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
1416 			M_DUMMYNET, M_WAITOK | M_ZERO);
1417 	x->rq_elements = 0;
1418 
1419 	for (i = 0; i < alloc_size; ++i)
1420 		LIST_INIT(&x->rq[i]);
1421 }
1422 
1423 static void
1424 set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
1425 {
1426 	id->fid_dst_ip = ioc_id->u.ip.dst_ip;
1427 	id->fid_src_ip = ioc_id->u.ip.src_ip;
1428 	id->fid_dst_port = ioc_id->u.ip.dst_port;
1429 	id->fid_src_port = ioc_id->u.ip.src_port;
1430 	id->fid_proto = ioc_id->u.ip.proto;
1431 	id->fid_flags = ioc_id->u.ip.flags;
1432 }
1433 
1434 static void
1435 set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1436 {
1437 	x->flags_fs = ioc_fs->flags_fs;
1438 	x->qsize = ioc_fs->qsize;
1439 	x->plr = ioc_fs->plr;
1440 	set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
1441 	if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1442 		if (x->qsize > 1024 * 1024)
1443 			x->qsize = 1024 * 1024;
1444 	} else {
1445 		if (x->qsize == 0 || x->qsize > 100)
1446 			x->qsize = 50;
1447 	}
1448 
1449 	/* Configuring RED */
1450 	if (x->flags_fs & DN_IS_RED)
1451 		config_red(ioc_fs, x);	/* XXX should check errors */
1452 }
1453 
1454 /*
1455  * setup pipe or queue parameters.
1456  */
1457 
1458 static int
1459 config_pipe(struct dn_ioc_pipe *ioc_pipe)
1460 {
1461 	struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
1462 	int error;
1463 
1464 	/*
1465 	 * The config program passes parameters as follows:
1466 	 * bw	bits/second (0 means no limits)
1467 	 * delay	ms (must be translated into ticks)
1468 	 * qsize	slots or bytes
1469 	 */
1470 	ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;
1471 
1472 	/*
1473 	 * We need either a pipe number or a flow_set number
1474 	 */
1475 	if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
1476 		return EINVAL;
1477 	if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
1478 		return EINVAL;
1479 
1480 	/*
1481 	 * Validate pipe number
1482 	 */
1483 	if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
1484 		return EINVAL;
1485 
1486 	error = EINVAL;
1487 	if (ioc_pipe->pipe_nr != 0) {	/* This is a pipe */
1488 		struct dn_pipe *x, *p;
1489 
1490 		/* Locate pipe */
1491 		p = dn_find_pipe(ioc_pipe->pipe_nr);
1492 
1493 		if (p == NULL) {	/* New pipe */
1494 			x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
1495 			x->pipe_nr = ioc_pipe->pipe_nr;
1496 			x->fs.pipe = x;
1497 			TAILQ_INIT(&x->p_queue);
1498 
1499 			/*
1500 			 * idle_heap is the only one from which we extract from the middle.
1501 			 */
1502 			x->idle_heap.size = x->idle_heap.elements = 0;
1503 			x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
1504 		} else {
1505 			int i;
1506 
1507 			x = p;
1508 
1509 			/* Flush accumulated credit for all queues */
1510 			for (i = 0; i <= x->fs.rq_size; i++) {
1511 				struct dn_flow_queue *q;
1512 
1513 				LIST_FOREACH(q, &x->fs.rq[i], q_link)
1514 					q->numbytes = 0;
1515 			}
1516 		}
1517 
1518 		x->bandwidth = ioc_pipe->bandwidth;
1519 		x->numbytes = 0; /* Just in case... */
1520 		x->delay = ioc_pipe->delay;
1521 
1522 		set_fs_parms(&x->fs, ioc_fs);
1523 
1524 		if (x->fs.rq == NULL) {	/* A new pipe */
1525 			struct dn_pipe_head *pipe_hdr;
1526 
1527 			alloc_hash(&x->fs, ioc_fs);
1528 
1529 			pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
1530 			LIST_INSERT_HEAD(pipe_hdr, x, p_link);
1531 		}
1532 	} else {	/* Config flow_set */
1533 		struct dn_flow_set *x, *fs;
1534 
1535 		/* Locate flow_set */
1536 		fs = dn_find_flowset(ioc_fs->fs_nr);
1537 
1538 		if (fs == NULL) {	/* New flow_set */
1539 			if (ioc_fs->parent_nr == 0)	/* Need link to a pipe */
1540 				goto back;
1541 
1542 			x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
1543 					M_WAITOK | M_ZERO);
1544 			x->fs_nr = ioc_fs->fs_nr;
1545 			x->parent_nr = ioc_fs->parent_nr;
1546 			x->weight = ioc_fs->weight;
1547 			if (x->weight == 0)
1548 				x->weight = 1;
1549 			else if (x->weight > 100)
1550 				x->weight = 100;
1551 		} else {
1552 			/* Change parent pipe not allowed; must delete and recreate */
1553 			if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
1554 				goto back;
1555 			x = fs;
1556 		}
1557 
1558 		set_fs_parms(x, ioc_fs);
1559 
1560 		if (x->rq == NULL) {	/* A new flow_set */
1561 			struct dn_flowset_head *fs_hdr;
1562 
1563 			alloc_hash(x, ioc_fs);
1564 
1565 			fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
1566 			LIST_INSERT_HEAD(fs_hdr, x, fs_link);
1567 		}
1568 	}
1569 	error = 0;
1570 
1571 back:
1572 	return error;
1573 }
1574 
1575 /*
1576  * Helper function to remove from a heap queues which are linked to
1577  * a flow_set about to be deleted.
1578  */
1579 static void
1580 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
1581 {
1582 	int i = 0, found = 0;
1583 
1584 	while (i < h->elements) {
1585 		if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
1586 			h->elements--;
1587 			h->p[i] = h->p[h->elements];
1588 			found++;
1589 		} else {
1590 			i++;
1591 		}
1592 	}
1593 	if (found)
1594 		heapify(h);
1595 }
1596 
1597 /*
1598  * helper function to remove a pipe from a heap (can be there at most once)
1599  */
1600 static void
1601 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
1602 {
1603 	if (h->elements > 0) {
1604 		int i;
1605 
1606 		for (i = 0; i < h->elements; i++) {
1607 			if (h->p[i].object == p) { /* found it */
1608 				h->elements--;
1609 				h->p[i] = h->p[h->elements];
1610 				heapify(h);
1611 				break;
1612 			}
1613 		}
1614 	}
1615 }
1616 
1617 static void
1618 dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
1619 {
1620 	struct dn_pipe *pipe = pipe0;
1621 
1622 	if (fs->pipe == pipe) {
1623 		kprintf("++ ref to pipe %d from fs %d\n",
1624 				pipe->pipe_nr, fs->fs_nr);
1625 		fs->pipe = NULL;
1626 		purge_flow_set(fs, 0);
1627 	}
1628 }
1629 
1630 /*
1631  * Fully delete a pipe or a queue, cleaning up associated info.
1632  */
1633 static int
1634 delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
1635 {
1636 	struct dn_pipe *p;
1637 	int error;
1638 
1639 	if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
1640 		return EINVAL;
1641 	if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
1642 		return EINVAL;
1643 
1644 	if (ioc_pipe->pipe_nr > DN_NR_HASH_MAX || ioc_pipe->pipe_nr < 0)
1645 		return EINVAL;
1646 
1647 	error = EINVAL;
1648 	if (ioc_pipe->pipe_nr != 0) {	/* This is an old-style pipe */
1649 		/* Locate pipe */
1650 		p = dn_find_pipe(ioc_pipe->pipe_nr);
1651 		if (p == NULL)
1652 			goto back; /* Not found */
1653 
1654 		/* Unlink from pipe hash table */
1655 		LIST_REMOVE(p, p_link);
1656 
1657 		/* Remove all references to this pipe from flow_sets */
1658 		dn_iterate_flowset(dn_unref_pipe_cb, p);
1659 
1660 		fs_remove_from_heap(&ready_heap, &p->fs);
1661 		purge_pipe(p);	/* Remove all data associated to this pipe */
1662 
1663 		/* Remove reference to here from extract_heap and wfq_ready_heap */
1664 		pipe_remove_from_heap(&extract_heap, p);
1665 		pipe_remove_from_heap(&wfq_ready_heap, p);
1666 
1667 		kfree(p, M_DUMMYNET);
1668 	} else {	/* This is a WF2Q queue (dn_flow_set) */
1669 		struct dn_flow_set *fs;
1670 
1671 		/* Locate flow_set */
1672 		fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
1673 		if (fs == NULL)
1674 			goto back; /* Not found */
1675 
1676 		LIST_REMOVE(fs, fs_link);
1677 
1678 		if ((p = fs->pipe) != NULL) {
1679 			/* Update total weight on parent pipe and cleanup parent heaps */
1680 			p->sum -= fs->weight * fs->backlogged;
1681 			fs_remove_from_heap(&p->not_eligible_heap, fs);
1682 			fs_remove_from_heap(&p->scheduler_heap, fs);
1683 #if 1	/* XXX should i remove from idle_heap as well ? */
1684 			fs_remove_from_heap(&p->idle_heap, fs);
1685 #endif
1686 		}
1687 		purge_flow_set(fs, 1);
1688 	}
1689 	error = 0;
1690 
1691 back:
1692 	return error;
1693 }
1694 
1695 /*
1696  * helper function used to copy data from kernel in DUMMYNET_GET
1697  */
1698 static void
1699 dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
1700 {
1701 	ioc_id->type = ETHERTYPE_IP;
1702 	ioc_id->u.ip.dst_ip = id->fid_dst_ip;
1703 	ioc_id->u.ip.src_ip = id->fid_src_ip;
1704 	ioc_id->u.ip.dst_port = id->fid_dst_port;
1705 	ioc_id->u.ip.src_port = id->fid_src_port;
1706 	ioc_id->u.ip.proto = id->fid_proto;
1707 	ioc_id->u.ip.flags = id->fid_flags;
1708 }
1709 
1710 static void *
1711 dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
1712 {
1713 	struct dn_ioc_flowqueue *ioc_fq = bp;
1714 	int i, copied = 0;
1715 
1716 	for (i = 0; i <= fs->rq_size; i++) {
1717 		const struct dn_flow_queue *q;
1718 
1719 		LIST_FOREACH(q, &fs->rq[i], q_link) {
1720 			if (q->hash_slot != i) {	/* XXX ASSERT */
1721 				kprintf("++ at %d: wrong slot (have %d, "
1722 						"should be %d)\n",
1723 						copied, q->hash_slot, i);
1724 			}
1725 			if (q->fs != fs) {		/* XXX ASSERT */
1726 				kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
1727 						i, q->fs, fs);
1728 			}
1729 
1730 			copied++;
1731 
1732 			ioc_fq->len = q->len;
1733 			ioc_fq->len_bytes = q->len_bytes;
1734 			ioc_fq->tot_pkts = q->tot_pkts;
1735 			ioc_fq->tot_bytes = q->tot_bytes;
1736 			ioc_fq->drops = q->drops;
1737 			ioc_fq->hash_slot = q->hash_slot;
1738 			ioc_fq->S = q->S;
1739 			ioc_fq->F = q->F;
1740 			dn_copy_flowid(&q->id, &ioc_fq->id);
1741 
1742 			ioc_fq++;
1743 		}
1744 	}
1745 
1746 	if (copied != fs->rq_elements) {	/* XXX ASSERT */
1747 		kprintf("++ wrong count, have %d should be %d\n",
1748 				copied, fs->rq_elements);
1749 	}
1750 	return ioc_fq;
1751 }
1752 
1753 static void
1754 dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
1755 		u_short fs_type)
1756 {
1757 	ioc_fs->fs_type = fs_type;
1758 
1759 	ioc_fs->fs_nr = fs->fs_nr;
1760 	ioc_fs->flags_fs = fs->flags_fs;
1761 	ioc_fs->parent_nr = fs->parent_nr;
1762 
1763 	ioc_fs->weight = fs->weight;
1764 	ioc_fs->qsize = fs->qsize;
1765 	ioc_fs->plr = fs->plr;
1766 
1767 	ioc_fs->rq_size = fs->rq_size;
1768 	ioc_fs->rq_elements = fs->rq_elements;
1769 
1770 	ioc_fs->w_q = fs->w_q;
1771 	ioc_fs->max_th = fs->max_th;
1772 	ioc_fs->min_th = fs->min_th;
1773 	ioc_fs->max_p = fs->max_p;
1774 
1775 	dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
1776 }
1777 
1778 static void
1779 dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
1780 {
1781 	size_t *size = sz;
1782 
1783 	*size += sizeof(struct dn_ioc_pipe) +
1784 		pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
1785 }
1786 
1787 static void
1788 dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
1789 {
1790 	size_t *size = sz;
1791 
1792 	*size += sizeof(struct dn_ioc_flowset) +
1793 		fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
1794 }
1795 
1796 static void
1797 dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
1798 {
1799 	char **bp = bp0;
1800 	struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);
1801 
1802 	/*
1803 	 * Copy flow set descriptor associated with this pipe
1804 	 */
1805 	dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);
1806 
1807 	/*
1808 	 * Copy pipe descriptor
1809 	 */
1810 	ioc_pipe->bandwidth = pipe->bandwidth;
1811 	ioc_pipe->pipe_nr = pipe->pipe_nr;
1812 	ioc_pipe->V = pipe->V;
1813 	/* Convert delay to milliseconds */
1814 	ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;
1815 
1816 	/*
1817 	 * Copy flow queue descriptors
1818 	 */
1819 	*bp += sizeof(*ioc_pipe);
1820 	*bp = dn_copy_flowqueues(&pipe->fs, *bp);
1821 }
1822 
1823 static void
1824 dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
1825 {
1826 	char **bp = bp0;
1827 	struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);
1828 
1829 	/*
1830 	 * Copy flow set descriptor
1831 	 */
1832 	dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);
1833 
1834 	/*
1835 	 * Copy flow queue descriptors
1836 	 */
1837 	*bp += sizeof(*ioc_fs);
1838 	*bp = dn_copy_flowqueues(fs, *bp);
1839 }
1840 
1841 static int
1842 dummynet_get(struct dn_sopt *dn_sopt)
1843 {
1844 	char *buf, *bp;
1845 	size_t size = 0;
1846 
1847 	/*
1848 	 * Compute size of data structures: list of pipes and flow_sets.
1849 	 */
1850 	dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
1851 	dn_iterate_flowset(dn_calc_fs_size_cb, &size);
1852 
1853 	/*
1854 	 * Copyout pipe/flow_set/flow_queue
1855 	 */
1856 	bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
1857 	dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
1858 	dn_iterate_flowset(dn_copyout_fs_cb, &bp);
1859 
1860 	/* Temp memory will be freed by caller */
1861 	dn_sopt->dn_sopt_arg = buf;
1862 	dn_sopt->dn_sopt_arglen = size;
1863 	return 0;
1864 }
1865 
1866 /*
1867  * Handler for the various dummynet socket options (get, flush, config, del)
1868  */
1869 static int
1870 dummynet_ctl(struct dn_sopt *dn_sopt)
1871 {
1872 	int error = 0;
1873 
1874 	switch (dn_sopt->dn_sopt_name) {
1875 		case IP_DUMMYNET_GET:
1876 			error = dummynet_get(dn_sopt);
1877 			break;
1878 
1879 		case IP_DUMMYNET_FLUSH:
1880 			dummynet_flush();
1881 			break;
1882 
1883 		case IP_DUMMYNET_CONFIGURE:
1884 			KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1885 			error = config_pipe(dn_sopt->dn_sopt_arg);
1886 			break;
1887 
1888 		case IP_DUMMYNET_DEL:	/* Remove a pipe or flow_set */
1889 			KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1890 			error = delete_pipe(dn_sopt->dn_sopt_arg);
1891 			break;
1892 
1893 		default:
1894 			kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
1895 			error = EINVAL;
1896 			break;
1897 	}
1898 	return error;
1899 }
1900 
1901 static void
1902 dummynet_clock(systimer_t info __unused, int in_ipi __unused,
1903 	struct intrframe *frame __unused)
1904 {
1905 	KASSERT(mycpuid == ip_dn_cpu,
1906 			("dummynet systimer comes on cpu%d, should be %d!",
1907 			 mycpuid, ip_dn_cpu));
1908 
1909 	crit_enter();
1910 	if (DUMMYNET_LOADED && (dn_netmsg.lmsg.ms_flags & MSGF_DONE))
1911 		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), &dn_netmsg.lmsg);
1912 	crit_exit();
1913 }
1914 
1915 static int
1916 sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
1917 {
1918 	int error, val;
1919 
1920 	val = dn_hz;
1921 	error = sysctl_handle_int(oidp, &val, 0, req);
1922 	if (error || req->newptr == NULL)
1923 		return error;
1924 	if (val <= 0)
1925 		return EINVAL;
1926 	else if (val > DN_CALLOUT_FREQ_MAX)
1927 		val = DN_CALLOUT_FREQ_MAX;
1928 
1929 	crit_enter();
1930 	dn_hz = val;
1931 	systimer_adjust_periodic(&dn_clock, val);
1932 	crit_exit();
1933 
1934 	return 0;
1935 }
1936 
1937 static void
1938 ip_dn_init_dispatch(netmsg_t msg)
1939 {
1940 	int i, error = 0;
1941 
1942 	KASSERT(mycpuid == ip_dn_cpu,
1943 			("%s runs on cpu%d, instead of cpu%d", __func__,
1944 			 mycpuid, ip_dn_cpu));
1945 
1946 	crit_enter();
1947 
1948 	if (DUMMYNET_LOADED) {
1949 		kprintf("DUMMYNET already loaded\n");
1950 		error = EEXIST;
1951 		goto back;
1952 	}
1953 
1954 	kprintf("DUMMYNET initialized (011031)\n");
1955 
1956 	for (i = 0; i < DN_NR_HASH_MAX; ++i)
1957 		LIST_INIT(&pipe_table[i]);
1958 
1959 	for (i = 0; i < DN_NR_HASH_MAX; ++i)
1960 		LIST_INIT(&flowset_table[i]);
1961 
1962 	ready_heap.size = ready_heap.elements = 0;
1963 	ready_heap.offset = 0;
1964 
1965 	wfq_ready_heap.size = wfq_ready_heap.elements = 0;
1966 	wfq_ready_heap.offset = 0;
1967 
1968 	extract_heap.size = extract_heap.elements = 0;
1969 	extract_heap.offset = 0;
1970 
1971 	ip_dn_ctl_ptr = dummynet_ctl;
1972 	ip_dn_io_ptr = dummynet_io;
1973 
1974 	netmsg_init(&dn_netmsg, NULL, &netisr_adone_rport,
1975 			0, dummynet);
1976 	systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
1977 
1978 back:
1979 	crit_exit();
1980 	lwkt_replymsg(&msg->lmsg, error);
1981 }
1982 
1983 static int
1984 ip_dn_init(void)
1985 {
1986 	struct netmsg_base smsg;
1987 
1988 	if (ip_dn_cpu >= ncpus) {
1989 		kprintf("%s: CPU%d does not exist, switch to CPU0\n",
1990 				__func__, ip_dn_cpu);
1991 		ip_dn_cpu = 0;
1992 	}
1993 
1994 	ip_fw3_register_module(MODULE_DUMMYNET_ID, MODULE_DUMMYNET_NAME);
1995 	ip_fw3_register_filter_funcs(MODULE_DUMMYNET_ID, O_DUMMYNET_PIPE,
1996 			(filter_func)check_pipe);
1997 	ip_fw3_register_filter_funcs(MODULE_DUMMYNET_ID, O_DUMMYNET_QUEUE,
1998 			(filter_func)check_pipe);
1999 
2000 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2001 			0, ip_dn_init_dispatch);
2002 	lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
2003 	return smsg.lmsg.ms_error;
2004 }
2005 
2006 #ifdef KLD_MODULE
2007 
2008 static void
2009 ip_dn_stop_dispatch(netmsg_t msg)
2010 {
2011 	crit_enter();
2012 
2013 	dummynet_flush();
2014 
2015 	ip_dn_ctl_ptr = NULL;
2016 	ip_dn_io_ptr = NULL;
2017 
2018 	systimer_del(&dn_clock);
2019 
2020 	crit_exit();
2021 	lwkt_replymsg(&msg->lmsg, 0);
2022 }
2023 
2024 static void
2025 ip_dn_stop(void)
2026 {
2027 	struct netmsg_base smsg;
2028 
2029 	netmsg_init(&smsg, NULL, &curthread->td_msgport,
2030 			0, ip_dn_stop_dispatch);
2031 	lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
2032 
2033 	netmsg_service_sync();
2034 }
2035 
2036 #endif	/* KLD_MODULE */
2037 
2038 static int
2039 dummynet_modevent(module_t mod, int type, void *data)
2040 {
2041 	switch (type) {
2042 		case MOD_LOAD:
2043 			return ip_dn_init();
2044 
2045 		case MOD_UNLOAD:
2046 #ifndef KLD_MODULE
2047 			kprintf("dummynet statically compiled, cannot unload\n");
2048 			return EINVAL;
2049 #else
2050 			ip_dn_stop();
2051 #endif
2052 			break;
2053 
2054 		default:
2055 			break;
2056 	}
2057 	return 0;
2058 }
2059 
2060 static moduledata_t dummynet_mod = {
2061 	"dummynet2",
2062 	dummynet_modevent,
2063 	NULL
2064 };
2065 DECLARE_MODULE(dummynet3, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2066 MODULE_DEPEND(dummynet3, ipfw3_basic, 1, 1, 1);
2067 MODULE_VERSION(dummynet3, 1);
2068