xref: /freebsd/sbin/pfctl/pfctl_altq.c (revision 19261079)
1 /*	$OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2002
5  *	Sony Computer Science Laboratories Inc.
6  * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
23 
24 #define PFIOC_USE_LATEST
25 
26 #include <sys/types.h>
27 #include <sys/bitset.h>
28 #include <sys/ioctl.h>
29 #include <sys/socket.h>
30 
31 #include <net/if.h>
32 #include <netinet/in.h>
33 #include <net/pfvar.h>
34 
35 #include <err.h>
36 #include <errno.h>
37 #include <inttypes.h>
38 #include <limits.h>
39 #include <math.h>
40 #include <search.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <unistd.h>
45 
46 #include <net/altq/altq.h>
47 #include <net/altq/altq_cbq.h>
48 #include <net/altq/altq_codel.h>
49 #include <net/altq/altq_priq.h>
50 #include <net/altq/altq_hfsc.h>
51 #include <net/altq/altq_fairq.h>
52 
53 #include "pfctl_parser.h"
54 #include "pfctl.h"
55 
56 #define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
57 
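/*
 * Root altq definitions (one per interface) are stored in if_map keyed
 * by interface name and linked on the interfaces list; child queues go
 * into queue_map keyed by "ifname:qname", and qid_map maps a queue name
 * to its qid so that same-named queues share a qid across interfaces.
 */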
58 static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
59 static struct hsearch_data queue_map;
60 static struct hsearch_data if_map;
61 static struct hsearch_data qid_map;
62 
63 static struct pfctl_altq *pfaltq_lookup(char *ifname);
64 static struct pfctl_altq *qname_to_pfaltq(const char *, const char *);
65 static u_int32_t	 qname_to_qid(char *);
66 
67 static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
68 		    struct pfctl_altq *);
69 static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
70 static int	check_commit_cbq(int, int, struct pfctl_altq *);
71 static int	print_cbq_opts(const struct pf_altq *);
72 
73 static int	print_codel_opts(const struct pf_altq *,
74 		    const struct node_queue_opt *);
75 
76 static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
77 		    struct pfctl_altq *);
78 static int	check_commit_priq(int, int, struct pfctl_altq *);
79 static int	print_priq_opts(const struct pf_altq *);
80 
81 static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
82 		    struct pfctl_altq *, struct pfctl_altq *);
83 static int	check_commit_hfsc(int, int, struct pfctl_altq *);
84 static int	print_hfsc_opts(const struct pf_altq *,
85 		    const struct node_queue_opt *);
86 
87 static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
88 		    struct pfctl_altq *, struct pfctl_altq *);
89 static int	print_fairq_opts(const struct pf_altq *,
90 		    const struct node_queue_opt *);
91 static int	check_commit_fairq(int, int, struct pfctl_altq *);
92 
93 static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
94 static int		 is_gsc_under_sc(struct gen_sc *,
95 			     struct service_curve *);
96 static struct segment	*gsc_getentry(struct gen_sc *, double);
97 static int		 gsc_add_seg(struct gen_sc *, double, double, double,
98 			     double);
99 static double		 sc_x2y(struct service_curve *, double);
100 
101 u_int32_t	 getifspeed(char *);
102 u_long		 getifmtu(char *);
103 int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
104 		     u_int64_t);
105 u_int64_t	 eval_bwspec(struct node_queue_bw *, u_int64_t);
106 void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
107 		     const struct node_hfsc_sc *);
108 void		 print_fairq_sc(const char *, u_int, u_int, u_int,
109 		     const struct node_fairq_sc *);
110 
111 static __attribute__((constructor)) void
112 pfctl_altq_init(void)
113 {
114 	/*
115 	 * As hdestroy() will never be called on these tables, it is safe
116 	 * to use references into the stored data as keys.
117 	 */
118 	if (hcreate_r(0, &queue_map) == 0)
119 		err(1, "Failed to create altq queue map");
120 	if (hcreate_r(0, &if_map) == 0)
121 		err(1, "Failed to create altq interface map");
122 	if (hcreate_r(0, &qid_map) == 0)
123 		err(1, "Failed to create altq queue id map");
124 }
125 
126 void
127 pfaltq_store(struct pf_altq *a)
128 {
129 	struct pfctl_altq	*altq;
130 	ENTRY 			 item;
131 	ENTRY			*ret_item;
132 	size_t			 key_size;
133 
134 	if ((altq = malloc(sizeof(*altq))) == NULL)
135 		err(1, "queue malloc");
136 	memcpy(&altq->pa, a, sizeof(struct pf_altq));
137 	memset(&altq->meta, 0, sizeof(altq->meta));
138 
139 	if (a->qname[0] == 0) {
140 		item.key = altq->pa.ifname;
141 		item.data = altq;
142 		if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
143 			err(1, "interface map insert");
144 		STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
145 	} else {
146 		key_size = sizeof(a->ifname) + sizeof(a->qname);
147 		if ((item.key = malloc(key_size)) == NULL)
148 			err(1, "queue map key malloc");
149 		snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
150 		item.data = altq;
151 		if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
152 			err(1, "queue map insert");
153 
154 		item.key = altq->pa.qname;
155 		item.data = &altq->pa.qid;
156 		if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
157 			err(1, "qid map insert");
158 	}
159 }
160 
161 static struct pfctl_altq *
162 pfaltq_lookup(char *ifname)
163 {
164 	ENTRY	 item;
165 	ENTRY	*ret_item;
166 
167 	item.key = ifname;
168 	if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
169 		return (NULL);
170 
171 	return (ret_item->data);
172 }
173 
174 static struct pfctl_altq *
175 qname_to_pfaltq(const char *qname, const char *ifname)
176 {
177 	ENTRY	 item;
178 	ENTRY	*ret_item;
179 	char	 key[IFNAMSIZ + PF_QNAME_SIZE];
180 
181 	item.key = key;
182 	snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
183 	if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
184 		return (NULL);
185 
186 	return (ret_item->data);
187 }
188 
189 static u_int32_t
190 qname_to_qid(char *qname)
191 {
192 	ENTRY	 item;
193 	ENTRY	*ret_item;
194 	uint32_t qid;
195 
196 	/*
197 	 * We guarantee that same-named queues on different interfaces
198 	 * have the same qid.
199 	 */
200 	item.key = qname;
201 	if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
202 		return (0);
203 
204 	qid = *(uint32_t *)ret_item->data;
205 	return (qid);
206 }
207 
208 void
209 print_altq(const struct pf_altq *a, unsigned int level,
210     struct node_queue_bw *bw, struct node_queue_opt *qopts)
211 {
212 	if (a->qname[0] != 0) {
213 		print_queue(a, level, bw, 1, qopts);
214 		return;
215 	}
216 
217 #ifdef __FreeBSD__
218 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
219 		printf("INACTIVE ");
220 #endif
221 
222 	printf("altq on %s ", a->ifname);
223 
224 	switch (a->scheduler) {
225 	case ALTQT_CBQ:
226 		if (!print_cbq_opts(a))
227 			printf("cbq ");
228 		break;
229 	case ALTQT_PRIQ:
230 		if (!print_priq_opts(a))
231 			printf("priq ");
232 		break;
233 	case ALTQT_HFSC:
234 		if (!print_hfsc_opts(a, qopts))
235 			printf("hfsc ");
236 		break;
237 	case ALTQT_FAIRQ:
238 		if (!print_fairq_opts(a, qopts))
239 			printf("fairq ");
240 		break;
241 	case ALTQT_CODEL:
242 		if (!print_codel_opts(a, qopts))
243 			printf("codel ");
244 		break;
245 	}
246 
247 	if (bw != NULL && bw->bw_percent > 0) {
248 		if (bw->bw_percent < 100)
249 			printf("bandwidth %u%% ", bw->bw_percent);
250 	} else
251 		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));
252 
253 	if (a->qlimit != DEFAULT_QLIMIT)
254 		printf("qlimit %u ", a->qlimit);
255 	printf("tbrsize %u ", a->tbrsize);
256 }
257 
258 void
259 print_queue(const struct pf_altq *a, unsigned int level,
260     struct node_queue_bw *bw, int print_interface,
261     struct node_queue_opt *qopts)
262 {
263 	unsigned int	i;
264 
265 #ifdef __FreeBSD__
266 	if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
267 		printf("INACTIVE ");
268 #endif
269 	printf("queue ");
270 	for (i = 0; i < level; ++i)
271 		printf(" ");
272 	printf("%s ", a->qname);
273 	if (print_interface)
274 		printf("on %s ", a->ifname);
275 	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
276 		a->scheduler == ALTQT_FAIRQ) {
277 		if (bw != NULL && bw->bw_percent > 0) {
278 			if (bw->bw_percent < 100)
279 				printf("bandwidth %u%% ", bw->bw_percent);
280 		} else
281 			printf("bandwidth %s ", rate2str((double)a->bandwidth));
282 	}
283 	if (a->priority != DEFAULT_PRIORITY)
284 		printf("priority %u ", a->priority);
285 	if (a->qlimit != DEFAULT_QLIMIT)
286 		printf("qlimit %u ", a->qlimit);
287 	switch (a->scheduler) {
288 	case ALTQT_CBQ:
289 		print_cbq_opts(a);
290 		break;
291 	case ALTQT_PRIQ:
292 		print_priq_opts(a);
293 		break;
294 	case ALTQT_HFSC:
295 		print_hfsc_opts(a, qopts);
296 		break;
297 	case ALTQT_FAIRQ:
298 		print_fairq_opts(a, qopts);
299 		break;
300 	}
301 }
302 
303 /*
304  * eval_pfaltq computes the discipline parameters.
305  */
306 int
307 eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
308     struct node_queue_opt *opts)
309 {
310 	u_int64_t	rate;
311 	u_int		size, errors = 0;
312 
313 	if (bw->bw_absolute > 0)
314 		pa->ifbandwidth = bw->bw_absolute;
315 	else
316 		if ((rate = getifspeed(pa->ifname)) == 0) {
317 			fprintf(stderr, "interface %s does not know its bandwidth, "
318 			    "please specify an absolute bandwidth\n",
319 			    pa->ifname);
320 			errors++;
321 		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
322 			pa->ifbandwidth = rate;
323 
324 	/*
325 	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
326 	 */
327 	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
328 		pa->ifbandwidth = UINT_MAX;
329 		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
330 		    "because the selected scheduler is 32-bit limited", pa->ifname,
331 		    pa->ifbandwidth);
332 	}
333 	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);
334 
335 	/* if tbrsize is not specified, use heuristics */
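	/*
	 * The bucket is sized to a small multiple of the MTU that grows
	 * with the link speed: e.g. a 100Mbps link with a 1500 byte MTU
	 * gets an 8 * 1500 = 12000 byte token bucket.
	 */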
336 	if (pa->tbrsize == 0) {
337 		rate = pa->ifbandwidth;
338 		if (rate <= 1 * 1000 * 1000)
339 			size = 1;
340 		else if (rate <= 10 * 1000 * 1000)
341 			size = 4;
342 		else if (rate <= 200 * 1000 * 1000)
343 			size = 8;
344 		else if (rate <= 2500 * 1000 * 1000ULL)
345 			size = 24;
346 		else
347 			size = 128;
348 		size = size * getifmtu(pa->ifname);
349 		pa->tbrsize = size;
350 	}
351 	return (errors);
352 }
353 
354 /*
355  * check_commit_altq does a consistency check for each interface
356  */
357 int
358 check_commit_altq(int dev, int opts)
359 {
360 	struct pfctl_altq	*if_ppa;
361 	int			 error = 0;
362 
363 	/* call the discipline check for each interface. */
364 	STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
365 		switch (if_ppa->pa.scheduler) {
366 		case ALTQT_CBQ:
367 			error = check_commit_cbq(dev, opts, if_ppa);
368 			break;
369 		case ALTQT_PRIQ:
370 			error = check_commit_priq(dev, opts, if_ppa);
371 			break;
372 		case ALTQT_HFSC:
373 			error = check_commit_hfsc(dev, opts, if_ppa);
374 			break;
375 		case ALTQT_FAIRQ:
376 			error = check_commit_fairq(dev, opts, if_ppa);
377 			break;
378 		default:
379 			break;
380 		}
381 	}
382 	return (error);
383 }
384 
385 /*
386  * eval_pfqueue computes the queue parameters.
387  */
388 int
389 eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
390     struct node_queue_opt *opts)
391 {
392 	/* should be merged with expand_queue */
393 	struct pfctl_altq	*if_ppa, *parent;
394 	int		 	 error = 0;
395 
396 	/* find the corresponding interface and copy fields used by queues */
397 	if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
398 		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
399 		return (1);
400 	}
401 	pa->scheduler = if_ppa->pa.scheduler;
402 	pa->ifbandwidth = if_ppa->pa.ifbandwidth;
403 
404 	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
405 		fprintf(stderr, "queue %s already exists on interface %s\n",
406 		    pa->qname, pa->ifname);
407 		return (1);
408 	}
409 	pa->qid = qname_to_qid(pa->qname);
410 
411 	parent = NULL;
412 	if (pa->parent[0] != 0) {
413 		parent = qname_to_pfaltq(pa->parent, pa->ifname);
414 		if (parent == NULL) {
415 			fprintf(stderr, "parent %s not found for %s\n",
416 			    pa->parent, pa->qname);
417 			return (1);
418 		}
419 		pa->parent_qid = parent->pa.qid;
420 	}
421 	if (pa->qlimit == 0)
422 		pa->qlimit = DEFAULT_QLIMIT;
423 
424 	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
425 		pa->scheduler == ALTQT_FAIRQ) {
426 		pa->bandwidth = eval_bwspec(bw,
427 		    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);
428 
429 		if (pa->bandwidth > pa->ifbandwidth) {
430 			fprintf(stderr, "bandwidth for %s higher than "
431 			    "interface\n", pa->qname);
432 			return (1);
433 		}
434 		/*
435 		 * If not HFSC, then check that the sum of the child
436 		 * bandwidths is less than the parent's bandwidth.  For
437 		 * HFSC, the equivalent concept is to check that the sum of
438 		 * the child linkshare service curves are under the parent's
439 		 * linkshare service curve, and that check is performed by
440 		 * eval_pfqueue_hfsc().
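		 *
		 * e.g. two 60Mb children under a 100Mb parent push bwsum to
		 * 120Mb, and the warning fires when the second child is
		 * evaluated.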
441 		 */
442 		if ((parent != NULL) && (pa->scheduler != ALTQT_HFSC)) {
443 			if (pa->bandwidth > parent->pa.bandwidth) {
444 				warnx("bandwidth for %s higher than parent",
445 				    pa->qname);
446 				return (1);
447 			}
448 			parent->meta.bwsum += pa->bandwidth;
449 			if (parent->meta.bwsum > parent->pa.bandwidth) {
450 				warnx("the sum of the child bandwidths (%" PRIu64
451 				    ") is higher than parent \"%s\" (%" PRIu64 ")",
452 				    parent->meta.bwsum, parent->pa.qname,
453 				    parent->pa.bandwidth);
454 			}
455 		}
456 	}
457 
458 	if (eval_queue_opts(pa, opts,
459 		parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
460 		return (1);
461 
462 	if (parent != NULL)
463 		parent->meta.children++;
464 
465 	switch (pa->scheduler) {
466 	case ALTQT_CBQ:
467 		error = eval_pfqueue_cbq(pf, pa, if_ppa);
468 		break;
469 	case ALTQT_PRIQ:
470 		error = eval_pfqueue_priq(pf, pa, if_ppa);
471 		break;
472 	case ALTQT_HFSC:
473 		error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
474 		break;
475 	case ALTQT_FAIRQ:
476 		error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
477 		break;
478 	default:
479 		break;
480 	}
481 	return (error);
482 }
483 
484 /*
485  * CBQ support functions
486  */
487 #define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
488 #define	RM_NS_PER_SEC	(1000000000)
489 
490 static int
491 eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
492 {
493 	struct cbq_opts	*opts;
494 	u_int		 ifmtu;
495 
496 	if (pa->priority >= CBQ_MAXPRI) {
497 		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
498 		return (-1);
499 	}
500 
501 	ifmtu = getifmtu(pa->ifname);
502 	opts = &pa->pq_u.cbq_opts;
503 
504 	if (opts->pktsize == 0) {	/* use default */
505 		opts->pktsize = ifmtu;
506 		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
507 			opts->pktsize &= ~MCLBYTES;
508 	} else if (opts->pktsize > ifmtu)
509 		opts->pktsize = ifmtu;
510 	if (opts->maxpktsize == 0)	/* use default */
511 		opts->maxpktsize = ifmtu;
512 	else if (opts->maxpktsize > ifmtu)
513 		opts->maxpktsize = ifmtu;
514 
515 	if (opts->pktsize > opts->maxpktsize)
516 		opts->pktsize = opts->maxpktsize;
517 
518 	if (pa->parent[0] == 0)
519 		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
520 
521 	if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
522 		if_ppa->meta.root_classes++;
523 	if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
524 		if_ppa->meta.default_classes++;
525 
526 	cbq_compute_idletime(pf, pa);
527 	return (0);
528 }
529 
530 /*
531  * compute ns_per_byte, maxidle, minidle, and offtime
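 *
 * e.g. on a 10Mbps interface, ifnsPerByte = 8 * 10^9 / 10^7 = 800ns per
 * byte; a queue given 20% of the link (f = 0.2) then transmits at
 * nsPerByte = 800 / 0.2 = 4000ns per byte.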
532  */
533 static int
534 cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
535 {
536 	struct cbq_opts	*opts;
537 	double		 maxidle_s, maxidle, minidle;
538 	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
539 	double		 z, g, f, gton, gtom;
540 	u_int		 minburst, maxburst;
541 
542 	opts = &pa->pq_u.cbq_opts;
543 	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
544 	minburst = opts->minburst;
545 	maxburst = opts->maxburst;
546 
547 	if (pa->bandwidth == 0)
548 		f = 0.0001;	/* small enough? */
549 	else
550 		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);
551 
552 	nsPerByte = ifnsPerByte / f;
553 	ptime = (double)opts->pktsize * ifnsPerByte;
554 	cptime = ptime * (1.0 - f) / f;
555 
556 	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
557 		/*
558 		 * this would cause integer overflow in the kernel!
559 		 * (bandwidth < 6Kbps when max_pkt_size=1500)
560 		 */
561 		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
562 			warnx("queue bandwidth must be larger than %s",
563 			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
564 			    (double)INT_MAX * (double)pa->ifbandwidth));
565 			fprintf(stderr, "cbq: queue %s is too slow!\n",
566 			    pa->qname);
567 		}
568 		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
569 	}
570 
571 	if (maxburst == 0) {  /* use default */
572 		if (cptime > 10.0 * 1000000)
573 			maxburst = 4;
574 		else
575 			maxburst = 16;
576 	}
577 	if (minburst == 0)  /* use default */
578 		minburst = 2;
579 	if (minburst > maxburst)
580 		minburst = maxburst;
581 
582 	z = (double)(1 << RM_FILTER_GAIN);
583 	g = (1.0 - 1.0 / z);
584 	gton = pow(g, (double)maxburst);
585 	gtom = pow(g, (double)(minburst-1));
586 	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
587 	maxidle_s = (1.0 - g);
588 	if (maxidle > maxidle_s)
589 		maxidle = ptime * maxidle;
590 	else
591 		maxidle = ptime * maxidle_s;
592 	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
593 	minidle = -((double)opts->maxpktsize * (double)nsPerByte);
594 
595 	/* scale parameters */
596 	maxidle = ((maxidle * 8.0) / nsPerByte) *
597 	    pow(2.0, (double)RM_FILTER_GAIN);
598 	offtime = (offtime * 8.0) / nsPerByte *
599 	    pow(2.0, (double)RM_FILTER_GAIN);
600 	minidle = ((minidle * 8.0) / nsPerByte) *
601 	    pow(2.0, (double)RM_FILTER_GAIN);
602 
603 	maxidle = maxidle / 1000.0;
604 	offtime = offtime / 1000.0;
605 	minidle = minidle / 1000.0;
606 
607 	opts->minburst = minburst;
608 	opts->maxburst = maxburst;
609 	opts->ns_per_byte = (u_int)nsPerByte;
610 	opts->maxidle = (u_int)fabs(maxidle);
611 	opts->minidle = (int)minidle;
612 	opts->offtime = (u_int)fabs(offtime);
613 
614 	return (0);
615 }
616 
617 static int
618 check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
619 {
620 	int	error = 0;
621 
622 	/*
623 	 * check if cbq has one root queue and one default queue
624 	 * for this interface
625 	 */
626 	if (if_ppa->meta.root_classes != 1) {
627 		warnx("should have one root queue on %s", if_ppa->pa.ifname);
628 		error++;
629 	}
630 	if (if_ppa->meta.default_classes != 1) {
631 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
632 		error++;
633 	}
634 	return (error);
635 }
636 
637 static int
638 print_cbq_opts(const struct pf_altq *a)
639 {
640 	const struct cbq_opts	*opts;
641 
642 	opts = &a->pq_u.cbq_opts;
643 	if (opts->flags) {
644 		printf("cbq(");
645 		if (opts->flags & CBQCLF_RED)
646 			printf(" red");
647 		if (opts->flags & CBQCLF_ECN)
648 			printf(" ecn");
649 		if (opts->flags & CBQCLF_RIO)
650 			printf(" rio");
651 		if (opts->flags & CBQCLF_CODEL)
652 			printf(" codel");
653 		if (opts->flags & CBQCLF_CLEARDSCP)
654 			printf(" cleardscp");
655 		if (opts->flags & CBQCLF_FLOWVALVE)
656 			printf(" flowvalve");
657 		if (opts->flags & CBQCLF_BORROW)
658 			printf(" borrow");
659 		if (opts->flags & CBQCLF_WRR)
660 			printf(" wrr");
661 		if (opts->flags & CBQCLF_EFFICIENT)
662 			printf(" efficient");
663 		if (opts->flags & CBQCLF_ROOTCLASS)
664 			printf(" root");
665 		if (opts->flags & CBQCLF_DEFCLASS)
666 			printf(" default");
667 		printf(" ) ");
668 
669 		return (1);
670 	} else
671 		return (0);
672 }
673 
674 /*
675  * PRIQ support functions
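 *
 * Each PRIQ class on an interface must use a distinct priority, which
 * is tracked per interface in the qpris bitset.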
676  */
677 static int
678 eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
679 {
680 
681 	if (pa->priority >= PRIQ_MAXPRI) {
682 		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
683 		return (-1);
684 	}
685 	if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
686 		warnx("%s does not have a unique priority on interface %s",
687 		    pa->qname, pa->ifname);
688 		return (-1);
689 	} else
690 		BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);
691 
692 	if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
693 		if_ppa->meta.default_classes++;
694 	return (0);
695 }
696 
697 static int
698 check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
699 {
700 
701 	/*
702 	 * check if priq has one default class for this interface
703 	 */
704 	if (if_ppa->meta.default_classes != 1) {
705 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
706 		return (1);
707 	}
708 	return (0);
709 }
710 
711 static int
712 print_priq_opts(const struct pf_altq *a)
713 {
714 	const struct priq_opts	*opts;
715 
716 	opts = &a->pq_u.priq_opts;
717 
718 	if (opts->flags) {
719 		printf("priq(");
720 		if (opts->flags & PRCF_RED)
721 			printf(" red");
722 		if (opts->flags & PRCF_ECN)
723 			printf(" ecn");
724 		if (opts->flags & PRCF_RIO)
725 			printf(" rio");
726 		if (opts->flags & PRCF_CODEL)
727 			printf(" codel");
728 		if (opts->flags & PRCF_CLEARDSCP)
729 			printf(" cleardscp");
730 		if (opts->flags & PRCF_DEFAULTCLASS)
731 			printf(" default");
732 		printf(" ) ");
733 
734 		return (1);
735 	} else
736 		return (0);
737 }
738 
739 /*
740  * HFSC support functions
741  */
742 static int
743 eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
744     struct pfctl_altq *parent)
745 {
746 	struct hfsc_opts_v1	*opts;
747 	struct service_curve	 sc;
748 
749 	opts = &pa->pq_u.hfsc_opts;
750 
751 	if (parent == NULL) {
752 		/* root queue */
753 		opts->lssc_m1 = pa->ifbandwidth;
754 		opts->lssc_m2 = pa->ifbandwidth;
755 		opts->lssc_d = 0;
756 		return (0);
757 	}
758 
759 	/* First child initializes the parent's service curve accumulators. */
760 	if (parent->meta.children == 1) {
761 		LIST_INIT(&parent->meta.rtsc);
762 		LIST_INIT(&parent->meta.lssc);
763 	}
764 
765 	if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
766 		warnx("adding %s would make default queue %s not a leaf",
767 		    pa->qname, pa->parent);
768 		return (-1);
769 	}
770 
771 	if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
772 		if_ppa->meta.default_classes++;
773 
774 	/* if link_share is not specified, use bandwidth */
775 	if (opts->lssc_m2 == 0)
776 		opts->lssc_m2 = pa->bandwidth;
777 
778 	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
779 	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
780 	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
781 		warnx("m2 is zero for %s", pa->qname);
782 		return (-1);
783 	}
784 
785 	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
786 	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
787 	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
788 		warnx("m1 must be zero for convex curve: %s", pa->qname);
789 		return (-1);
790 	}
791 
792 	/*
793 	 * admission control:
794 	 * for the real-time service curve, the sum of the service curves
795 	 * should not exceed 80% of the interface bandwidth; 20% is reserved
796 	 * so as not to over-commit the actual interface bandwidth.
797 	 * for the linkshare service curve, the sum of the child service
798 	 * curves should not exceed the parent service curve.
799 	 * for the upper-limit service curve, the assigned bandwidth should
800 	 * be smaller than the interface bandwidth, and the upper-limit should
801 	 * be larger than the real-time service curve when both are defined.
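	 *
	 * e.g. on a 100Mbps interface, the summed real-time curves of the
	 * children are checked against a flat 80Mbps curve.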
802 	 */
803 
804 	/* check the real-time service curve.  reserve 20% of interface bw */
805 	if (opts->rtsc_m2 != 0) {
806 		/* add this queue to the sum */
807 		sc.m1 = opts->rtsc_m1;
808 		sc.d = opts->rtsc_d;
809 		sc.m2 = opts->rtsc_m2;
810 		gsc_add_sc(&parent->meta.rtsc, &sc);
811 		/* compare the sum with 80% of the interface */
812 		sc.m1 = 0;
813 		sc.d = 0;
814 		sc.m2 = pa->ifbandwidth / 100 * 80;
815 		if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
816 			warnx("real-time sc exceeds 80%% of the interface "
817 			    "bandwidth (%s)", rate2str((double)sc.m2));
818 			return (-1);
819 		}
820 	}
821 
822 	/* check the linkshare service curve. */
823 	if (opts->lssc_m2 != 0) {
824 		/* add this queue to the child sum */
825 		sc.m1 = opts->lssc_m1;
826 		sc.d = opts->lssc_d;
827 		sc.m2 = opts->lssc_m2;
828 		gsc_add_sc(&parent->meta.lssc, &sc);
829 		/* compare the sum of the children with parent's sc */
830 		sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
831 		sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
832 		sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
833 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
834 			warnx("linkshare sc exceeds parent's sc");
835 			return (-1);
836 		}
837 	}
838 
839 	/* check the upper-limit service curve. */
840 	if (opts->ulsc_m2 != 0) {
841 		if (opts->ulsc_m1 > pa->ifbandwidth ||
842 		    opts->ulsc_m2 > pa->ifbandwidth) {
843 			warnx("upper-limit larger than interface bandwidth");
844 			return (-1);
845 		}
846 		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
847 			warnx("upper-limit sc smaller than real-time sc");
848 			return (-1);
849 		}
850 	}
851 
852 	return (0);
853 }
854 
855 /*
856  * FAIRQ support functions
857  */
858 static int
859 eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
860     struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
861 {
862 	struct fairq_opts	*opts;
863 	struct service_curve	 sc;
864 
865 	opts = &pa->pq_u.fairq_opts;
866 
867 	if (parent == NULL) {
868 		/* root queue */
869 		opts->lssc_m1 = pa->ifbandwidth;
870 		opts->lssc_m2 = pa->ifbandwidth;
871 		opts->lssc_d = 0;
872 		return (0);
873 	}
874 
875 	/* First child initializes the parent's service curve accumulator. */
876 	if (parent->meta.children == 1)
877 		LIST_INIT(&parent->meta.lssc);
878 
879 	if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
880 		warnx("adding %s would make default queue %s not a leaf",
881 		    pa->qname, pa->parent);
882 		return (-1);
883 	}
884 
885 	if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
886 		if_ppa->meta.default_classes++;
887 
888 	/* if link_share is not specified, use bandwidth */
889 	if (opts->lssc_m2 == 0)
890 		opts->lssc_m2 = pa->bandwidth;
891 
892 	/*
893 	 * admission control:
894 	 * for the link-sharing service curve, the sum of the child service
895 	 * curves should not exceed the parent service curve.  this is the
896 	 * only admission check performed for FAIRQ below.
902 	 */
903 
904 	/* check the linkshare service curve. */
905 	if (opts->lssc_m2 != 0) {
906 		/* add this queue to the child sum */
907 		sc.m1 = opts->lssc_m1;
908 		sc.d = opts->lssc_d;
909 		sc.m2 = opts->lssc_m2;
910 		gsc_add_sc(&parent->meta.lssc, &sc);
911 		/* compare the sum of the children with parent's sc */
912 		sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
913 		sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
914 		sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
915 		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
916 			warnx("link-sharing sc exceeds parent's sc");
917 			return (-1);
918 		}
919 	}
920 
921 	return (0);
922 }
923 
924 static int
925 check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
926 {
927 
928 	/* check if hfsc has one default queue for this interface */
929 	if (if_ppa->meta.default_classes != 1) {
930 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
931 		return (1);
932 	}
933 	return (0);
934 }
935 
936 static int
937 check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
938 {
939 
940 	/* check if fairq has one default queue for this interface */
941 	if (if_ppa->meta.default_classes != 1) {
942 		warnx("should have one default queue on %s", if_ppa->pa.ifname);
943 		return (1);
944 	}
945 	return (0);
946 }
947 
948 static int
949 print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
950 {
951 	const struct hfsc_opts_v1	*opts;
952 	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;
953 
954 	opts = &a->pq_u.hfsc_opts;
955 	if (qopts == NULL)
956 		rtsc = lssc = ulsc = NULL;
957 	else {
958 		rtsc = &qopts->data.hfsc_opts.realtime;
959 		lssc = &qopts->data.hfsc_opts.linkshare;
960 		ulsc = &qopts->data.hfsc_opts.upperlimit;
961 	}
962 
963 	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
964 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
965 	    opts->lssc_d != 0))) {
966 		printf("hfsc(");
967 		if (opts->flags & HFCF_RED)
968 			printf(" red");
969 		if (opts->flags & HFCF_ECN)
970 			printf(" ecn");
971 		if (opts->flags & HFCF_RIO)
972 			printf(" rio");
973 		if (opts->flags & HFCF_CODEL)
974 			printf(" codel");
975 		if (opts->flags & HFCF_CLEARDSCP)
976 			printf(" cleardscp");
977 		if (opts->flags & HFCF_DEFAULTCLASS)
978 			printf(" default");
979 		if (opts->rtsc_m2 != 0)
980 			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
981 			    opts->rtsc_m2, rtsc);
982 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
983 		    opts->lssc_d != 0))
984 			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
985 			    opts->lssc_m2, lssc);
986 		if (opts->ulsc_m2 != 0)
987 			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
988 			    opts->ulsc_m2, ulsc);
989 		printf(" ) ");
990 
991 		return (1);
992 	} else
993 		return (0);
994 }
995 
996 static int
997 print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
998 {
999 	const struct codel_opts *opts;
1000 
1001 	opts = &a->pq_u.codel_opts;
1002 	if (opts->target || opts->interval || opts->ecn) {
1003 		printf("codel(");
1004 		if (opts->target)
1005 			printf(" target %d", opts->target);
1006 		if (opts->interval)
1007 			printf(" interval %d", opts->interval);
1008 		if (opts->ecn)
1009 			printf(" ecn");
1010 		printf(" ) ");
1011 
1012 		return (1);
1013 	}
1014 
1015 	return (0);
1016 }
1017 
1018 static int
1019 print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1020 {
1021 	const struct fairq_opts		*opts;
1022 	const struct node_fairq_sc	*loc_lssc;
1023 
1024 	opts = &a->pq_u.fairq_opts;
1025 	if (qopts == NULL)
1026 		loc_lssc = NULL;
1027 	else
1028 		loc_lssc = &qopts->data.fairq_opts.linkshare;
1029 
1030 	if (opts->flags ||
1031 	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1032 	    opts->lssc_d != 0))) {
1033 		printf("fairq(");
1034 		if (opts->flags & FARF_RED)
1035 			printf(" red");
1036 		if (opts->flags & FARF_ECN)
1037 			printf(" ecn");
1038 		if (opts->flags & FARF_RIO)
1039 			printf(" rio");
1040 		if (opts->flags & FARF_CODEL)
1041 			printf(" codel");
1042 		if (opts->flags & FARF_CLEARDSCP)
1043 			printf(" cleardscp");
1044 		if (opts->flags & FARF_DEFAULTCLASS)
1045 			printf(" default");
1046 		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1047 		    opts->lssc_d != 0))
1048 			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
1049 			    opts->lssc_m2, loc_lssc);
1050 		printf(" ) ");
1051 
1052 		return (1);
1053 	} else
1054 		return (0);
1055 }
1056 
1057 /*
1058  * admission control using generalized service curve
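 *
 * A gen_sc is a list of linear segments kept sorted by x: each segment
 * starts at (x, y) and runs for d along the x-axis with slope m.  A
 * two-piece service curve (m1, d, m2) is added as a slope-m1 segment
 * over [0, d) followed by a slope-m2 segment over [d, INFINITY).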
1059  */
1060 
1061 /* add a new service curve to a generalized service curve */
1062 static void
1063 gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
1064 {
1065 	if (is_sc_null(sc))
1066 		return;
1067 	if (sc->d != 0)
1068 		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
1069 	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
1070 }
1071 
1072 /*
1073  * check whether every point of a generalized service curve has a
1074  * y-coordinate no larger than that of a given two-piece linear
1075  * service curve.
1076  */
1077 static int
1078 is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
1079 {
1080 	struct segment	*s, *last, *end;
1081 	double		 y;
1082 
1083 	if (is_sc_null(sc)) {
1084 		if (LIST_EMPTY(gsc))
1085 			return (1);
1086 		LIST_FOREACH(s, gsc, _next) {
1087 			if (s->m != 0)
1088 				return (0);
1089 		}
1090 		return (1);
1091 	}
1092 	/*
1093 	 * gsc has a dummy entry at the end with x = INFINITY.
1094 	 * loop up to this dummy entry.
1095 	 */
1096 	end = gsc_getentry(gsc, INFINITY);
1097 	if (end == NULL)
1098 		return (1);
1099 	last = NULL;
1100 	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
1101 		if (s->y > sc_x2y(sc, s->x))
1102 			return (0);
1103 		last = s;
1104 	}
1105 	/* last now holds the real last segment */
1106 	if (last == NULL)
1107 		return (1);
1108 	if (last->m > sc->m2)
1109 		return (0);
1110 	if (last->x < sc->d && last->m > sc->m1) {
1111 		y = last->y + (sc->d - last->x) * last->m;
1112 		if (y > sc_x2y(sc, sc->d))
1113 			return (0);
1114 	}
1115 	return (1);
1116 }
1117 
1118 /*
1119  * return a segment entry starting at x.
1120  * if gsc has no entry starting at x, a new entry is created at x.
1121  */
1122 static struct segment *
1123 gsc_getentry(struct gen_sc *gsc, double x)
1124 {
1125 	struct segment	*new, *prev, *s;
1126 
1127 	prev = NULL;
1128 	LIST_FOREACH(s, gsc, _next) {
1129 		if (s->x == x)
1130 			return (s);	/* matching entry found */
1131 		else if (s->x < x)
1132 			prev = s;
1133 		else
1134 			break;
1135 	}
1136 
1137 	/* we have to create a new entry */
1138 	if ((new = calloc(1, sizeof(struct segment))) == NULL)
1139 		return (NULL);
1140 
1141 	new->x = x;
1142 	if (x == INFINITY || s == NULL)
1143 		new->d = 0;
1144 	else if (s->x == INFINITY)
1145 		new->d = INFINITY;
1146 	else
1147 		new->d = s->x - x;
1148 	if (prev == NULL) {
1149 		/* insert the new entry at the head of the list */
1150 		new->y = 0;
1151 		new->m = 0;
1152 		LIST_INSERT_HEAD(gsc, new, _next);
1153 	} else {
1154 		/*
1155 		 * the start point intersects the segment pointed to by
1156 		 * prev.  divide prev into two segments.
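		 *
		 * e.g. inserting x = 5 into prev = {x = 0, d = 10} shortens
		 * prev to d = 5 and starts the new segment at
		 * y = prev->y + 5 * prev->m.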
1157 		 */
1158 		if (x == INFINITY) {
1159 			prev->d = INFINITY;
1160 			if (prev->m == 0)
1161 				new->y = prev->y;
1162 			else
1163 				new->y = INFINITY;
1164 		} else {
1165 			prev->d = x - prev->x;
1166 			new->y = prev->d * prev->m + prev->y;
1167 		}
1168 		new->m = prev->m;
1169 		LIST_INSERT_AFTER(prev, new, _next);
1170 	}
1171 	return (new);
1172 }
1173 
1174 /* add a segment to a generalized service curve */
1175 static int
1176 gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
1177 {
1178 	struct segment	*start, *end, *s;
1179 	double		 x2;
1180 
1181 	if (d == INFINITY)
1182 		x2 = INFINITY;
1183 	else
1184 		x2 = x + d;
1185 	start = gsc_getentry(gsc, x);
1186 	end = gsc_getentry(gsc, x2);
1187 	if (start == NULL || end == NULL)
1188 		return (-1);
1189 
1190 	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
1191 		s->m += m;
1192 		s->y += y + (s->x - x) * m;
1193 	}
1194 
1195 	end = gsc_getentry(gsc, INFINITY);
1196 	for (; s != end; s = LIST_NEXT(s, _next)) {
1197 		s->y += m * d;
1198 	}
1199 
1200 	return (0);
1201 }
1202 
1203 /* get y-projection of a service curve */
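/*
 * e.g. for sc = {m1 = 2, d = 10, m2 = 1}: sc_x2y(sc, 5) = 5 * 2 = 10,
 * and sc_x2y(sc, 15) = 10 * 2 + 5 * 1 = 25.
 */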
1204 static double
1205 sc_x2y(struct service_curve *sc, double x)
1206 {
1207 	double	y;
1208 
1209 	if (x <= (double)sc->d)
1210 		/* y belongs to the 1st segment */
1211 		y = x * (double)sc->m1;
1212 	else
1213 		/* y belongs to the 2nd segment */
1214 		y = (double)sc->d * (double)sc->m1
1215 			+ (x - (double)sc->d) * (double)sc->m2;
1216 	return (y);
1217 }
1218 
1219 /*
1220  * misc utilities
1221  */
1222 #define	R2S_BUFS	8
1223 #define	RATESTR_MAX	16
1224 
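/*
 * Render a rate in bits per second with a K/M/G suffix, e.g.
 * rate2str(12500000.0) returns "12.50Mb" and rate2str(1000000.0)
 * returns "1Mb".  Results rotate through a small ring of static
 * buffers so that a few calls can share one printf().
 */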
1225 char *
1226 rate2str(double rate)
1227 {
1228 	char		*buf;
1229 	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];  /* ring buffer */
1230 	static int	 idx = 0;
1231 	int		 i;
1232 	static const char unit[] = " KMG";
1233 
1234 	buf = r2sbuf[idx++];
1235 	if (idx == R2S_BUFS)
1236 		idx = 0;
1237 
1238 	for (i = 0; rate >= 1000 && i <= 3; i++)
1239 		rate /= 1000;
1240 
1241 	if ((int)(rate * 100) % 100)
1242 		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
1243 	else
1244 		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);
1245 
1246 	return (buf);
1247 }
1248 
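/*
 * Query the interface baud rate via SIOCGIFDATA; a result of 0 means
 * the driver does not report a speed, in which case eval_pfaltq()
 * requires an explicit bandwidth in the config.
 */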
1249 u_int32_t
1250 getifspeed(char *ifname)
1251 {
1252 	int		s;
1253 	struct ifreq	ifr;
1254 	struct if_data	ifrdat;
1255 
1256 	s = get_query_socket();
1257 	bzero(&ifr, sizeof(ifr));
1258 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1259 	    sizeof(ifr.ifr_name))
1260 		errx(1, "getifspeed: strlcpy");
1261 	ifr.ifr_data = (caddr_t)&ifrdat;
1262 	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
1263 		err(1, "SIOCGIFDATA");
1264 	return ((u_int32_t)ifrdat.ifi_baudrate);
1265 }
1266 
1267 u_long
1268 getifmtu(char *ifname)
1269 {
1270 	int		s;
1271 	struct ifreq	ifr;
1272 
1273 	s = get_query_socket();
1274 	bzero(&ifr, sizeof(ifr));
1275 	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1276 	    sizeof(ifr.ifr_name))
1277 		errx(1, "getifmtu: strlcpy");
1278 	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1279 #ifdef __FreeBSD__
1280 		ifr.ifr_mtu = 1500;
1281 #else
1282 		err(1, "SIOCGIFMTU");
1283 #endif
1284 	if (ifr.ifr_mtu > 0)
1285 		return (ifr.ifr_mtu);
1286 	else {
1287 		warnx("could not get mtu for %s, assuming 1500", ifname);
1288 		return (1500);
1289 	}
1290 }
1291 
1292 int
1293 eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
1294     u_int64_t ref_bw)
1295 {
1296 	int	errors = 0;
1297 
1298 	switch (pa->scheduler) {
1299 	case ALTQT_CBQ:
1300 		pa->pq_u.cbq_opts = opts->data.cbq_opts;
1301 		break;
1302 	case ALTQT_PRIQ:
1303 		pa->pq_u.priq_opts = opts->data.priq_opts;
1304 		break;
1305 	case ALTQT_HFSC:
1306 		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
1307 		if (opts->data.hfsc_opts.linkshare.used) {
1308 			pa->pq_u.hfsc_opts.lssc_m1 =
1309 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
1310 			    ref_bw);
1311 			pa->pq_u.hfsc_opts.lssc_m2 =
1312 			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
1313 			    ref_bw);
1314 			pa->pq_u.hfsc_opts.lssc_d =
1315 			    opts->data.hfsc_opts.linkshare.d;
1316 		}
1317 		if (opts->data.hfsc_opts.realtime.used) {
1318 			pa->pq_u.hfsc_opts.rtsc_m1 =
1319 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
1320 			    ref_bw);
1321 			pa->pq_u.hfsc_opts.rtsc_m2 =
1322 			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
1323 			    ref_bw);
1324 			pa->pq_u.hfsc_opts.rtsc_d =
1325 			    opts->data.hfsc_opts.realtime.d;
1326 		}
1327 		if (opts->data.hfsc_opts.upperlimit.used) {
1328 			pa->pq_u.hfsc_opts.ulsc_m1 =
1329 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
1330 			    ref_bw);
1331 			pa->pq_u.hfsc_opts.ulsc_m2 =
1332 			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
1333 			    ref_bw);
1334 			pa->pq_u.hfsc_opts.ulsc_d =
1335 			    opts->data.hfsc_opts.upperlimit.d;
1336 		}
1337 		break;
1338 	case ALTQT_FAIRQ:
1339 		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
1340 		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
1341 		pa->pq_u.fairq_opts.hogs_m1 =
1342 			eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);
1343 
1344 		if (opts->data.fairq_opts.linkshare.used) {
1345 			pa->pq_u.fairq_opts.lssc_m1 =
1346 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
1347 			    ref_bw);
1348 			pa->pq_u.fairq_opts.lssc_m2 =
1349 			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
1350 			    ref_bw);
1351 			pa->pq_u.fairq_opts.lssc_d =
1352 			    opts->data.fairq_opts.linkshare.d;
1353 		}
1354 		break;
1355 	case ALTQT_CODEL:
1356 		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
1357 		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
1358 		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
1359 		break;
1360 	default:
1361 		warnx("eval_queue_opts: unknown scheduler type %u",
1362 		    opts->qtype);
1363 		errors++;
1364 		break;
1365 	}
1366 
1367 	return (errors);
1368 }
1369 
1370 /*
1371  * If the absolute bandwidth is set, return the lesser of that value and the
1372  * reference bandwidth.  Limiting to the reference bandwidth allows simple
1373  * limiting of configured bandwidth parameters for schedulers that are
1374  * 32-bit limited, as the root/interface bandwidth (top-level reference
1375  * bandwidth) will be properly limited in that case.
1376  *
1377  * Otherwise, if the absolute bandwidth is not set, return the given
1378  * percentage of the reference bandwidth.
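 *
 * e.g. with a reference bandwidth of 100Mbps, "bandwidth 25%" evaluates
 * to 25Mbps, and an absolute value above 100Mbps is clamped to 100Mbps.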
1379  */
1380 u_int64_t
1381 eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
1382 {
1383 	if (bw->bw_absolute > 0)
1384 		return (MIN(bw->bw_absolute, ref_bw));
1385 
1386 	if (bw->bw_percent > 0)
1387 		return (ref_bw / 100 * bw->bw_percent);
1388 
1389 	return (0);
1390 }
1391 
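/*
 * Print one service curve spec, preferring percentages from the parsed
 * config over the computed absolute rates, e.g. " realtime(20Mb 5000 10Mb)"
 * for a two-piece curve or " realtime 10Mb" for a flat one.
 */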
1392 void
1393 print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1394     const struct node_hfsc_sc *sc)
1395 {
1396 	printf(" %s", scname);
1397 
1398 	if (d != 0) {
1399 		printf("(");
1400 		if (sc != NULL && sc->m1.bw_percent > 0)
1401 			printf("%u%%", sc->m1.bw_percent);
1402 		else
1403 			printf("%s", rate2str((double)m1));
1404 		printf(" %u", d);
1405 	}
1406 
1407 	if (sc != NULL && sc->m2.bw_percent > 0)
1408 		printf(" %u%%", sc->m2.bw_percent);
1409 	else
1410 		printf(" %s", rate2str((double)m2));
1411 
1412 	if (d != 0)
1413 		printf(")");
1414 }
1415 
1416 void
1417 print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
1418     const struct node_fairq_sc *sc)
1419 {
1420 	printf(" %s", scname);
1421 
1422 	if (d != 0) {
1423 		printf("(");
1424 		if (sc != NULL && sc->m1.bw_percent > 0)
1425 			printf("%u%%", sc->m1.bw_percent);
1426 		else
1427 			printf("%s", rate2str((double)m1));
1428 		printf(" %u", d);
1429 	}
1430 
1431 	if (sc != NULL && sc->m2.bw_percent > 0)
1432 		printf(" %u%%", sc->m2.bw_percent);
1433 	else
1434 		printf(" %s", rate2str((double)m2));
1435 
1436 	if (d != 0)
1437 		printf(")");
1438 }
1439