/*	$NetBSD: altq_jobs.c,v 1.9 2016/04/20 08:58:48 knakahara Exp $	*/
/*	$KAME: altq_jobs.c,v 1.11 2005/04/13 03:44:25 suz Exp $	*/
/*
 * Copyright (c) 2001, the Rector and Board of Visitors of the
 * University of Virginia.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms,
 * with or without modification, are permitted provided
 * that the following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the University of Virginia nor the names
 * of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * JoBS - altq prototype implementation
 *
 * Author: Nicolas Christin <nicolas@cs.virginia.edu>
 *
 * JoBS algorithms originally devised and proposed by
 * Nicolas Christin and Jorg Liebeherr.
 * Grateful acknowledgments to Tarek Abdelzaher for his help and
 * comments, and to Kenjiro Cho for some helpful advice.
 * Contributed by the Multimedia Networks Group at the University
 * of Virginia.
 *
 * Papers and additional info can be found at
 * http://qosbox.cs.virginia.edu
 *
 */

/*
 * JoBS queue
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_jobs.c,v 1.9 2016/04/20 08:58:48 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#endif

#ifdef ALTQ_JOBS  /* jobs is enabled by ALTQ_JOBS option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#ifdef __FreeBSD__
#include <sys/limits.h>
#endif

#include <net/if.h>
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_jobs.h>

#ifdef ALTQ3_COMPAT
/*
 * function prototypes
 */
static struct jobs_if *jobs_attach(struct ifaltq *, u_int, u_int, u_int);
static void jobs_detach(struct jobs_if *);
static int jobs_clear_interface(struct jobs_if *);
static int jobs_request(struct ifaltq *, int, void *);
static void jobs_purge(struct jobs_if *);
static struct jobs_class *jobs_class_create(struct jobs_if *,
    int, int64_t, int64_t, int64_t, int64_t, int64_t, int);
static int jobs_class_destroy(struct jobs_class *);
static int jobs_enqueue(struct ifaltq *, struct mbuf *);
static struct mbuf *jobs_dequeue(struct ifaltq *, int);

static int jobs_addq(struct jobs_class *, struct mbuf *, struct jobs_if*);
static struct mbuf *jobs_getq(struct jobs_class *);
static struct mbuf *jobs_pollq(struct jobs_class *);
static void jobs_purgeq(struct jobs_class *);

static int jobscmd_if_attach(struct jobs_attach *);
static int jobscmd_if_detach(struct jobs_interface *);
static int jobscmd_add_class(struct jobs_add_class *);
static int jobscmd_delete_class(struct jobs_delete_class *);
static int jobscmd_modify_class(struct jobs_modify_class *);
static int jobscmd_add_filter(struct jobs_add_filter *);
static int jobscmd_delete_filter(struct jobs_delete_filter *);
static int jobscmd_class_stats(struct jobs_class_stats *);
static void get_class_stats(struct class_stats *, struct jobs_class *);
static struct jobs_class *clh_to_clp(struct jobs_if *, u_long);
static u_long clp_to_clh(struct jobs_class *);

static TSLIST *tslist_alloc(void);
static void tslist_destroy(struct jobs_class *);
static int tslist_enqueue(struct jobs_class *, u_int64_t);
static void tslist_dequeue(struct jobs_class *);
static void tslist_drop(struct jobs_class *);

static int enforce_wc(struct jobs_if *);
static int64_t* adjust_rates_rdc(struct jobs_if *);
static int64_t* assign_rate_drops_adc(struct jobs_if *);
static int64_t* update_error(struct jobs_if *);
static int min_rates_adc(struct jobs_if *);
static int64_t proj_delay(struct jobs_if *, int);
static int pick_dropped_rlc(struct jobs_if *);

altqdev_decl(jobs);

/* jif_list keeps all jobs_if's allocated. */
static struct jobs_if *jif_list = NULL;

typedef unsigned long long ull;

/* setup functions */

static struct jobs_if *
jobs_attach(struct ifaltq *ifq, u_int bandwidth, u_int qlimit, u_int separate)
{
	struct jobs_if *jif;

	jif = malloc(sizeof(struct jobs_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (jif == NULL)
		return (NULL);

	jif->jif_bandwidth = bandwidth;
	jif->jif_qlimit = qlimit;
	jif->jif_separate = separate;
#ifdef ALTQ_DEBUG
	printf("JoBS bandwidth = %d bps\n", (int)bandwidth);
	printf("JoBS buffer size = %d pkts [%s]\n",
	       (int)qlimit, separate?"separate buffers":"shared buffer");
#endif
	jif->jif_maxpri = -1;
	jif->jif_ifq = ifq;

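	/*
	 * initialize the enqueue/dequeue cycle-count statistics;
	 * wc_/bc_ presumably track worst-case and best-case costs,
	 * while the avg_/avg2_ running sums allow computing mean and
	 * variance; per the comment in jobs_enqueue(), these are
	 * used for benchmarking only
	 */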
	jif->wc_cycles_enqueue = 0;
	jif->avg_cycles_enqueue = 0;
	jif->avg_cycles2_enqueue = 0;
	jif->bc_cycles_enqueue = ALTQ_INFINITY;
	jif->wc_cycles_dequeue = 0;
	jif->avg_cycles_dequeue = 0;
	jif->avg_cycles2_dequeue = 0;
	jif->bc_cycles_dequeue = ALTQ_INFINITY;
	jif->total_enqueued = 0;
	jif->total_dequeued = 0;

	/* add this state to the jobs list */
	jif->jif_next = jif_list;
	jif_list = jif;

	return (jif);
}

static void
jobs_detach(struct jobs_if *jif)
{
	(void)jobs_clear_interface(jif);

	/* remove this interface from the jif list */
	if (jif_list == jif)
		jif_list = jif->jif_next;
	else {
		struct jobs_if *p;

		for (p = jif_list; p != NULL; p = p->jif_next)
			if (p->jif_next == jif) {
				p->jif_next = jif->jif_next;
				break;
			}
		ASSERT(p != NULL);
	}
	free(jif, M_DEVBUF);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 */
static int
jobs_clear_interface(struct jobs_if *jif)
{
	struct jobs_class	*cl;
	int	pri;

	/* free the filters for this interface */
	acc_discard_filters(&jif->jif_classifier, NULL, 1);

	/* clear out the classes */
	for (pri = 0; pri <= jif->jif_maxpri; pri++)
		if ((cl = jif->jif_classes[pri]) != NULL)
			jobs_class_destroy(cl);

	return (0);
}

static int
jobs_request(struct ifaltq *ifq, int req, void *arg)
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		jobs_purge(jif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
jobs_purge(struct jobs_if *jif)
{
	struct jobs_class *cl;
	int pri;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
			jobs_purgeq(cl);
	}
	if (ALTQ_IS_ENABLED(jif->jif_ifq))
		jif->jif_ifq->ifq_len = 0;
}

static struct jobs_class *
jobs_class_create(struct jobs_if *jif, int pri, int64_t adc, int64_t rdc,
    int64_t alc, int64_t rlc, int64_t arc, int flags)
{
	struct jobs_class *cl, *scan1, *scan2;
	int s;
	int class_exists1, class_exists2;
	int i, j;
	int64_t tmp[JOBS_MAXPRI];
	u_int64_t now;

	if ((cl = jif->jif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			jobs_purgeq(cl);
		splx(s);
	} else {
		cl = malloc(sizeof(struct jobs_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return (NULL);

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;

		cl->arv_tm = tslist_alloc();
		if (cl->arv_tm == NULL)
			goto err_ret;
	}

	jif->jif_classes[pri] = cl;

	if (flags & JOCF_DEFAULTCLASS)
		jif->jif_default = cl;

	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->service_rate = 0;
	cl->min_rate_adc = 0;
	cl->current_loss = 0;
	cl->cl_period = 0;
	PKTCNTR_RESET(&cl->cl_arrival);
	PKTCNTR_RESET(&cl->cl_rin);
	PKTCNTR_RESET(&cl->cl_rout);
	PKTCNTR_RESET(&cl->cl_rout_th);
	PKTCNTR_RESET(&cl->cl_dropcnt);
	PKTCNTR_RESET(&cl->st_arrival);
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);
	PKTCNTR_RESET(&cl->st_dropcnt);
	cl->st_service_rate = 0;
	cl->cl_lastdel = 0;
	cl->cl_avgdel = 0;
	cl->adc_violations = 0;

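	/*
	 * a constraint value of -1 means the class is not concerned
	 * by that constraint: the bounds (ADC, ALC) then default to
	 * ALTQ_INFINITY, while the targets used in products (RDC,
	 * RLC) and the rate constraint (ARC) default to zero
	 */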
	if (adc == -1) {
		cl->concerned_adc = 0;
		adc = ALTQ_INFINITY;
	} else
		cl->concerned_adc = 1;

	if (alc == -1) {
		cl->concerned_alc = 0;
		alc = ALTQ_INFINITY;
	} else
		cl->concerned_alc = 1;

	if (rdc == -1) {
		rdc = 0;
		cl->concerned_rdc = 0;
	} else
		cl->concerned_rdc = 1;

	if (rlc == -1) {
		rlc = 0;
		cl->concerned_rlc = 0;
	} else
		cl->concerned_rlc = 1;

	if (arc == -1) {
		arc = 0;
		cl->concerned_arc = 0;
	} else
		cl->concerned_arc = 1;

	cl->cl_rdc = rdc;

	if (cl->concerned_adc) {
		/* adc is given in us, convert it to clock ticks */
		cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
	} else
		cl->cl_adc = adc;

	if (cl->concerned_arc) {
		/* arc is given in bps, convert it to internal unit */
		cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
	} else
		cl->cl_arc = arc;

	cl->cl_rlc = rlc;
	cl->cl_alc = alc;
	cl->delay_prod_others = 0;
	cl->loss_prod_others = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > jif->jif_maxpri)
		jif->jif_maxpri = pri;
	cl->cl_jif = jif;
	cl->cl_handle = (u_long)cl;  /* just a pointer to this class */

	/*
	 * update delay_prod_others and loss_prod_others
	 * in all classes if needed
	 */

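	/*
	 * tmp[i] accumulates the product of the RDC (resp. RLC)
	 * parameters of all classes with index lower than i;
	 * delay_prod_others (resp. loss_prod_others) for class i is
	 * then the product of tmp[j] over all other classes j != i
	 */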
	if (cl->concerned_rdc) {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rdc)
						tmp[i] *= scan2->cl_rdc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->delay_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rdc)
						scan1->delay_prod_others *= tmp[j];
				}
			}
		}
	}

	if (cl->concerned_rlc) {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rlc)
						tmp[i] *= scan2->cl_rlc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->loss_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rlc)
						scan1->loss_prod_others *= tmp[j];
				}
			}
		}
	}

	now = read_machclk();
	cl->idletime = now;
	return (cl);

 err_ret:
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	if (cl->arv_tm != NULL)
		free(cl->arv_tm, M_DEVBUF);

	free(cl, M_DEVBUF);
	return (NULL);
}

static int
jobs_class_destroy(struct jobs_class *cl)
{
	struct jobs_if *jif;
	int s, pri;

	s = splnet();

	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		jobs_purgeq(cl);

	jif = cl->cl_jif;
	jif->jif_classes[cl->cl_pri] = NULL;
	if (jif->jif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (jif->jif_classes[pri] != NULL) {
				jif->jif_maxpri = pri;
				break;
			}
		if (pri < 0)
			jif->jif_maxpri = -1;
	}
	splx(s);

	tslist_destroy(cl);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (0);
}

/*
 * jobs_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
jobs_enqueue(struct ifaltq *ifq, struct mbuf *m)
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl, *scan;
	int len;
	int return_flag;
	int pri;
	u_int64_t now;
	u_int64_t old_arv;
	int64_t* delta_rate;
	u_int64_t tstamp1, tstamp2, cycles; /* used for benchmarking only */

	jif->total_enqueued++;
	now = read_machclk();
	tstamp1 = now;

	return_flag = 0;

	/* proceed with packet enqueuing */

	if (IFQ_IS_EMPTY(ifq)) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->cl_lastdel = 0;
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
	}

	/* grab class set by classifier */
	if ((cl = m->m_pkthdr.pattr_class) == NULL)
		cl = jif->jif_default;

	len = m_pktlen(m);
	old_arv = cl->cl_arrival.bytes;
	PKTCNTR_ADD(&cl->cl_arrival, (int)len);
	PKTCNTR_ADD(&cl->cl_rin, (int)len);
	PKTCNTR_ADD(&cl->st_arrival, (int)len);
	PKTCNTR_ADD(&cl->st_rin, (int)len);

	if (cl->cl_arrival.bytes < old_arv) {
		/* deals w/ overflow */
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
		PKTCNTR_ADD(&cl->cl_arrival, (int)len);
		PKTCNTR_ADD(&cl->cl_rin, (int)len);
	}

	if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
		cl->current_loss =
		    ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
	else
		cl->current_loss = 0;

	/* for MDRR: update theoretical value of the output curve */

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		scan = jif->jif_classes[pri];
		if (scan != NULL) {
			if (scan->cl_last_rate_update == scan->idletime
			    || scan->cl_last_rate_update == 0)
				scan->cl_last_rate_update = now; /* initial case */
			else
				scan->cl_rout_th.bytes +=
				    delay_diff(now, scan->cl_last_rate_update)
				    * scan->service_rate;

			/*
			 * we don't really care about packets here
			 * WARNING: rout_th is SCALED
			 * (b/c of the service rate)
			 * for precision, as opposed to rout.
			 */

			scan->cl_last_rate_update = now;
		}
	}

	if (jobs_addq(cl, m, jif) != 0)
		return_flag = ENOBUFS; /* signals there's a buffer overflow */
	else
		IFQ_INC_LEN(ifq);

	/* successfully queued. */

	enforce_wc(jif);

	if (!min_rates_adc(jif)) {
		delta_rate = assign_rate_drops_adc(jif);
		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
				if ((cl = jif->jif_classes[pri]) != NULL &&
				    !qempty(cl->cl_q))
					cl->service_rate += delta_rate[pri];
			free(delta_rate, M_DEVBUF);
		}
	}

	delta_rate = adjust_rates_rdc(jif);

	if (delta_rate != NULL) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++)
			if ((cl = jif->jif_classes[pri]) != NULL &&
			    !qempty(cl->cl_q))
				cl->service_rate += delta_rate[pri];
		free(delta_rate, M_DEVBUF);
	}

	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_enqueue)
		jif->wc_cycles_enqueue = cycles;
	if (cycles < jif->bc_cycles_enqueue)
		jif->bc_cycles_enqueue = cycles;

	jif->avg_cycles_enqueue += cycles;
	jif->avg_cycles2_enqueue += cycles * cycles;

	return (return_flag);
}

/*
 * jobs_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */

static struct mbuf *
jobs_dequeue(struct ifaltq *ifq, int op)
{
	struct jobs_if	*jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl;
	struct mbuf *m;
	int pri;
	int svc_class;
	int64_t max_error;
	int64_t error;
	u_int64_t now;
	u_int64_t tstamp1, tstamp2, cycles;

	jif->total_dequeued++;

	now = read_machclk();
	tstamp1 = now;

	if (IFQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			cl = jif->jif_classes[pri];
			if (cl != NULL)
				cl->idletime = now;
		}

		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (NULL);
	}

	/*
	 * select the class whose actual transmissions are the furthest
	 * from the promised transmissions
	 */
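	/*
	 * the comparison is made on the output curves: cl_rout_th is
	 * kept scaled by the service rate (see the warning in
	 * jobs_enqueue()), so the actual transmission count cl_rout
	 * is scaled here before taking the difference
	 */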

	max_error = -1;
	svc_class = -1;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		if (((cl = jif->jif_classes[pri]) != NULL)
		    && !qempty(cl->cl_q)) {
			error = (int64_t)cl->cl_rout_th.bytes
			    -(int64_t)scale_rate(cl->cl_rout.bytes);
			if (max_error == -1) {
				max_error = error;
				svc_class = pri;
			} else if (error > max_error) {
				max_error = error;
				svc_class = pri;
			}
		}
	}

	if (svc_class != -1)
		cl = jif->jif_classes[svc_class];
	else
		cl = NULL;

	if (op == ALTDQ_POLL) {
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (jobs_pollq(cl));
	}

	if (cl != NULL)
		m = jobs_getq(cl);
	else
		m = NULL;

	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		if (qempty(cl->cl_q))
			cl->cl_period++;

		cl->cl_lastdel = (u_int64_t)delay_diff(now,
		    tslist_first(cl->arv_tm)->timestamp);
		if (cl->concerned_adc
		    && (int64_t)cl->cl_lastdel > cl->cl_adc)
			cl->adc_violations++;
		cl->cl_avgdel += ticks_to_secs(GRANULARITY*cl->cl_lastdel);

		PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));
	}
	if (cl != NULL)
		tslist_dequeue(cl);		/* dequeue the timestamp */

	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_dequeue)
		jif->wc_cycles_dequeue = cycles;
	if (cycles < jif->bc_cycles_dequeue)
		jif->bc_cycles_dequeue = cycles;

	jif->avg_cycles_dequeue += cycles;
	jif->avg_cycles2_dequeue += cycles * cycles;

	return (m);
}

static int
jobs_addq(struct jobs_class *cl, struct mbuf *m, struct jobs_if *jif)
{
	int victim;
	u_int64_t len;
	u_int64_t now;
	struct jobs_class* victim_class;

	victim = -1;
	victim_class = NULL;
	len = 0;

	now = read_machclk();

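	/*
	 * note: current_loss is a fixed-point fraction of the
	 * arrival count, scaled by 2^SCALE_LOSS, hence the
	 * (len << SCALE_LOSS) / cl_arrival.bytes updates below
	 */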
	if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
		/*
		 * separate buffers: no guarantees on packet drops
		 * can be offered
		 * thus we drop the incoming packet
		 */
		len = (u_int64_t)m_pktlen(m);
		PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->cl_rin, (int)len);
		PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->st_rin, (int)len);
		cl->current_loss += (len << SCALE_LOSS)
		    /cl->cl_arrival.bytes;
		m_freem(m);
		return (-1);

	} else if (!jif->jif_separate
		   && jif->jif_ifq->ifq_len >= jif->jif_qlimit) {
		/* shared buffer: supports guarantees on losses */
		if (!cl->concerned_rlc) {
			if (!cl->concerned_alc) {
				/*
				 * no ALC, no RLC on this class:
				 * drop the incoming packet
				 */
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->cl_rin, (int)len);
				PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->st_rin, (int)len);
				cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
				m_freem(m);
				return (-1);
			} else {
				/*
				 * no RLC, but an ALC:
				 * drop the incoming packet if possible
				 */
				len = (u_int64_t)m_pktlen(m);
				if (cl->current_loss + (len << SCALE_LOSS)
				    / cl->cl_arrival.bytes <= cl->cl_alc) {
					PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->cl_rin, (int)len);
					PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->st_rin, (int)len);
					cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
					m_freem(m);
					return (-1);
				} else {
					/*
					 * the ALC would be violated:
					 * pick another class
					 */
					_addq(cl->cl_q, m);
					tslist_enqueue(cl, now);

					victim = pick_dropped_rlc(jif);

					if (victim == -1) {
						/*
						 * something went wrong
						 * let us discard
						 * the incoming packet,
						 * regardless of what
						 * may happen...
						 */
						victim_class = cl;
					} else
						victim_class = jif->jif_classes[victim];

					if (victim_class != NULL) {
						/*
						 * test for safety
						 * purposes...
						 * it must be true
						 */
						m = _getq_tail(victim_class->cl_q);
						len = (u_int64_t)m_pktlen(m);
						PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
						PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->st_rin, (int)len);
						victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
						m_freem(m); /* the packet is trashed here */
						tslist_drop(victim_class); /* and its timestamp as well */
					}
					return (-1);
				}
			}
		} else {
			/*
			 * RLC on that class:
			 * pick class according to RLCs
			 */
			_addq(cl->cl_q, m);
			tslist_enqueue(cl, now);

			victim = pick_dropped_rlc(jif);
			if (victim == -1) {
				/*
				 * something went wrong
				 * let us discard the incoming packet,
				 * regardless of what may happen...
				 */
				victim_class = cl;
			} else
				victim_class = jif->jif_classes[victim];

			if (victim_class != NULL) {
				/*
				 * test for safety purposes...
				 * it must be true
				 */
				m = _getq_tail(victim_class->cl_q);
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
				PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->st_rin, (int)len);
				victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
				m_freem(m); /* the packet is trashed here */
				tslist_drop(victim_class); /* and its timestamp as well */
			}
			return (-1);
		}
	}
	/* else: no drop */

	_addq(cl->cl_q, m);
	tslist_enqueue(cl, now);

	return (0);
}

static struct mbuf *
jobs_getq(struct jobs_class *cl)
{
	return _getq(cl->cl_q);
}

static struct mbuf *
jobs_pollq(struct jobs_class *cl)
{
	return qhead(cl->cl_q);
}

static void
jobs_purgeq(struct jobs_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
		m_freem(m);
		tslist_drop(cl);
	}
	ASSERT(qlen(cl->cl_q) == 0);
}

/*
 * timestamp list support routines
 *
 * this implementation has been revamped and
 * now uses a TAILQ structure.
 * timestamp list holds class timestamps
 * there is one timestamp list per class.
 */
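/*
 * the list mirrors the class queue in FIFO order: tslist_dequeue
 * removes the oldest timestamp (paired with _getq at the head),
 * while tslist_drop removes the newest one (paired with
 * _getq_tail for tail drops)
 */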
static TSLIST *
tslist_alloc(void)
{
	TSLIST *list_init;

	list_init = malloc(sizeof(TSLIST), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(list_init);
	return (list_init);
}

static void
tslist_destroy(struct jobs_class *cl)
{
	while (tslist_first(cl->arv_tm) != NULL)
		tslist_dequeue(cl);

	free(cl->arv_tm, M_DEVBUF);
}

static int
tslist_enqueue(struct jobs_class *cl, u_int64_t arv)
{
	TSENTRY *pushed;
	pushed = malloc(sizeof(TSENTRY), M_DEVBUF, M_WAITOK);
	if (pushed == NULL)
		return (0);

	pushed->timestamp = arv;
	TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
	return (1);
}

static void
tslist_dequeue(struct jobs_class *cl)
{
	TSENTRY *popped;
	popped = tslist_first(cl->arv_tm);
	if (popped != NULL) {
		TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
		free(popped, M_DEVBUF);
	}
	return;
}

static void
tslist_drop(struct jobs_class *cl)
{
	TSENTRY *popped;
	popped = tslist_last(cl->arv_tm);
	if (popped != NULL) {
		TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
		free(popped, M_DEVBUF);
	}
	return;
}

/*
 * rate allocation support routines
 */
/*
 * enforce_wc: enforce that backlogged classes have non-zero
 * service rate, and that non-backlogged classes have zero
 * service rate.
 */
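/*
 * when a correction is needed, the link capacity is simply
 * divided equally among the currently backlogged classes
 */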

static int
enforce_wc(struct jobs_if *jif)
{
	struct jobs_class *cl;

	int64_t active_classes;
	int pri;
	int is_backlogged, class_exists, updated;

	updated = 0;
	active_classes = 0;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		cl = jif->jif_classes[pri];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged)
			active_classes++;
		if ((is_backlogged && cl->service_rate <= 0)
		    ||(class_exists
		       && !is_backlogged && cl->service_rate > 0))
			updated = 1;
	}

	if (updated) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			cl = jif->jif_classes[pri];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists && !qempty(cl->cl_q));

			if (class_exists && !is_backlogged)
				cl->service_rate = 0;
			else if (is_backlogged)
				cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes);
		}
	}

	return (updated);
}

/*
 * adjust_rates_rdc: compute the service rate adjustments
 * needed to realize the desired proportional delay differentiation.
 * essentially, the rate adjustment delta_rate = prop_control*error,
 * where error is the difference between the measured "weighted"
 * delay and the mean of the weighted delays. see paper for more
 * information.
 * prop_control has slightly changed since the INFOCOM paper,
 * this condition seems to provide better results.
 */
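/*
 * concretely, for a backlogged class i subject to an RDC,
 *	error[i] = mean_weighted_delay - delay_prod_others[i]*delay[i]
 * (see update_error()), and the adjustment computed below is
 *	result[i] = -prop_control * error[i]
 * (then descaled by SCALE_SHARE), so classes whose weighted delay
 * exceeds the mean receive additional service, and vice versa
 */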

static int64_t *
adjust_rates_rdc(struct jobs_if *jif)
{
	int64_t *result;
	int64_t credit, available, lower_bound, upper_bound;
	int64_t bk;
	int i, j;
	int rdc_classes, active_classes;
	int class_exists, is_backlogged;
	struct jobs_class *cl;
	int64_t *error;
	int64_t prop_control;
	u_int64_t max_prod;
	u_int64_t min_share;
	u_int64_t max_avg_pkt_size;

	/*
	 * min_share is scaled
	 * to avoid dealing with doubles
	 */
	active_classes = 0;
	rdc_classes = 0;
	max_prod = 0;
	max_avg_pkt_size = 0;

	upper_bound = (int64_t)jif->jif_bandwidth;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			active_classes++;
			if (cl->concerned_rdc)
				rdc_classes++;
			else
				upper_bound -=
				    internal_to_bps(cl->service_rate);
		}
	}

	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t),
	    M_DEVBUF, M_WAITOK);

	if (result == NULL)
		return NULL;

	for (i = 0; i <= jif->jif_maxpri; i++)
		result[i] = 0;

	if (upper_bound <= 0 || rdc_classes == 0)
		return result;

	credit = 0;
	lower_bound = 0;
	min_share = ((u_int64_t)1 << SCALE_SHARE);
	bk = 0;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_rdc)
			bk += cl->cl_rin.bytes;
	}

	if (bk == 0)
		return (result);

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged
		    && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share)
			min_share = (cl->cl_rin.bytes << SCALE_SHARE)/bk;
		if (is_backlogged && cl->concerned_rdc
		    && cl->delay_prod_others > max_prod)
			max_prod = cl->delay_prod_others;

		if (is_backlogged && cl->concerned_rdc
		    && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
			max_avg_pkt_size = (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
	}

	error = update_error(jif);
	if (!error)
		goto fail;

	prop_control = (upper_bound*upper_bound*min_share)
	    /(max_prod*(max_avg_pkt_size << 2));

	prop_control = bps_to_internal(ticks_to_secs(prop_control)); /* in BT-1 */

	credit = 0;
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_rdc) {
			result[i] = -prop_control*error[i]; /* in BT-1 */
			result[i] >>= (SCALE_SHARE);
		}
	}

	free(error, M_DEVBUF); /* we don't need these anymore */

	/* saturation */

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			lower_bound += cl->min_rate_adc;
		/*
		 * note: if there's no ADC or ARC on cl,
		 * this is equal to zero, which is fine
		 */
	}

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate > upper_bound) {
			for (j = 0; j <= jif->jif_maxpri; j++) {
				cl = jif->jif_classes[j];
				class_exists = (cl != NULL);
				is_backlogged = (class_exists
						 && !qempty(cl->cl_q));
				if (is_backlogged && cl->concerned_rdc) {
					if (j == i)
						result[j] = upper_bound
						    -cl->service_rate
						    + cl->min_rate_adc
						    - lower_bound;
					else
						result[j] =
						    -cl->service_rate
						    +cl->min_rate_adc;
				}
			}
			return result;
		}

		cl = jif->jif_classes[i];
		/* do this again since it may have been modified */
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate < cl->min_rate_adc) {
			credit += cl->service_rate+result[i]
			    -cl->min_rate_adc;
			/* "credit" is in fact a negative number */
			result[i] = -cl->service_rate+cl->min_rate_adc;
		}
	}

	for (i = jif->jif_maxpri; (i >= 0 && credit < 0); i--) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc) {
			available = result[i]
			    + cl->service_rate-cl->min_rate_adc;
			if (available >= -credit) {
				result[i] += credit;
				credit = 0;
			} else {
				result[i] -= available;
				credit += available;
			}
		}
	}
	return result;

fail:	free(result, M_DEVBUF);
	return NULL;
}

/*
 * assign_rate_drops_adc: returns the adjustment needed to
 * the service rates to meet the absolute delay/rate constraints
 * (delay/throughput bounds) and drops traffic if need be.
 * see tech. report UVA/T.R. CS-2000-24/CS-2001-21 for more info.
 */

static int64_t *
assign_rate_drops_adc(struct jobs_if *jif)
{
	int64_t *result;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	int64_t *c, *n, *k;
	int64_t *available;

	int lowest, highest;
	int keep_going;
	int i;
	u_int64_t now, oldest_arv;
	int64_t	remaining_time;
	struct mbuf* pkt;
	u_int64_t len;

	now = read_machclk();
	oldest_arv = now;

	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
	if (result == NULL)
		goto fail0;
	c = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (c == NULL)
		goto fail1;
	n = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (n == NULL)
		goto fail2;
	k = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (k == NULL)
		goto fail3;
	available = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
	if (available == NULL)
		goto fail4;

	for (i = 0; i <= jif->jif_maxpri; i++)
		result[i] = 0;

	keep_going = 1;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			if (cl->concerned_adc) {
				/*
				 * get the arrival time of the oldest
				 * class-i packet
				 */
				if (tslist_first(cl->arv_tm) == NULL)
					oldest_arv = now; /* NOTREACHED */
				else
					oldest_arv = (tslist_first(cl->arv_tm))->timestamp;

				n[i] = cl->service_rate;
				k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));

				remaining_time = cl->cl_adc
				    - (int64_t)delay_diff(now, oldest_arv);
				if (remaining_time > 0) {
					c[i] = remaining_time;
					/*
					 * c is the remaining time before
					 * the deadline is violated
					 * (in ticks)
					 */
					available[i] = n[i]-k[i]/c[i];
				} else {
					/*
					 * deadline has passed...
					 * we allocate the whole link
					 * capacity to hopefully
					 * solve the problem
					 */
					c[i] = 0;
					available[i] = -((int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth));
				}
				if (cl->concerned_arc) {
					/*
					 * there's an ARC in addition
					 * to the ADC
					 */
					if (n[i] - cl->cl_arc < available[i])
						available[i] = n[i]
						    - cl->cl_arc;
				}
			} else if (cl->concerned_arc) {
				/*
				 * backlogged, concerned by ARC
				 * but not by ADC
				 */
				n[i] = cl->service_rate;
				available[i] = n[i] - cl->cl_arc;
			} else {
				/*
				 * backlogged but not concerned by ADC
				 * or ARC -> can give everything
				 */
				n[i] = cl->service_rate;
				available[i] = n[i];
			}
		} else {
			/* not backlogged */
			n[i] = 0;
			k[i] = 0;
			c[i] = 0;
			if (class_exists)
				available[i] = cl->service_rate;
			else
				available[i] = 0;
		}
	}

	/* step 1: adjust rates (greedy algorithm) */
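	/*
	 * sign convention: available[i] > 0 means class i can spare
	 * some of its current service rate, available[i] < 0 means
	 * it needs more service to meet its ADC/ARC; the greedy pass
	 * below moves service from the lowest classes with a surplus
	 * to the highest classes with a deficit
	 */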

	highest = 0;
	lowest  = jif->jif_maxpri;

	while (highest < jif->jif_maxpri+1 && available[highest] >= 0)
		highest++; /* which is the highest class that needs more service? */
	while (lowest > 0 && available[lowest] <= 0)
		lowest--;  /* which is the lowest class that needs less service? */

	while (highest != jif->jif_maxpri+1 && lowest != -1) {
		/* give the excess service from lowest to highest */
		if (available[lowest]+available[highest] > 0) {
			/*
			 * still some "credit" left
			 * give all that is needed by "highest"
			 */
			n[lowest]  += available[highest];
			n[highest] -= available[highest];
			available[lowest]  += available[highest];
			available[highest] = 0;

			while (highest < jif->jif_maxpri+1
			       && available[highest] >= 0)
				highest++;  /* which is the highest class that needs more service now? */

		} else if (available[lowest]+available[highest] == 0) {
			/* no more credit left but it's fine */
			n[lowest]  += available[highest];
			n[highest] -= available[highest];
			available[highest] = 0;
			available[lowest]  = 0;

			while (highest < jif->jif_maxpri+1
			       && available[highest] >= 0)
				highest++;  /* which is the highest class that needs more service? */
			while (lowest >= 0 && available[lowest] <= 0)
				lowest--;   /* which is the lowest class that needs less service? */

		} else if (available[lowest]+available[highest] < 0) {
			/*
			 * no more credit left and we need to switch
			 * to another class
			 */
			n[lowest]  -= available[lowest];
			n[highest] += available[lowest];
			available[highest] += available[lowest];
			available[lowest]  = 0;

			while ((lowest >= 0)&&(available[lowest] <= 0))
				lowest--;  /* which is the lowest class that needs less service? */
		}
	}

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			result[i] = n[i] - cl->service_rate;
		} else {
			if (class_exists)
				result[i] = - cl->service_rate;
			else
				result[i] = 0;
		}
	}

	/* step 2: adjust drops (for ADC) */

	if (highest != jif->jif_maxpri+1) {
		/* some class(es) still need(s) additional service */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged && available[i] < 0) {
				if (cl->concerned_adc) {
					k[i] = c[i]*n[i];
					while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes)) > k[i]) {
						pkt = qtail(cl->cl_q);
						if (pkt != NULL) {
							/* "safeguard" test (a packet SHOULD be in there) */
							len = (u_int64_t)m_pktlen(pkt);
							/* access packet at the tail */
							if (cl->concerned_alc
							    && cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
								keep_going = 0; /* relax ADC in favor of ALC */
							} else {
								/* drop packet at the tail of the class-i queue, update values */
								pkt = _getq_tail(cl->cl_q);
								len = (u_int64_t)m_pktlen(pkt);
								PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->cl_rin, (int)len);
								PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->st_rin, (int)len);
								cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
								m_freem(pkt); /* the packet is trashed here */
								tslist_drop(cl);
								IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
							}
						} else
							keep_going = 0; /* NOTREACHED */
					}
					k[i] = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes));
				}
				/*
				 * n[i] is the max rate we can give.
				 * the above drops as much as possible
				 * to respect a delay bound.
				 * for throughput bounds,
				 * there's nothing that can be done
				 * after the greedy reallocation.
				 */
			}
		}
	}

	/* update the values of min_rate_adc */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_adc) {
			if (c[i] != 0) {
				if (cl->concerned_adc
				    && !cl->concerned_arc)
					cl->min_rate_adc = k[i]/c[i];
				else
					cl->min_rate_adc = n[i];
			} else
				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
		} else if (is_backlogged && cl->concerned_arc)
			cl->min_rate_adc = n[i]; /* the best we can give */
		else {
			if (class_exists)
				cl->min_rate_adc = 0;
		}
	}

	free(c, M_DEVBUF);
	free(n, M_DEVBUF);
	free(k, M_DEVBUF);
	free(available, M_DEVBUF);

	return (result);

fail5: __unused
	free(available, M_DEVBUF);
fail4:	free(k, M_DEVBUF);
fail3:	free(n, M_DEVBUF);
fail2:	free(c, M_DEVBUF);
fail1:	free(result, M_DEVBUF);
fail0:	return NULL;
}

/*
 * update_error: returns the difference between the mean weighted
 * delay and the weighted delay for each class. if proportional
 * delay differentiation is perfectly achieved, it should return
 * zero for each class.
 */
static int64_t *
update_error(struct jobs_if *jif)
{
	int i;
	int active_classes, backlogged_classes;
	u_int64_t mean_weighted_delay;
	u_int64_t delays[JOBS_MAXPRI];
	int64_t* error;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	if (error == NULL)
		return NULL;

	mean_weighted_delay = 0;
	active_classes = 0;
	backlogged_classes = 0;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			backlogged_classes++;
			if (cl->concerned_rdc) {
				delays[i] = proj_delay(jif, i);
				mean_weighted_delay += cl->delay_prod_others*delays[i];
				active_classes++;
			}
		}
	}

	if (active_classes == 0)
		return error;
	else
		mean_weighted_delay /= active_classes;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			error[i] = ((int64_t)mean_weighted_delay)-((int64_t)cl->delay_prod_others*delays[i]);
		else
			error[i] = 0; /*
				       * either the class isn't concerned,
				       * or it's not backlogged.
				       * in any case, the rate shouldn't
				       * be adjusted.
				       */
	}
	return error;
}

/*
 * min_rates_adc: computes the minimum service rates needed in
 * each class to meet the absolute delay bounds. if, for any
 * class i, the current service rate of class i is less than
 * the computed minimum service rate, this function returns
 * false, true otherwise.
 */
static int
min_rates_adc(struct jobs_if *jif)
{
	int result;
	int i;
	int class_exists, is_backlogged;
	int64_t remaining_time;
	struct jobs_class *cl;
	result = 1;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_adc) {
			remaining_time = cl->cl_adc - proj_delay(jif, i);
			if (remaining_time > 0) {
				/* min rate needed for ADC */
				cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time;
				if (cl->concerned_arc
				    && cl->cl_arc > cl->min_rate_adc) {
					/* min rate needed for ADC + ARC */
					cl->min_rate_adc = cl->cl_arc;
				}
			} else {
				/* the deadline has been exceeded: give the whole link capacity to hopefully fix the situation */
				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
			}
		} else if (is_backlogged && cl->concerned_arc)
			cl->min_rate_adc = cl->cl_arc;	/* no ADC, an ARC */
		else if (class_exists)
			cl->min_rate_adc = 0;	/*
						 * either the class is not
						 * backlogged
						 * or there is no ADC and
						 * no ARC
						 */
		if (is_backlogged && cl->min_rate_adc > cl->service_rate)
			result = 0;
	}

	return result;
}

/*
 * proj_delay: computes the difference between the current time
 * and the arrival time of the oldest class-i packet still in
 * the class-i queue.
 */
static int64_t
proj_delay(struct jobs_if *jif, int i)
{
	u_int64_t now;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	now = read_machclk();
	cl = jif->jif_classes[i];
	class_exists = (cl != NULL);
	is_backlogged = (class_exists && !qempty(cl->cl_q));

	if (is_backlogged)
		return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp));

	return (0); /* NOTREACHED */
}

/*
 * pick_dropped_rlc: returns the class index of the class to be
 * dropped for meeting the relative loss constraints.
 */
static int
pick_dropped_rlc(struct jobs_if *jif)
{
	int64_t mean;
	int64_t* loss_error;
	int i, active_classes, backlogged_classes;
	int class_exists, is_backlogged;
	int class_dropped;
	int64_t max_error;
	int64_t max_alc;
	struct mbuf* pkt;
	struct jobs_class *cl;
	u_int64_t len;

	loss_error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1),
	    M_DEVBUF, M_WAITOK);

	if (loss_error == NULL)
		return -1;

	class_dropped = -1;
	max_error = 0;
	mean = 0;
	active_classes = 0;
	backlogged_classes = 0;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			backlogged_classes++;
			if (cl->concerned_rlc) {
				mean += cl->loss_prod_others
				    * cl->current_loss;
				active_classes++;
			}
		}
	}

	if (active_classes > 0)
		mean /= active_classes;

	if (active_classes == 0)
		class_dropped = JOBS_MAXPRI+1; /*
						* no classes are concerned
						* by RLCs (JOBS_MAXPRI+1
						* means "ignore RLC" here)
						*/
	else {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));

			if ((is_backlogged)&&(cl->cl_rlc))
				loss_error[i] = cl->loss_prod_others
				    *cl->current_loss-mean;
			else
				loss_error[i] = ALTQ_INFINITY;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged && loss_error[i] <= max_error) {
				/*
				 * find out which class is the most
				 * below the mean.
				 * it's the one that needs to be dropped
				 * ties are broken in favor of the higher
				 * priority classes (i.e., if two classes
				 * present the same deviation, the lower
				 * priority class will get dropped).
				 */
				max_error = loss_error[i];
				class_dropped = i;
			}
		}

		if (class_dropped != -1) {
			cl = jif->jif_classes[class_dropped];
			pkt = qtail(cl->cl_q);
			if (pkt != NULL) {
				/*
				 * "safeguard" test (a packet SHOULD be
				 * in there)
				 */
				len = (u_int64_t)m_pktlen(pkt);
				/* access packet at the tail */
				if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
					/*
					 * the class to drop for meeting
					 * the RLC will defeat the ALC:
					 * ignore RLC.
					 */
					class_dropped = JOBS_MAXPRI+1;
				}
			} else
				class_dropped = JOBS_MAXPRI+1; /* NOTREACHED */
		} else
			class_dropped = JOBS_MAXPRI+1;
	}

	if (class_dropped == JOBS_MAXPRI+1) {
		max_alc = -((int64_t)1 << SCALE_LOSS);
		for (i = jif->jif_maxpri; i >= 0; i--) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
					 && !qempty(cl->cl_q));
			if (is_backlogged) {
				if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) {
					max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */
					class_dropped = i;
				} else if (!cl->concerned_alc && ((int64_t) 1 << SCALE_LOSS)-cl->current_loss > max_alc) {
					max_alc = ((int64_t) 1 << SCALE_LOSS)-cl->current_loss;
					class_dropped = i;
				}
			}
		}
	}

	free(loss_error, M_DEVBUF);
	return (class_dropped);
}

/*
 * ALTQ binding/setup functions
 */
/*
 * jobs device interface
 */
int
jobsopen(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("jobs: no CPU clock available!\n");
		return (ENXIO);
	}
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
jobsclose(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	struct jobs_if *jif;
	int error = 0;

	while ((jif = jif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(jif->jif_ifq))
			altq_disable(jif->jif_ifq);

		error = altq_detach(jif->jif_ifq);
		switch (error) {
		case 0:
		case ENXIO:	/* already disabled */
			break;
		default:
			return error;
		}
		jobs_detach(jif);
	}

	return error;
}

int
jobsioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
1861     struct lwp *l)
1862 {
1863 	struct jobs_if *jif;
1864 	struct jobs_interface *ifacep;
1865 	struct proc *p = l->l_proc;
1866 	int	error = 0;
1867 
1868 	/* check super-user privilege */
1869 	switch (cmd) {
1870 	case JOBS_GETSTATS:
1871 		break;
1872 	default:
1873 #if (__FreeBSD_version > 400000)
1874 		if ((error = suser(p)) != 0)
1875 			return (error);
1876 #else
1877 		if ((error = kauth_authorize_network(p->p_cred,
1878 		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_JOBS, NULL,
1879 		    NULL, NULL)) != 0)
1880 			return (error);
1881 #endif
1882 		break;
1883 	}
1884 
1885 	switch (cmd) {
1886 
1887 	case JOBS_IF_ATTACH:
1888 		error = jobscmd_if_attach((struct jobs_attach *)addr);
1889 		break;
1890 
1891 	case JOBS_IF_DETACH:
1892 		error = jobscmd_if_detach((struct jobs_interface *)addr);
1893 		break;
1894 
1895 	case JOBS_ENABLE:
1896 	case JOBS_DISABLE:
1897 	case JOBS_CLEAR:
1898 		ifacep = (struct jobs_interface *)addr;
1899 		if ((jif = altq_lookup(ifacep->jobs_ifname,
1900 				       ALTQT_JOBS)) == NULL) {
1901 			error = EBADF;
1902 			break;
1903 		}
1904 
1905 		switch (cmd) {
1906 		case JOBS_ENABLE:
1907 			if (jif->jif_default == NULL) {
1908 #if 1
1909 				printf("jobs: no default class\n");
1910 #endif
1911 				error = EINVAL;
1912 				break;
1913 			}
1914 			error = altq_enable(jif->jif_ifq);
1915 			break;
1916 
1917 		case JOBS_DISABLE:
1918 			error = altq_disable(jif->jif_ifq);
1919 			break;
1920 
1921 		case JOBS_CLEAR:
1922 			jobs_clear_interface(jif);
1923 			break;
1924 		}
1925 		break;
1926 
1927 		case JOBS_ADD_CLASS:
1928 			error = jobscmd_add_class((struct jobs_add_class *)addr);
1929 			break;
1930 
1931 	case JOBS_DEL_CLASS:
1932 		error = jobscmd_delete_class((struct jobs_delete_class *)addr);
1933 		break;
1934 
1935 	case JOBS_MOD_CLASS:
1936 		error = jobscmd_modify_class((struct jobs_modify_class *)addr);
1937 		break;
1938 
1939 	case JOBS_ADD_FILTER:
1940 		error = jobscmd_add_filter((struct jobs_add_filter *)addr);
1941 		break;
1942 
1943 	case JOBS_DEL_FILTER:
1944 		error = jobscmd_delete_filter((struct jobs_delete_filter *)addr);
1945 		break;
1946 
1947 	case JOBS_GETSTATS:
1948 		error = jobscmd_class_stats((struct jobs_class_stats *)addr);
1949 		break;
1950 
1951 	default:
1952 		error = EINVAL;
1953 		break;
1954 	}
1955 	return error;
1956 }
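
/*
 * Usage sketch (illustration only, never compiled into the kernel): a
 * userland tool such as altqd drives the ioctl interface above through
 * the jobs device node.  The "/dev/altq/jobs" path, the open flags and
 * the helper name are assumptions for this example; the JOBS_ENABLE
 * command and struct jobs_interface are the ones from altq/altq_jobs.h.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <altq/altq_jobs.h>

static int
jobs_enable_on(const char *ifname)
{
	struct jobs_interface iface;
	int fd;

	/* assumed device path; one node per discipline under /dev/altq */
	fd = open("/dev/altq/jobs", O_RDWR);
	if (fd < 0)
		return (-1);
	memset(&iface, 0, sizeof(iface));
	strlcpy(iface.jobs_ifname, ifname, sizeof(iface.jobs_ifname));
	if (ioctl(fd, JOBS_ENABLE, &iface) < 0) {
		close(fd);
		return (-1);
	}
	/* keep the descriptor open: jobsclose() tears everything down */
	return (fd);
}
#endif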

static int
jobscmd_if_attach(struct jobs_attach *ap)
{
	struct jobs_if *jif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.jobs_ifname)) == NULL)
		return (ENXIO);
	if ((jif = jobs_attach(&ifp->if_snd, ap->bandwidth, ap->qlimit,
			       ap->separate)) == NULL)
		return (ENOMEM);

	/*
	 * set JOBS to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_JOBS, jif,
				 jobs_enqueue, jobs_dequeue, jobs_request,
				 &jif->jif_classifier, acc_classify)) != 0)
		jobs_detach(jif);

	return (error);
}

static int
jobscmd_if_detach(struct jobs_interface *ap)
{
	struct jobs_if *jif;
	int error;

	if ((jif = altq_lookup(ap->jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(jif->jif_ifq))
		altq_disable(jif->jif_ifq);

	if ((error = altq_detach(jif->jif_ifq)))
		return (error);

	jobs_detach(jif);
	return 0;
}

static int
jobscmd_add_class(struct jobs_add_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = jobs_class_create(jif, ap->pri,
				    ap->cl_adc, ap->cl_rdc,
				    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
				    ap->flags)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return (0);
}

static int
jobscmd_delete_class(struct jobs_delete_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	return jobs_class_destroy(cl);
}

static int
jobscmd_modify_class(struct jobs_modify_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	/*
	 * if priority is changed, move the class to the new priority
	 */
	if (jif->jif_classes[ap->pri] != cl) {
		if (jif->jif_classes[ap->pri] != NULL)
			return (EEXIST);
		jif->jif_classes[cl->cl_pri] = NULL;
		jif->jif_classes[ap->pri] = cl;
		cl->cl_pri = ap->pri;
	}

	/* call jobs_class_create to change class parameters */
	if ((cl = jobs_class_create(jif, ap->pri,
				    ap->cl_adc, ap->cl_rdc,
				    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
				    ap->flags)) == NULL)
		return (ENOMEM);
	return 0;
}

static int
jobscmd_add_filter(struct jobs_add_filter *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	return acc_add_filter(&jif->jif_classifier, &ap->filter,
			      cl, &ap->filter_handle);
}

static int
jobscmd_delete_filter(struct jobs_delete_filter *ap)
{
	struct jobs_if *jif;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	return acc_delete_filter(&jif->jif_classifier, ap->filter_handle);
}

static int
jobscmd_class_stats(struct jobs_class_stats *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;
	struct class_stats stats, *usp;
	int pri, error;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	ap->maxpri = jif->jif_maxpri;

	/* copy out the stats of each priority level up to maxpri */
	usp = ap->stats;
	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		cl = jif->jif_classes[pri];
		if (cl != NULL)
			get_class_stats(&stats, cl);
		else
			(void)memset(&stats, 0, sizeof(stats));
		if ((error = copyout((void *)&stats, (void *)usp++,
				     sizeof(stats))) != 0)
			return (error);
	}
	return (0);
}
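
/*
 * Usage sketch (illustration only, never compiled into the kernel):
 * the caller of JOBS_GETSTATS supplies the stats array that the
 * copyout() loop above fills in, one struct class_stats per priority.
 * The descriptor is assumed to come from opening the jobs device node
 * as in the earlier sketch; the helper name is hypothetical.
 */
#if 0
#include <sys/ioctl.h>
#include <string.h>
#include <altq/altq_jobs.h>

static int
jobs_read_stats(int fd, const char *ifname,
    struct class_stats sbuf[JOBS_MAXPRI])
{
	struct jobs_class_stats gs;

	memset(&gs, 0, sizeof(gs));
	strlcpy(gs.iface.jobs_ifname, ifname, sizeof(gs.iface.jobs_ifname));
	gs.stats = sbuf;		/* filled for priorities 0..maxpri */
	if (ioctl(fd, JOBS_GETSTATS, &gs) < 0)
		return (-1);
	return (gs.maxpri);		/* highest valid index in sbuf */
}
#endif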

static void
get_class_stats(struct class_stats *sp, struct jobs_class *cl)
{
	u_int64_t now;
	now = read_machclk();

	sp->class_handle = clp_to_clh(cl);
	sp->qlength = qlen(cl->cl_q);

	sp->period = cl->cl_period;
	sp->rin = cl->st_rin;
	sp->arrival = cl->st_arrival;
	sp->arrivalbusy = cl->cl_arrival;
	sp->rout = cl->st_rout;
	sp->dropcnt = cl->cl_dropcnt;

	/*  PKTCNTR_RESET(&cl->st_arrival);*/
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);

	sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
	sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
	sp->avgdel = cl->cl_avgdel;

	cl->cl_avgdel = 0;

	sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
	sp->adc_violations = cl->adc_violations;

	sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
	sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
	sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
	sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
	sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
	sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
	sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
	sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
	sp->total_enqueued = cl->cl_jif->total_enqueued;
	sp->total_dequeued = cl->cl_jif->total_dequeued;
}

/* convert a class handle to the corresponding class pointer */
static struct jobs_class *
clh_to_clp(struct jobs_if *jif, u_long chandle)
{
	struct jobs_class *cl;

	cl = (struct jobs_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
		printf("clh_to_clp: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
		return (NULL);
	return (cl);
}

/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(struct jobs_class *cl)
{
	return (cl->cl_handle);
}
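
/*
 * Sanity sketch (illustration only, never compiled): the handle handed
 * to userland is simply the class's kernel address, so for any live
 * class the round trip through the two converters above must be the
 * identity.  The helper name is hypothetical.
 */
#if 0
static void
jobs_check_handle(struct jobs_if *jif, struct jobs_class *cl)
{
	/* clh_to_clp() validates alignment, handle and backpointer */
	KASSERT(clh_to_clp(jif, clp_to_clh(cl)) == cl);
}
#endif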

#ifdef KLD_MODULE

static struct altqsw jobs_sw =
	{"jobs", jobsopen, jobsclose, jobsioctl};

ALTQ_MODULE(altq_jobs, ALTQT_JOBS, &jobs_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_JOBS */