/*	$KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_cbq.c,v 1.7 2008/05/14 11:59:23 sephe Exp $ */

/*
 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the SMCC Technology
 *      Development Group at Sun Microsystems, Inc.
 *
 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
 *      promote products derived from this software without specific prior
 *      written permission.
 *
 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE.  The software is
 * provided "as is" without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this software.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
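/*
 * For reference (illustrative, not part of the original file): the opt_*.h
 * headers above are generated by config(8) from kernel configuration lines
 * such as
 *
 *	options ALTQ
 *	options ALTQ_CBQ	# class based queueing
 *
 * so this whole file compiles to nothing unless ALTQ_CBQ was configured.
 */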

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>

#include <sys/thread2.h>

#define CBQ_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define CBQ_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_ASSERT_LOCKED(ifq) \
    ALTQ_SQ_ASSERT_LOCKED(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])

/*
 * Forward Declarations.
 */
static int		 cbq_class_destroy(cbq_state_t *, struct rm_class *);
static struct rm_class  *clh_to_clp(cbq_state_t *, uint32_t);
static int		 cbq_clear_interface(cbq_state_t *);
static int		 cbq_request(struct ifaltq_subque *, int, void *);
static int		 cbq_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf	*cbq_dequeue(struct ifaltq_subque *, int);
static void		 cbqrestart(struct ifaltq *);
static void		 get_class_stats(class_stats_t *, struct rm_class *);
static void		 cbq_purge(cbq_state_t *);

/*
 * int
 * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
 *	function destroys a given traffic class.  Before destroying
 *	the class, all traffic for that class is released.
 */
static int
cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
{
	int	i;

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl)
			cbqp->cbq_class_tbl[i] = NULL;

	if (cl == cbqp->ifnp.root_)
		cbqp->ifnp.root_ = NULL;
	if (cl == cbqp->ifnp.default_)
		cbqp->ifnp.default_ = NULL;
	return (0);
}

/* convert class handle to class pointer */
static struct rm_class *
clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
{
	int i;
	struct rm_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, optimistically try the slot matching the lower bits of
	 * the handle.  if that misses, fall back to a linear table search.
	 */
	i = chandle % CBQ_MAX_CLASSES;
	if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
	    cl->stats_.handle == chandle)
		return (cl);
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
		    cl->stats_.handle == chandle)
			return (cl);
	return (NULL);
}
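
/*
 * Illustrative sketch (not part of the original file): assuming
 * CBQ_MAX_CLASSES were 256, a class created with qid 0x101 would usually
 * be stored in slot 0x101 % 256 == 1, so the fast path above hits on the
 * first probe:
 *
 *	cl = clh_to_clp(cbqp, 0x101);	// fast path: cbq_class_tbl[1]
 *
 * If slot 1 was already occupied when the class was added (see
 * cbq_add_queue_locked() below), the class lives in the first free slot
 * instead and only the O(CBQ_MAX_CLASSES) linear scan finds it.
 */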

static int
cbq_clear_interface(cbq_state_t *cbqp)
{
	int		 again, i;
	struct rm_class	*cl;

	/* clear out the classes now */
	do {
		again = 0;
		for (i = 0; i < CBQ_MAX_CLASSES; i++) {
			if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
				if (is_a_parent_class(cl))
					again++;
				else {
					cbq_class_destroy(cbqp, cl);
					cbqp->cbq_class_tbl[i] = NULL;
					if (cl == cbqp->ifnp.root_)
						cbqp->ifnp.root_ = NULL;
					if (cl == cbqp->ifnp.default_)
						cbqp->ifnp.default_ = NULL;
				}
			}
		}
	} while (again);

	return (0);
}
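
/*
 * Illustrative note (not part of the original file): the do/while loop
 * above tears the class tree down leaf-first.  For a hypothetical
 * hierarchy
 *
 *	root -> agency -> { http, ssh }
 *
 * pass 1 destroys http and ssh (agency and root are still parents and
 * only bump `again'), pass 2 destroys agency, and pass 3 destroys root,
 * after which `again' stays 0 and the loop exits.
 */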

static int
cbq_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == CBQ_SUBQ_INDEX) {
			cbq_purge(cbqp);
		} else {
			/*
			 * A race occurred: an unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}

/* copy the stats info in rm_class to class_stats_t */
static void
get_class_stats(class_stats_t *statsp, struct rm_class *cl)
{
	statsp->xmit_cnt	= cl->stats_.xmit_cnt;
	statsp->drop_cnt	= cl->stats_.drop_cnt;
	statsp->over		= cl->stats_.over;
	statsp->borrows		= cl->stats_.borrows;
	statsp->overactions	= cl->stats_.overactions;
	statsp->delays		= cl->stats_.delays;

	statsp->depth		= cl->depth_;
	statsp->priority	= cl->pri_;
	statsp->maxidle		= cl->maxidle_;
	statsp->minidle		= cl->minidle_;
	statsp->offtime		= cl->offtime_;
	statsp->qmax		= qlimit(cl->q_);
	statsp->ns_per_byte	= cl->ns_per_byte_;
	statsp->wrr_allot	= cl->w_allotment_;
	statsp->qcnt		= qlen(cl->q_);
	statsp->avgidle		= cl->avgidle_;

	statsp->qtype		= qtype(cl->q_);
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		red_getstats(cl->red_, &statsp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
#endif
}

int
cbq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_CBQ, a->altq_disc, ifq_mapsubq_default,
	    cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
}

int
cbq_add_altq(struct pf_altq *a)
{
	cbq_state_t	*cbqp;
	struct ifnet	*ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	/* allocate and initialize cbq_state_t */
	cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cbqp->cbq_callout);
	cbqp->cbq_qlen = 0;
	cbqp->ifnp.ifq_ = &ifp->if_snd;	    /* keep the ifq */
	ifq_purge_all(&ifp->if_snd);

	/* keep the state in pf_altq */
	a->altq_disc = cbqp;

	return (0);
}
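
/*
 * For context (illustrative, not part of the original file): this attach
 * path is normally driven by an ALTQ section in pf.conf loaded with
 * pfctl(8).  A minimal hypothetical ruleset for an interface "fxp0":
 *
 *	altq on fxp0 cbq bandwidth 10Mb queue { std, http }
 *	queue std bandwidth 5Mb cbq(default)
 *	queue http bandwidth 5Mb cbq(borrow)
 *
 * pfctl parses these lines into struct pf_altq entries and reaches
 * cbq_add_altq() and cbq_add_queue() through the pf(4) ioctl interface.
 */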

int
cbq_remove_altq(struct pf_altq *a)
{
	cbq_state_t	*cbqp;

	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	cbq_clear_interface(cbqp);

	if (cbqp->ifnp.default_)
		cbq_class_destroy(cbqp, cbqp->ifnp.default_);
	if (cbqp->ifnp.root_)
		cbq_class_destroy(cbqp, cbqp->ifnp.root_);

	/* deallocate cbq_state_t */
	kfree(cbqp, M_ALTQ);

	return (0);
}

static int
cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*borrow, *parent;
	struct rm_class	*cl;
	struct cbq_opts	*opts;
	int		i;

	KKASSERT(a->qid != 0);

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = a->qid % CBQ_MAX_CLASSES;
	if (cbqp->cbq_class_tbl[i] != NULL) {
		for (i = 0; i < CBQ_MAX_CLASSES; i++)
			if (cbqp->cbq_class_tbl[i] == NULL)
				break;
		if (i == CBQ_MAX_CLASSES)
			return (EINVAL);
	}

	opts = &a->pq_u.cbq_opts;
	/* check parameters */
	if (a->priority >= CBQ_MAXPRI)
		return (EINVAL);

	/* Get pointers to parent and borrow classes.  */
	parent = clh_to_clp(cbqp, a->parent_qid);
	if (opts->flags & CBQCLF_BORROW)
		borrow = parent;
	else
		borrow = NULL;

	/*
	 * A class must borrow from its parent or it cannot
	 * borrow at all.  Hence, borrow can be null.
	 */
	if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
		kprintf("cbq_add_queue: no parent class!\n");
		return (EINVAL);
	}

	if ((borrow != parent) && (borrow != NULL)) {
		kprintf("cbq_add_queue: borrow class != parent\n");
		return (EINVAL);
	}

	/*
	 * check parameters
	 */
	switch (opts->flags & CBQCLF_CLASSMASK) {
	case CBQCLF_ROOTCLASS:
		if (parent != NULL)
			return (EINVAL);
		if (cbqp->ifnp.root_)
			return (EINVAL);
		break;
	case CBQCLF_DEFCLASS:
		if (cbqp->ifnp.default_)
			return (EINVAL);
		break;
	case 0:
		if (a->qid == 0)
			return (EINVAL);
		break;
	default:
		/* more than one class flag bit set */
		return (EINVAL);
	}
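
	/*
	 * Illustrative summary (not part of the original file) of what the
	 * switch above accepts for the CBQCLF_CLASSMASK bits:
	 *
	 *	CBQCLF_ROOTCLASS	root class; no parent, only one allowed
	 *	CBQCLF_DEFCLASS		default class; only one allowed
	 *	0 (neither bit)		ordinary class; requires a nonzero qid
	 *	both bits set		rejected via the default case
	 */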

	/*
	 * create a class.  if this is a root class, initialize the
	 * interface.
	 */
	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
		rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
		    cbqrestart, a->qlimit, RM_MAXQUEUED,
		    opts->maxidle, opts->minidle, opts->offtime,
		    opts->flags);
		cl = cbqp->ifnp.root_;
	} else {
		cl = rmc_newclass(a->priority,
				  &cbqp->ifnp, opts->ns_per_byte,
				  rmc_delay_action, a->qlimit, parent, borrow,
				  opts->maxidle, opts->minidle, opts->offtime,
				  opts->pktsize, opts->flags);
	}
	if (cl == NULL)
		return (ENOMEM);

	/* return handle to user space. */
	cl->stats_.handle = a->qid;
	cl->stats_.depth = cl->depth_;

	/* save the allocated class */
	cbqp->cbq_class_tbl[i] = cl;

	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
		cbqp->ifnp.default_ = cl;

	return (0);
}

int
cbq_add_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);
	error = cbq_add_queue_locked(a, cbqp);
	CBQ_UNLOCK(ifq);

	return error;
}

static int
cbq_remove_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int		i;

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
		return (EINVAL);

	/* if we are a parent class, then return an error. */
	if (is_a_parent_class(cl))
		return (EINVAL);

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl) {
			cbqp->cbq_class_tbl[i] = NULL;
			if (cl == cbqp->ifnp.root_)
				cbqp->ifnp.root_ = NULL;
			if (cl == cbqp->ifnp.default_)
				cbqp->ifnp.default_ = NULL;
			break;
		}

	return (0);
}

int
cbq_remove_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);
	error = cbq_remove_queue_locked(a, cbqp);
	CBQ_UNLOCK(ifq);

	return error;
}

int
cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	cbq_state_t	*cbqp;
	struct rm_class	*cl;
	class_stats_t	 stats;
	int		 error = 0;
	struct ifaltq	*ifq;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	/* XXX not MP safe */
	if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL)
		return (EBADF);
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) {
		CBQ_UNLOCK(ifq);
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	CBQ_UNLOCK(ifq);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}
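
/*
 * Userland sketch (illustrative, not part of the original file): pfctl(8)
 * reaches cbq_getqstats() through the pf(4) DIOCGETQSTATS ioctl, roughly
 * like this (assuming the usual struct pfioc_qstats layout; the ticket and
 * queue index come from a prior DIOCGETALTQS/DIOCGETALTQ walk, and error
 * handling is omitted):
 *
 *	struct pfioc_qstats pq;
 *	class_stats_t st;
 *	int dev = open("/dev/pf", O_RDONLY);
 *
 *	memset(&pq, 0, sizeof(pq));
 *	pq.ticket = ticket;		// from DIOCGETALTQS
 *	pq.nr = nr;			// queue index being queried
 *	pq.buf = &st;
 *	pq.nbytes = sizeof(st);
 *	ioctl(dev, DIOCGETQSTATS, &pq);	// ends up in cbq_getqstats()
 */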

/*
 * int
 * cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
 *     struct altq_pktattr *pattr)
 *		- Queue data packets.
 *
 *	cbq_enqueue is installed as the subqueue enqueue handler (via
 *	altq_attach in cbq_pfattach) and is called by an upper layer
 *	(e.g. ether_output).  cbq_enqueue queues the given packet to the
 *	cbq, then invokes the driver's start routine.
 *
 *	Returns:	0 if the queueing is successful.
 *			ENOBUFS if a packet drop occurred as a result of
 *			the queueing.
 */

static int
cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr __unused)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct rm_class	*cl;
	int len;

	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
		/*
		 * A race occurred: an unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return (ENOBUFS);
	}

	/* grab class set by classifier */
	M_ASSERTPKTHDR(m);
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL) {
		cl = cbqp->ifnp.default_;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	crit_enter();
	cl->pktattr_ = NULL;
	len = m_pktlen(m);
	if (rmc_queue_packet(cl, m) != 0) {
		/* drop occurred.  some mbuf was freed in rmc_queue_packet. */
		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}

	/* successfully queued. */
	++cbqp->cbq_qlen;
	ALTQ_SQ_PKTCNT_INC(ifsq);
	crit_exit();
	return (0);
}
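
/*
 * For context (illustrative, not part of the original file): the qid
 * consulted above is stamped on the mbuf by the pf classifier when a
 * matching filter rule assigns a queue, e.g. in a hypothetical pf.conf:
 *
 *	pass out on fxp0 proto tcp to port 80 queue http
 *
 * Packets that carry no pf queue tag (or an unknown qid) fall back to the
 * default class selected with cbq(default).
 */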

static struct mbuf *
cbq_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct mbuf	*m;

	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
		/*
		 * A race occurred: an unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	crit_enter();
	m = rmc_dequeue_next(&cbqp->ifnp, op);

	if (m && op == ALTDQ_REMOVE) {
		--cbqp->cbq_qlen;  /* decrement # of packets in cbq */
		ALTQ_SQ_PKTCNT_DEC(ifsq);

		/* Update the class. */
		rmc_update_class_util(&cbqp->ifnp);
	}
	crit_exit();
	return (m);
}

/*
 * void
 * cbqrestart(struct ifaltq *) - Restart sending of data.
 * called from rmc_restart in a critical section via timeout after waking up
 * a suspended class.
 *	Returns:	NONE
 */

static void
cbqrestart(struct ifaltq *ifq)
{
	cbq_state_t	*cbqp;

	CBQ_ASSERT_LOCKED(ifq);

	if (!ifq_is_enabled(ifq))
		/* cbq must have been detached */
		return;

	if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
		/* should not happen */
		return;

	if (cbqp->cbq_qlen > 0) {
		struct ifnet *ifp = ifq->altq_ifp;
		struct ifaltq_subque *ifsq = &ifq->altq_subq[CBQ_SUBQ_INDEX];

		/* Release the altq lock to avoid deadlock */
		CBQ_UNLOCK(ifq);

		ifsq_serialize_hw(ifsq);
		if (ifp->if_start && !ifsq_is_oactive(ifsq))
			(*ifp->if_start)(ifp, ifsq);
		ifsq_deserialize_hw(ifsq);

		CBQ_LOCK(ifq);
	}
}

static void
cbq_purge(cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int i;

	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
			rmc_dropall(cl);
	}
	if (ifq_is_enabled(cbqp->ifnp.ifq_))
		ALTQ_SQ_CNTR_RESET(&cbqp->ifnp.ifq_->altq_subq[CBQ_SUBQ_INDEX]);
}

#endif /* ALTQ_CBQ */