/*	$KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_cbq.c,v 1.7 2008/05/14 11:59:23 sephe Exp $ */

/*
 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the SMCC Technology
 *      Development Group at Sun Microsystems, Inc.
 *
 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
 *      promote products derived from this software without specific prior
 *      written permission.
 *
 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE.  The software is
 * provided "as is" without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this software.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>

#include <sys/thread2.h>

#define CBQ_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define CBQ_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_ASSERT_LOCKED(ifq) \
    ALTQ_SQ_ASSERT_LOCKED(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])

/*
 * Forward Declarations.
 */
static int		 cbq_class_destroy(cbq_state_t *, struct rm_class *);
static struct rm_class  *clh_to_clp(cbq_state_t *, uint32_t);
static int		 cbq_clear_interface(cbq_state_t *);
static int		 cbq_request(struct ifaltq_subque *, int, void *);
static int		 cbq_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf	*cbq_dequeue(struct ifaltq_subque *, int);
static void		 cbqrestart(struct ifaltq *);
static void		 get_class_stats(class_stats_t *, struct rm_class *);
static void		 cbq_purge(cbq_state_t *);

/*
 * int
 * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
 *	function destroys a given traffic class.  Before destroying
 *	the class, all traffic for that class is released.
 */
static int
cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
{
	int	i;

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl)
			cbqp->cbq_class_tbl[i] = NULL;

	if (cl == cbqp->ifnp.root_)
		cbqp->ifnp.root_ = NULL;
	if (cl == cbqp->ifnp.default_)
		cbqp->ifnp.default_ = NULL;
	return (0);
}

/* convert class handle to class pointer */
static struct rm_class *
clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
{
	int i;
	struct rm_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, optimistically try the slot matching the lower bits
	 * of the handle.  if that fails, fall back to the linear
	 * table search.
	 */
	i = chandle % CBQ_MAX_CLASSES;
	if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
	    cl->stats_.handle == chandle)
		return (cl);
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
		    cl->stats_.handle == chandle)
			return (cl);
	return (NULL);
}

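/*
 * cbq_clear_interface() destroys all traffic classes on the
 * interface.  Leaf classes are destroyed first; parent classes are
 * retried on subsequent passes once their children are gone.
 */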
static int
cbq_clear_interface(cbq_state_t *cbqp)
{
	int		 again, i;
	struct rm_class	*cl;

	/* clear out the classes now */
	do {
		again = 0;
		for (i = 0; i < CBQ_MAX_CLASSES; i++) {
			if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
				if (is_a_parent_class(cl))
					again++;
				else {
					cbq_class_destroy(cbqp, cl);
					cbqp->cbq_class_tbl[i] = NULL;
					if (cl == cbqp->ifnp.root_)
						cbqp->ifnp.root_ = NULL;
					if (cl == cbqp->ifnp.default_)
						cbqp->ifnp.default_ = NULL;
				}
			}
		}
	} while (again);

	return (0);
}

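/*
 * cbq_request() handles requests from the ifq framework.  Only
 * ALTRQ_PURGE is of interest here: drop all packets queued in
 * every class of this cbq.
 */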
static int
cbq_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == CBQ_SUBQ_INDEX) {
			cbq_purge(cbqp);
		} else {
			/*
			 * A race occurred: an unrelated subqueue was
			 * picked during the packet scheduler
			 * transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}

/* copy the stats info in rm_class to class_stats_t */
static void
get_class_stats(class_stats_t *statsp, struct rm_class *cl)
{
	statsp->xmit_cnt	= cl->stats_.xmit_cnt;
	statsp->drop_cnt	= cl->stats_.drop_cnt;
	statsp->over		= cl->stats_.over;
	statsp->borrows		= cl->stats_.borrows;
	statsp->overactions	= cl->stats_.overactions;
	statsp->delays		= cl->stats_.delays;

	statsp->depth		= cl->depth_;
	statsp->priority	= cl->pri_;
	statsp->maxidle		= cl->maxidle_;
	statsp->minidle		= cl->minidle_;
	statsp->offtime		= cl->offtime_;
	statsp->qmax		= qlimit(cl->q_);
	statsp->ns_per_byte	= cl->ns_per_byte_;
	statsp->wrr_allot	= cl->w_allotment_;
	statsp->qcnt		= qlen(cl->q_);
	statsp->avgidle		= cl->avgidle_;

	statsp->qtype		= qtype(cl->q_);
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		red_getstats(cl->red_, &statsp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
#endif
}

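/*
 * cbq_pfattach() attaches the cbq discipline (allocated by
 * cbq_add_altq()) to the interface queue on behalf of pf(4).
 */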
int
cbq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_CBQ, a->altq_disc, ifq_mapsubq_default,
	    cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
}

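/*
 * cbq_add_altq() allocates and initializes the per-interface cbq
 * state and saves it in the pf_altq descriptor; the traffic classes
 * are added later through cbq_add_queue().
 */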
int
cbq_add_altq(struct pf_altq *a)
{
	cbq_state_t	*cbqp;
	struct ifnet	*ifp;

	ifnet_lock();

	if ((ifp = ifunit(a->ifname)) == NULL) {
		ifnet_unlock();
		return (EINVAL);
	}
	if (!ifq_is_ready(&ifp->if_snd)) {
		ifnet_unlock();
		return (ENODEV);
	}

	/* allocate and initialize cbq_state_t */
	cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cbqp->cbq_callout);
	cbqp->cbq_qlen = 0;
	cbqp->ifnp.ifq_ = &ifp->if_snd;	    /* keep the ifq */
	ifq_purge_all(&ifp->if_snd);

	ifnet_unlock();

	/* keep the state in pf_altq */
	a->altq_disc = cbqp;

	return (0);
}

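/*
 * cbq_remove_altq() destroys all classes and releases the cbq state
 * kept in the pf_altq descriptor.
 */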
int
cbq_remove_altq(struct pf_altq *a)
{
	cbq_state_t	*cbqp;

	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	cbq_clear_interface(cbqp);

	if (cbqp->ifnp.default_)
		cbq_class_destroy(cbqp, cbqp->ifnp.default_);
	if (cbqp->ifnp.root_)
		cbq_class_destroy(cbqp, cbqp->ifnp.root_);

	/* deallocate cbq_state_t */
	kfree(cbqp, M_ALTQ);

	return (0);
}

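/*
 * cbq_add_queue_locked() creates a single traffic class from the
 * parameters in the pf_altq descriptor.  The caller must hold the
 * cbq (subqueue) lock.
 */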
static int
cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*borrow, *parent;
	struct rm_class	*cl;
	struct cbq_opts	*opts;
	int		i;

	KKASSERT(a->qid != 0);

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = a->qid % CBQ_MAX_CLASSES;
	if (cbqp->cbq_class_tbl[i] != NULL) {
		for (i = 0; i < CBQ_MAX_CLASSES; i++)
			if (cbqp->cbq_class_tbl[i] == NULL)
				break;
		if (i == CBQ_MAX_CLASSES)
			return (EINVAL);
	}

	opts = &a->pq_u.cbq_opts;
	/* check parameters */
	if (a->priority >= CBQ_MAXPRI)
		return (EINVAL);

	/* Get pointers to parent and borrow classes.  */
	parent = clh_to_clp(cbqp, a->parent_qid);
	if (opts->flags & CBQCLF_BORROW)
		borrow = parent;
	else
		borrow = NULL;

	/*
	 * A class must borrow from its parent or it cannot borrow
	 * at all.  Hence, borrow can be NULL.
	 */
	if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
		kprintf("cbq_add_queue: no parent class!\n");
		return (EINVAL);
	}

	if ((borrow != parent) && (borrow != NULL)) {
		kprintf("cbq_add_queue: borrow class != parent\n");
		return (EINVAL);
	}

	/*
	 * check parameters
	 */
	switch (opts->flags & CBQCLF_CLASSMASK) {
	case CBQCLF_ROOTCLASS:
		if (parent != NULL)
			return (EINVAL);
		if (cbqp->ifnp.root_)
			return (EINVAL);
		break;
	case CBQCLF_DEFCLASS:
		if (cbqp->ifnp.default_)
			return (EINVAL);
		break;
	case 0:
		if (a->qid == 0)
			return (EINVAL);
		break;
	default:
		/* more than one class flag bit set */
		return (EINVAL);
	}

	/*
	 * create a class.  if this is a root class, initialize the
	 * interface.
	 */
	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
		rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
		    cbqrestart, a->qlimit, RM_MAXQUEUED,
		    opts->maxidle, opts->minidle, opts->offtime,
		    opts->flags);
		cl = cbqp->ifnp.root_;
	} else {
		cl = rmc_newclass(a->priority,
				  &cbqp->ifnp, opts->ns_per_byte,
				  rmc_delay_action, a->qlimit, parent, borrow,
				  opts->maxidle, opts->minidle, opts->offtime,
				  opts->pktsize, opts->flags);
	}
	if (cl == NULL)
		return (ENOMEM);

	/* return handle to user space. */
	cl->stats_.handle = a->qid;
	cl->stats_.depth = cl->depth_;

	/* save the allocated class */
	cbqp->cbq_class_tbl[i] = cl;

	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
		cbqp->ifnp.default_ = cl;

	return (0);
}

int
cbq_add_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);
	error = cbq_add_queue_locked(a, cbqp);
	CBQ_UNLOCK(ifq);

	return error;
}

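/*
 * cbq_remove_queue_locked() destroys the leaf class identified by
 * a->qid and clears its slot in the class table.  The caller must
 * hold the cbq (subqueue) lock.
 */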
static int
cbq_remove_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int		i;

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
		return (EINVAL);

	/* if we are a parent class, then return an error. */
	if (is_a_parent_class(cl))
		return (EINVAL);

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl) {
			cbqp->cbq_class_tbl[i] = NULL;
			if (cl == cbqp->ifnp.root_)
				cbqp->ifnp.root_ = NULL;
			if (cl == cbqp->ifnp.default_)
				cbqp->ifnp.default_ = NULL;
			break;
		}

	return (0);
}

int
cbq_remove_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);
	error = cbq_remove_queue_locked(a, cbqp);
	CBQ_UNLOCK(ifq);

	return error;
}

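/*
 * cbq_getqstats() copies the statistics of the class identified by
 * a->qid out to the user buffer ubuf.
 */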
int
cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	cbq_state_t	*cbqp;
	struct rm_class	*cl;
	class_stats_t	 stats;
	int		 error = 0;
	struct ifaltq	*ifq;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	ifnet_lock();

	/* XXX not MP safe */
	if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL) {
		ifnet_unlock();
		return (EBADF);
	}
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) {
		CBQ_UNLOCK(ifq);
		ifnet_unlock();
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	CBQ_UNLOCK(ifq);

	ifnet_unlock();

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * int
 * cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
 *     struct altq_pktattr *pktattr)
 *		- Queue data packets.
 *
 *	cbq_enqueue is registered via altq_attach() and called by an
 *	upper layer (e.g. ether_output).  cbq_enqueue classifies the
 *	given packet and queues it on the corresponding cbq class.
 *
 *	Returns:	0 if the queueing is successful.
 *			ENOBUFS if a packet was dropped as a result of
 *			the queueing.
 */

static int
cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr __unused)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct rm_class	*cl;
	int len;

	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
		/*
		 * A race occurred: an unrelated subqueue was picked
		 * during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return (ENOBUFS);
	}

	/* grab class set by classifier */
	M_ASSERTPKTHDR(m);
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL) {
		cl = cbqp->ifnp.default_;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	crit_enter();
	cl->pktattr_ = NULL;
	len = m_pktlen(m);
	if (rmc_queue_packet(cl, m) != 0) {
		/* drop occurred.  some mbuf was freed in rmc_queue_packet. */
		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}

	/* successfully queued. */
	++cbqp->cbq_qlen;
	ALTQ_SQ_PKTCNT_INC(ifsq);
	crit_exit();
	return (0);
}

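/*
 * cbq_dequeue() returns the next packet chosen by the rm machinery.
 * The queue length and class utilization are only updated for
 * ALTDQ_REMOVE; ALTDQ_POLL leaves the packet queued.
 */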
static struct mbuf *
cbq_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct mbuf	*m;

	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
		/*
		 * A race occurred: an unrelated subqueue was picked
		 * during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	crit_enter();
	m = rmc_dequeue_next(&cbqp->ifnp, op);

	if (m && op == ALTDQ_REMOVE) {
		--cbqp->cbq_qlen;  /* decrement # of packets in cbq */
		ALTQ_SQ_PKTCNT_DEC(ifsq);

		/* Update the class. */
		rmc_update_class_util(&cbqp->ifnp);
	}
	crit_exit();
	return (m);
}

/*
 * void
 * cbqrestart(struct ifaltq *) - Restart sending of data.
 * Called from rmc_restart in a critical section via timeout after
 * waking up a suspended class.
 *	Returns:	NONE
 */

static void
cbqrestart(struct ifaltq *ifq)
{
	cbq_state_t	*cbqp;

	CBQ_ASSERT_LOCKED(ifq);

	if (!ifq_is_enabled(ifq))
		/* cbq must have been detached */
		return;

	if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
		/* should not happen */
		return;

	if (cbqp->cbq_qlen > 0) {
		struct ifnet *ifp = ifq->altq_ifp;
		struct ifaltq_subque *ifsq = &ifq->altq_subq[CBQ_SUBQ_INDEX];

		/* Release the altq lock to avoid deadlock */
		CBQ_UNLOCK(ifq);

		ifsq_serialize_hw(ifsq);
		if (ifp->if_start && !ifsq_is_oactive(ifsq))
			(*ifp->if_start)(ifp, ifsq);
		ifsq_deserialize_hw(ifsq);

		CBQ_LOCK(ifq);
	}
}

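/*
 * cbq_purge() drops all packets queued in every class and resets
 * the subqueue counters if the ifq is still enabled.
 */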
static void
cbq_purge(cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int i;

	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
			rmc_dropall(cl);
	}
	if (ifq_is_enabled(cbqp->ifnp.ifq_))
		ALTQ_SQ_CNTR_RESET(&cbqp->ifnp.ifq_->altq_subq[CBQ_SUBQ_INDEX]);
}

#endif /* ALTQ_CBQ */