/*	$KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_cbq.c,v 1.7 2008/05/14 11:59:23 sephe Exp $ */

/*
 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the SMCC Technology
 *      Development Group at Sun Microsystems, Inc.
 *
 * 4. The name of Sun Microsystems, Inc may not be used to endorse or
 *      promote products derived from this software without specific prior
 *      written permission.
 *
 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE.  The software is
 * provided "as is" without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this software.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>

#include <sys/thread2.h>

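/*
 * CBQ operates on the default subqueue only; all of the locking below
 * goes through that subqueue's lock.
 */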
#define CBQ_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define CBQ_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_ASSERT_LOCKED(ifq) \
    ALTQ_SQ_ASSERT_LOCKED(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])

/*
 * Forward Declarations.
 */
static int		 cbq_class_destroy(cbq_state_t *, struct rm_class *);
static struct rm_class  *clh_to_clp(cbq_state_t *, uint32_t);
static int		 cbq_clear_interface(cbq_state_t *);
static int		 cbq_request(struct ifaltq_subque *, int, void *);
static int		 cbq_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf	*cbq_dequeue(struct ifaltq_subque *, int);
static void		 cbqrestart(struct ifaltq *);
static void		 get_class_stats(class_stats_t *, struct rm_class *);
static void		 cbq_purge(cbq_state_t *);

/*
 * int
 * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
 *	function destroys a given traffic class.  Before destroying
 *	the class, all traffic for that class is released.
 */
static int
cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
{
	int	i;

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl)
			cbqp->cbq_class_tbl[i] = NULL;

	if (cl == cbqp->ifnp.root_)
		cbqp->ifnp.root_ = NULL;
	if (cl == cbqp->ifnp.default_)
		cbqp->ifnp.default_ = NULL;
	return (0);
}

/* convert class handle to class pointer */
static struct rm_class *
clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
{
	int i;
	struct rm_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % CBQ_MAX_CLASSES;
	if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
	    cl->stats_.handle == chandle)
		return (cl);
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
		    cl->stats_.handle == chandle)
			return (cl);
	return (NULL);
}

static int
cbq_clear_interface(cbq_state_t *cbqp)
{
	int		 again, i;
	struct rm_class	*cl;

	/* clear out the classes now */
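	/*
	 * Leaf classes must go first: a parent class cannot be
	 * destroyed while it still has children, so parents are
	 * skipped (bumping "again") and the sweep repeats until the
	 * whole tree has been torn down.
	 */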
	do {
		again = 0;
		for (i = 0; i < CBQ_MAX_CLASSES; i++) {
			if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
				if (is_a_parent_class(cl))
					again++;
				else {
					cbq_class_destroy(cbqp, cl);
					cbqp->cbq_class_tbl[i] = NULL;
					if (cl == cbqp->ifnp.root_)
						cbqp->ifnp.root_ = NULL;
					if (cl == cbqp->ifnp.default_)
						cbqp->ifnp.default_ = NULL;
				}
			}
		}
	} while (again);

	return (0);
}

static int
cbq_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == CBQ_SUBQ_INDEX) {
			cbq_purge(cbqp);
		} else {
			/*
			 * Race happened, the unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}

/* copy the stats info in rm_class to class_stats_t */
static void
get_class_stats(class_stats_t *statsp, struct rm_class *cl)
{
	statsp->xmit_cnt	= cl->stats_.xmit_cnt;
	statsp->drop_cnt	= cl->stats_.drop_cnt;
	statsp->over		= cl->stats_.over;
	statsp->borrows		= cl->stats_.borrows;
	statsp->overactions	= cl->stats_.overactions;
	statsp->delays		= cl->stats_.delays;

	statsp->depth		= cl->depth_;
	statsp->priority	= cl->pri_;
	statsp->maxidle		= cl->maxidle_;
	statsp->minidle		= cl->minidle_;
	statsp->offtime		= cl->offtime_;
	statsp->qmax		= qlimit(cl->q_);
	statsp->ns_per_byte	= cl->ns_per_byte_;
	statsp->wrr_allot	= cl->w_allotment_;
	statsp->qcnt		= qlen(cl->q_);
	statsp->avgidle		= cl->avgidle_;

	statsp->qtype		= qtype(cl->q_);
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		red_getstats(cl->red_, &statsp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
#endif
}

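/*
 * Install the CBQ enqueue/dequeue/request handlers on the interface
 * queue.  The discipline state was allocated by cbq_add_altq() and is
 * carried in a->altq_disc.
 */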
int
cbq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_CBQ, a->altq_disc, ifq_mapsubq_default,
	    cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
}

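/*
 * Allocate and initialize the per-interface CBQ state.  Called when a
 * pf(4) altq rule is loaded for the interface; classes are added
 * separately through cbq_add_queue().
 */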
int
cbq_add_altq(struct pf_altq *a)
{
	cbq_state_t	*cbqp;
	struct ifnet	*ifp;

	ifnet_lock();

	if ((ifp = ifunit(a->ifname)) == NULL) {
		ifnet_unlock();
		return (EINVAL);
	}
	if (!ifq_is_ready(&ifp->if_snd)) {
		ifnet_unlock();
		return (ENODEV);
	}

	/* allocate and initialize cbq_state_t */
	cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cbqp->cbq_callout);
	cbqp->cbq_qlen = 0;
	cbqp->ifnp.ifq_ = &ifp->if_snd;	    /* keep the ifq */
	ifq_purge_all(&ifp->if_snd);

	ifnet_unlock();

	/* keep the state in pf_altq */
	a->altq_disc = cbqp;

	return (0);
}

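/*
 * Tear down the discipline: cbq_clear_interface() destroys the class
 * tree, then any surviving default and root classes are destroyed
 * explicitly before the state itself is freed.
 */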
int
cbq_remove_altq(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;

	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	ifq = cbqp->ifnp.ifq_;
	CBQ_LOCK(ifq);

	cbq_clear_interface(cbqp);

	if (cbqp->ifnp.default_)
		cbq_class_destroy(cbqp, cbqp->ifnp.default_);
	if (cbqp->ifnp.root_)
		cbq_class_destroy(cbqp, cbqp->ifnp.root_);

	CBQ_UNLOCK(ifq);

	/* deallocate cbq_state_t */
	kfree(cbqp, M_ALTQ);

	return (0);
}

static int
cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*borrow, *parent;
	struct rm_class	*cl;
	struct cbq_opts	*opts;
	int		i;

	KKASSERT(a->qid != 0);

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = a->qid % CBQ_MAX_CLASSES;
	if (cbqp->cbq_class_tbl[i] != NULL) {
		for (i = 0; i < CBQ_MAX_CLASSES; i++)
			if (cbqp->cbq_class_tbl[i] == NULL)
				break;
		if (i == CBQ_MAX_CLASSES)
			return (EINVAL);
	}

	opts = &a->pq_u.cbq_opts;
	/* check parameters */
	if (a->priority >= CBQ_MAXPRI)
		return (EINVAL);

	/* Get pointers to parent and borrow classes.  */
	parent = clh_to_clp(cbqp, a->parent_qid);
	if (opts->flags & CBQCLF_BORROW)
		borrow = parent;
	else
		borrow = NULL;

	/*
	 * A class must borrow from its parent or it cannot
	 * borrow at all.  Hence, borrow can be NULL.
	 */
	if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
		kprintf("cbq_add_queue: no parent class!\n");
		return (EINVAL);
	}

	if ((borrow != parent) && (borrow != NULL)) {
		kprintf("cbq_add_queue: borrow class != parent\n");
		return (EINVAL);
	}

	/*
	 * check parameters
	 */
	switch (opts->flags & CBQCLF_CLASSMASK) {
	case CBQCLF_ROOTCLASS:
		if (parent != NULL)
			return (EINVAL);
		if (cbqp->ifnp.root_)
			return (EINVAL);
		break;
	case CBQCLF_DEFCLASS:
		if (cbqp->ifnp.default_)
			return (EINVAL);
		break;
	case 0:
		if (a->qid == 0)
			return (EINVAL);
		break;
	default:
		/* both class flag bits set */
		return (EINVAL);
	}

	/*
	 * create a class.  if this is a root class, initialize the
	 * interface.
	 */
	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
		rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
		    cbqrestart, a->qlimit, RM_MAXQUEUED,
		    opts->maxidle, opts->minidle, opts->offtime,
		    opts->flags);
		cl = cbqp->ifnp.root_;
	} else {
		cl = rmc_newclass(a->priority,
				  &cbqp->ifnp, opts->ns_per_byte,
				  rmc_delay_action, a->qlimit, parent, borrow,
				  opts->maxidle, opts->minidle, opts->offtime,
				  opts->pktsize, opts->flags);
	}
	if (cl == NULL)
		return (ENOMEM);

	/* return handle to user space. */
	cl->stats_.handle = a->qid;
	cl->stats_.depth = cl->depth_;

	/* save the allocated class */
	cbqp->cbq_class_tbl[i] = cl;

	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
		cbqp->ifnp.default_ = cl;

	return (0);
}

int
cbq_add_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);
	error = cbq_add_queue_locked(a, cbqp);
	CBQ_UNLOCK(ifq);

	return error;
}

static int
cbq_remove_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int		i;

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
		return (EINVAL);

	/* if we are a parent class, then return an error. */
	if (is_a_parent_class(cl))
		return (EINVAL);

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl) {
			cbqp->cbq_class_tbl[i] = NULL;
			if (cl == cbqp->ifnp.root_)
				cbqp->ifnp.root_ = NULL;
			if (cl == cbqp->ifnp.default_)
				cbqp->ifnp.default_ = NULL;
			break;
		}

	return (0);
}

int
cbq_remove_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);
	error = cbq_remove_queue_locked(a, cbqp);
	CBQ_UNLOCK(ifq);

	return error;
}

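/*
 * Copy the statistics of the class identified by a->qid out to the
 * user buffer; *nbytes is updated with the amount copied.
 */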
int
cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	cbq_state_t	*cbqp;
	struct rm_class	*cl;
	class_stats_t	 stats;
	int		 error = 0;
	struct ifaltq	*ifq;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	ifnet_lock();

	/* XXX not MP safe */
	if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL) {
		ifnet_unlock();
		return (EBADF);
	}
	ifq = cbqp->ifnp.ifq_;

	CBQ_LOCK(ifq);

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) {
		CBQ_UNLOCK(ifq);
		ifnet_unlock();
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	CBQ_UNLOCK(ifq);

	ifnet_unlock();

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * int
 * cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
 *     struct altq_pktattr *pattr)
 *		- Queue data packets.
 *
 *	cbq_enqueue is installed as the subqueue enqueue handler by
 *	altq_attach() and called by an upper layer (e.g. ether_output).
 *	cbq_enqueue queues the given packet to the cbq, then invokes the
 *	driver's start routine.
 *
 *	Returns:	0 if the queueing is successful.
 *			ENOBUFS if a packet drop occurred as a result of
 *			the queueing.
 */

static int
cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr __unused)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct rm_class	*cl;
	int len;

	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return (ENOBUFS);
	}

	/* grab class set by classifier */
	M_ASSERTPKTHDR(m);
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL) {
		cl = cbqp->ifnp.default_;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	crit_enter();
	cl->pktattr_ = NULL;
	len = m_pktlen(m);
	if (rmc_queue_packet(cl, m) != 0) {
		/* drop occurred.  some mbuf was freed in rmc_queue_packet. */
		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}

	/* successfully queued. */
	++cbqp->cbq_qlen;
	ALTQ_SQ_PKTCNT_INC(ifsq);
	crit_exit();
	return (0);
}

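/*
 * Dequeue the next packet.  The queue length and the class utilization
 * are updated only for ALTDQ_REMOVE; other ops (e.g. ALTDQ_POLL) leave
 * them untouched.
 */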
static struct mbuf *
cbq_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct mbuf	*m;

	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	crit_enter();
	m = rmc_dequeue_next(&cbqp->ifnp, op);

	if (m && op == ALTDQ_REMOVE) {
		--cbqp->cbq_qlen;  /* decrement # of packets in cbq */
		ALTQ_SQ_PKTCNT_DEC(ifsq);

		/* Update the class. */
		rmc_update_class_util(&cbqp->ifnp);
	}
	crit_exit();
	return (m);
}

/*
 * void
 * cbqrestart(struct ifaltq *) - Restart sending of data.
 * Called from rmc_restart via a timeout in a critical section after
 * waking up a suspended class.
 *	Returns:	NONE
 */

static void
cbqrestart(struct ifaltq *ifq)
{
	cbq_state_t	*cbqp;

	CBQ_ASSERT_LOCKED(ifq);

	if (!ifq_is_enabled(ifq))
		/* cbq must have been detached */
		return;

	if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
		/* should not happen */
		return;

	if (cbqp->cbq_qlen > 0) {
		struct ifnet *ifp = ifq->altq_ifp;
		struct ifaltq_subque *ifsq = &ifq->altq_subq[CBQ_SUBQ_INDEX];

		/* Release the altq lock to avoid deadlock */
		CBQ_UNLOCK(ifq);

		ifsq_serialize_hw(ifsq);
		if (ifp->if_start && !ifsq_is_oactive(ifsq))
			(*ifp->if_start)(ifp, ifsq);
		ifsq_deserialize_hw(ifsq);

		CBQ_LOCK(ifq);
	}
}

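/*
 * Drop all packets queued in every class; the subqueue counters are
 * reset as well if ALTQ is still enabled on the interface.
 */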
static void
cbq_purge(cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int i;

	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
			rmc_dropall(cl);
	}
	if (ifq_is_enabled(cbqp->ifnp.ifq_))
		ALTQ_SQ_CNTR_RESET(&cbqp->ifnp.ifq_->altq_subq[CBQ_SUBQ_INDEX]);
}

#endif /* ALTQ_CBQ */