/*	$KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_cbq.c,v 1.7 2008/05/14 11:59:23 sephe Exp $ */

/*
 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the SMCC Technology
 *      Development Group at Sun Microsystems, Inc.
 *
 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
 *      promote products derived from this software without specific prior
 *      written permission.
 *
 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE.  The software is
 * provided "as is" without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this software.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>

#include <sys/thread2.h>

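/*
 * Class Based Queueing (CBQ) glue for ALTQ/pf.
 *
 * CBQ arranges classes in a link-sharing hierarchy; a class may borrow
 * unused bandwidth from its parent when the CBQCLF_BORROW flag is set.
 * The actual scheduling machinery (rm_class) lives in altq_rmclass.c;
 * this file maps pf(4) ioctl requests and the ifaltq enqueue/dequeue
 * hooks onto that machinery.
 */
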
/*
 * Forward Declarations.
 */
static int		 cbq_class_destroy(cbq_state_t *, struct rm_class *);
static struct rm_class  *clh_to_clp(cbq_state_t *, uint32_t);
static int		 cbq_clear_interface(cbq_state_t *);
static int		 cbq_request(struct ifaltq *, int, void *);
static int		 cbq_enqueue(struct ifaltq *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf	*cbq_dequeue(struct ifaltq *, struct mbuf *, int);
static void		 cbqrestart(struct ifaltq *);
static void		 get_class_stats(class_stats_t *, struct rm_class *);
static void		 cbq_purge(cbq_state_t *);

/*
 * int
 * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
 *	function destroys a given traffic class.  Before destroying
 *	the class, all traffic for that class is released.
 */
static int
cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
{
	int	i;

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl)
			cbqp->cbq_class_tbl[i] = NULL;

	if (cl == cbqp->ifnp.root_)
		cbqp->ifnp.root_ = NULL;
	if (cl == cbqp->ifnp.default_)
		cbqp->ifnp.default_ = NULL;
	return (0);
}

/* convert class handle to class pointer */
static struct rm_class *
clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
{
	int i;
	struct rm_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % CBQ_MAX_CLASSES;
	if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
	    cl->stats_.handle == chandle)
		return (cl);
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
		    cl->stats_.handle == chandle)
			return (cl);
	return (NULL);
}

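/*
 * Destroy all classes on the interface.  Parent classes cannot be
 * removed while they still have children, so keep sweeping the class
 * table until every class has been torn down leaf-first.
 */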
static int
cbq_clear_interface(cbq_state_t *cbqp)
{
	int		 again, i;
	struct rm_class	*cl;

	/* clear out the classes now */
	do {
		again = 0;
		for (i = 0; i < CBQ_MAX_CLASSES; i++) {
			if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
				if (is_a_parent_class(cl))
					again++;
				else {
					cbq_class_destroy(cbqp, cl);
					cbqp->cbq_class_tbl[i] = NULL;
					if (cl == cbqp->ifnp.root_)
						cbqp->ifnp.root_ = NULL;
					if (cl == cbqp->ifnp.default_)
						cbqp->ifnp.default_ = NULL;
				}
			}
		}
	} while (again);

	return (0);
}

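/*
 * Handle a control request from the ALTQ framework.  Only ALTRQ_PURGE
 * (drop everything that is currently queued) is implemented.
 */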
static int
cbq_request(struct ifaltq *ifq, int req, void *arg)
{
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		cbq_purge(cbqp);
		break;
	}
	crit_exit();
	return (0);
}

/* copy the stats info in rm_class to class_stats_t */
static void
get_class_stats(class_stats_t *statsp, struct rm_class *cl)
{
	statsp->xmit_cnt	= cl->stats_.xmit_cnt;
	statsp->drop_cnt	= cl->stats_.drop_cnt;
	statsp->over		= cl->stats_.over;
	statsp->borrows		= cl->stats_.borrows;
	statsp->overactions	= cl->stats_.overactions;
	statsp->delays		= cl->stats_.delays;

	statsp->depth		= cl->depth_;
	statsp->priority	= cl->pri_;
	statsp->maxidle		= cl->maxidle_;
	statsp->minidle		= cl->minidle_;
	statsp->offtime		= cl->offtime_;
	statsp->qmax		= qlimit(cl->q_);
	statsp->ns_per_byte	= cl->ns_per_byte_;
	statsp->wrr_allot	= cl->w_allotment_;
	statsp->qcnt		= qlen(cl->q_);
	statsp->avgidle		= cl->avgidle_;

	statsp->qtype		= qtype(cl->q_);
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		red_getstats(cl->red_, &statsp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
#endif
}

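/*
 * Attach the CBQ discipline to the interface's send queue on behalf of
 * pf(4), registering our enqueue/dequeue/request handlers with ALTQ.
 */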
int
cbq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_CBQ, a->altq_disc,
	    cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
}

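/*
 * Allocate and initialize the per-interface cbq_state_t when pf(4)
 * defines an altq on this interface.  The state is stashed in the
 * pf_altq record until the discipline is attached.
 */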
int
cbq_add_altq(struct pf_altq *a)
{
	cbq_state_t	*cbqp;
	struct ifnet	*ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	/* allocate and initialize cbq_state_t */
	cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cbqp->cbq_callout);
	cbqp->cbq_qlen = 0;
	cbqp->ifnp.ifq_ = &ifp->if_snd;	    /* keep the ifq */
	ifq_purge(&ifp->if_snd);

	/* keep the state in pf_altq */
	a->altq_disc = cbqp;

	return (0);
}

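/*
 * Undo cbq_add_altq(): destroy all classes (including the root and
 * default classes) and free the per-interface state.
 */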
int
cbq_remove_altq(struct pf_altq *a)
{
	cbq_state_t	*cbqp;

	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	cbq_clear_interface(cbqp);

	if (cbqp->ifnp.default_)
		cbq_class_destroy(cbqp, cbqp->ifnp.default_);
	if (cbqp->ifnp.root_)
		cbq_class_destroy(cbqp, cbqp->ifnp.root_);

	/* deallocate cbq_state_t */
	kfree(cbqp, M_ALTQ);

	return (0);
}

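/*
 * Create one CBQ class from a pf_altq specification; the caller holds
 * the ALTQ lock.  The root class initializes the rm_class machinery for
 * the interface, the default class catches unclassified traffic, and
 * every other class hangs off its parent_qid, borrowing from the parent
 * when CBQCLF_BORROW is set.
 *
 * For illustration only (interface name and numbers are made up), a
 * pf.conf fragment along these lines would drive this function once per
 * queue:
 *
 *	altq on em0 cbq bandwidth 10Mb queue { std, ssh }
 *	queue std bandwidth 50% cbq(default)
 *	queue ssh bandwidth 25% cbq(borrow)
 */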
static int
cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*borrow, *parent;
	struct rm_class	*cl;
	struct cbq_opts	*opts;
	int		i;

	KKASSERT(a->qid != 0);

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = a->qid % CBQ_MAX_CLASSES;
	if (cbqp->cbq_class_tbl[i] != NULL) {
		for (i = 0; i < CBQ_MAX_CLASSES; i++)
			if (cbqp->cbq_class_tbl[i] == NULL)
				break;
		if (i == CBQ_MAX_CLASSES)
			return (EINVAL);
	}

	opts = &a->pq_u.cbq_opts;
	/* check parameters */
	if (a->priority >= CBQ_MAXPRI)
		return (EINVAL);

	/* Get pointers to parent and borrow classes.  */
	parent = clh_to_clp(cbqp, a->parent_qid);
	if (opts->flags & CBQCLF_BORROW)
		borrow = parent;
	else
		borrow = NULL;

	/*
	 * A class must borrow from its parent or it cannot
	 * borrow at all.  Hence, borrow may be NULL.
	 */
	if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
		kprintf("cbq_add_queue: no parent class!\n");
		return (EINVAL);
	}

	if ((borrow != parent) && (borrow != NULL)) {
		kprintf("cbq_add_queue: borrow class != parent\n");
		return (EINVAL);
	}

	/*
	 * check parameters
	 */
	switch (opts->flags & CBQCLF_CLASSMASK) {
	case CBQCLF_ROOTCLASS:
		if (parent != NULL)
			return (EINVAL);
		if (cbqp->ifnp.root_)
			return (EINVAL);
		break;
	case CBQCLF_DEFCLASS:
		if (cbqp->ifnp.default_)
			return (EINVAL);
		break;
	case 0:
		if (a->qid == 0)
			return (EINVAL);
		break;
	default:
		/* invalid combination of class flags */
		return (EINVAL);
	}

	/*
	 * create a class.  if this is a root class, initialize the
	 * interface.
	 */
	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
		rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
		    cbqrestart, a->qlimit, RM_MAXQUEUED,
		    opts->maxidle, opts->minidle, opts->offtime,
		    opts->flags);
		cl = cbqp->ifnp.root_;
	} else {
		cl = rmc_newclass(a->priority,
				  &cbqp->ifnp, opts->ns_per_byte,
				  rmc_delay_action, a->qlimit, parent, borrow,
				  opts->maxidle, opts->minidle, opts->offtime,
				  opts->pktsize, opts->flags);
	}
	if (cl == NULL)
		return (ENOMEM);

	/* return handle to user space. */
	cl->stats_.handle = a->qid;
	cl->stats_.depth = cl->depth_;

	/* save the allocated class */
	cbqp->cbq_class_tbl[i] = cl;

	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
		cbqp->ifnp.default_ = cl;

	return (0);
}

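/*
 * Locked wrapper around cbq_add_queue_locked().
 */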
int
cbq_add_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	ALTQ_LOCK(ifq);
	error = cbq_add_queue_locked(a, cbqp);
	ALTQ_UNLOCK(ifq);

	return error;
}

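/*
 * Remove a leaf class identified by its qid; the caller holds the ALTQ
 * lock.  Parent classes are refused because their children still
 * reference them.
 */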
static int
cbq_remove_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int		i;

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
		return (EINVAL);

	/* if we are a parent class, then return an error. */
	if (is_a_parent_class(cl))
		return (EINVAL);

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl) {
			cbqp->cbq_class_tbl[i] = NULL;
			if (cl == cbqp->ifnp.root_)
				cbqp->ifnp.root_ = NULL;
			if (cl == cbqp->ifnp.default_)
				cbqp->ifnp.default_ = NULL;
			break;
		}

	return (0);
}

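/*
 * Locked wrapper around cbq_remove_queue_locked().
 */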
int
cbq_remove_queue(struct pf_altq *a)
{
	cbq_state_t *cbqp;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((cbqp = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = cbqp->ifnp.ifq_;

	ALTQ_LOCK(ifq);
	error = cbq_remove_queue_locked(a, cbqp);
	ALTQ_UNLOCK(ifq);

	return error;
}

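/*
 * Copy the statistics of one class out to user space.  *nbytes must be
 * at least sizeof(class_stats_t) on entry and is set to the number of
 * bytes actually copied on success.
 */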
int
cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	cbq_state_t	*cbqp;
	struct rm_class	*cl;
	class_stats_t	 stats;
	int		 error = 0;
	struct ifaltq	*ifq;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	/* XXX not MP safe */
	if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL)
		return (EBADF);
	ifq = cbqp->ifnp.ifq_;

	ALTQ_LOCK(ifq);

	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) {
		ALTQ_UNLOCK(ifq);
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	ALTQ_UNLOCK(ifq);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * int
 * cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pattr)
 *		- Queue data packets.
 *
 *	cbq_enqueue is registered via altq_attach() as the enqueue handler
 *	for the interface queue and is called when an upper layer (e.g.
 *	ether_output) hands a packet to the interface.  It classifies the
 *	packet (using the qid recorded in the mbuf by pf, if any) and queues
 *	it on the matching class; starting the driver is left to the caller.
 *
 *	Returns:	0 if the queueing is successful.
 *			ENOBUFS if a packet drop occurred as a result of
 *			the queueing.
 */

static int
cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct rm_class	*cl;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		return (ENOBUFS);
	}
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL) {
		cl = cbqp->ifnp.default_;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	crit_enter();
	cl->pktattr_ = NULL;
	len = m_pktlen(m);
	if (rmc_queue_packet(cl, m) != 0) {
		/* drop occurred.  some mbuf was freed in rmc_queue_packet. */
		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}

	/* successfully queued. */
	++cbqp->cbq_qlen;
	++ifq->ifq_len;
	crit_exit();
	return (0);
}

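/*
 * Hand the next packet chosen by the CBQ scheduler to the driver.
 * op is either ALTDQ_POLL (peek without removing) or ALTDQ_REMOVE;
 * queue lengths and class utilization are only updated on removal.
 */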
static struct mbuf *
cbq_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
{
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct mbuf	*m;

	crit_enter();
	m = rmc_dequeue_next(&cbqp->ifnp, op);

	if (m && op == ALTDQ_REMOVE) {
		--cbqp->cbq_qlen;  /* decrement # of packets in cbq */
		--ifq->ifq_len;

		/* Update the class. */
		rmc_update_class_util(&cbqp->ifnp);
	}
	crit_exit();
	KKASSERT(mpolled == NULL || mpolled == m);
	return (m);
}

/*
 * void
 * cbqrestart(struct ifaltq *) - Restart sending of data.
 * Called from rmc_restart() in a critical section via timeout after waking
 * up a suspended class.
 *	Returns:	NONE
 */

static void
cbqrestart(struct ifaltq *ifq)
{
	cbq_state_t	*cbqp;

	ALTQ_ASSERT_LOCKED(ifq);

	if (!ifq_is_enabled(ifq))
		/* cbq must have been detached */
		return;

	if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
		/* should not happen */
		return;

	if (cbqp->cbq_qlen > 0) {
		struct ifnet *ifp = ifq->altq_ifp;

		/* Release the altq lock to avoid deadlock */
		ALTQ_UNLOCK(ifq);

		ifnet_serialize_tx(ifp);
		if (ifp->if_start && (ifp->if_flags & IFF_OACTIVE) == 0)
			(*ifp->if_start)(ifp);
		ifnet_deserialize_tx(ifp);

		ALTQ_LOCK(ifq);
	}
}

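/*
 * Drop every packet queued in every class and reset the interface
 * queue length accordingly.
 */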
static void
cbq_purge(cbq_state_t *cbqp)
{
	struct rm_class	*cl;
	int i;

	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
			rmc_dropall(cl);
	}
	if (ifq_is_enabled(cbqp->ifnp.ifq_))
		cbqp->ifnp.ifq_->ifq_len = 0;
}

#endif /* ALTQ_CBQ */