xref: /dragonfly/sys/net/altq/if_altq.h (revision 0db87cb7)
1 /*	$KAME: if_altq.h,v 1.11 2003/07/10 12:07:50 kjc Exp $	*/
2 
3 /*
4  * Copyright (C) 1997-2003
5  *	Sony Computer Science Laboratories Inc.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #ifndef _NET_ALTQ_IF_ALTQ_H_
29 #define	_NET_ALTQ_IF_ALTQ_H_
30 
31 #ifndef _SYS_SERIALIZE_H_
32 #include <sys/serialize.h>
33 #endif
34 
35 /* Default subqueue */
36 #define ALTQ_SUBQ_INDEX_DEFAULT	0
37 
38 struct mbuf;
39 struct altq_pktattr;
40 
41 struct ifaltq_subque;
42 struct ifaltq;
43 
44 typedef int (*altq_mapsubq_t)(struct ifaltq *, int);
45 
46 typedef int (*ifsq_enqueue_t)(struct ifaltq_subque *, struct mbuf *,
47     struct altq_pktattr *);
48 typedef struct mbuf *(*ifsq_dequeue_t)(struct ifaltq_subque *, int);
49 typedef int (*ifsq_request_t)(struct ifaltq_subque *, int, void *);
50 
/*
 * Per-subqueue packet staging state; linked into a staging list
 * through stg_link while IFSQ_STAGE_FLAG_QUED is set.
 */
struct ifsubq_stage {
	struct ifaltq_subque *stg_subq;	/* subqueue being staged */
	int		stg_cnt;	/* # of staged packets (presumed; confirm vs. users) */
	int		stg_len;	/* staged bytes (presumed; confirm vs. users) */
	uint32_t	stg_flags;	/* IFSQ_STAGE_FLAG_* */
	TAILQ_ENTRY(ifsubq_stage) stg_link;	/* staging list linkage */
} __cachealign;

#define IFSQ_STAGE_FLAG_QUED	0x1	/* linked on a staging list */
#define IFSQ_STAGE_FLAG_SCHED	0x2	/* if_start dispatch scheduled */
61 
/*
 * One transmit subqueue of an ifaltq.  Each subqueue carries its own
 * serializer, priority/normal packet lists, packet and byte counters,
 * and the enqueue/dequeue/request operations installed for it.
 */
struct ifaltq_subque {
	struct lwkt_serialize ifsq_lock;	/* protects this subqueue */
	int		ifsq_index;	/* index in parent's altq_subq[] */

	struct ifaltq	*ifsq_altq;	/* parent ifaltq */
	struct ifnet	*ifsq_ifp;	/* owning interface */
	void		*ifsq_hw_priv;	/* hw private data */

	/* High-priority and normal packet lists, with their counters */
	struct mbuf	*ifsq_prio_head;
	struct mbuf	*ifsq_prio_tail;
	struct mbuf	*ifsq_norm_head;
	struct mbuf	*ifsq_norm_tail;
	int		ifsq_prio_len;	/* prio packet counter */
	int		ifsq_prio_bcnt;	/* prio byte counter */
	int		ifsq_len;	/* packet counter */
	int		ifsq_maxlen;	/* packet counter limit */
	int		ifsq_bcnt;	/* byte counter */
	int		ifsq_maxbcnt;	/* byte counter limit */

	/* Discipline operations installed by altq_attach() */
	ifsq_enqueue_t	ifsq_enqueue;
	ifsq_dequeue_t	ifsq_dequeue;
	ifsq_request_t	ifsq_request;

	struct lwkt_serialize *ifsq_hw_serialize;
					/* hw serializer */
	struct mbuf	*ifsq_prepended;/* mbuf dequeued, but not yet xmit */
	int		ifsq_started;	/* ifnet.if_start interlock */
	int		ifsq_hw_oactive;/* hw too busy, protected by driver */
	int		ifsq_cpuid;	/* owner cpu */
	struct ifsubq_stage *ifsq_stage;/* packet staging information */
	struct netmsg_base *ifsq_ifstart_nmsg;
					/* percpu msgs to sched if_start */
} __cachealign;
95 
96 #ifdef _KERNEL
97 
/*
 * Subqueue lock operations, backed by the subqueue's lwkt serializer.
 * ALTQ_SQ_LOCK uses the adaptive enter variant.
 */
#define ALTQ_SQ_ASSERT_LOCKED(ifsq)	ASSERT_SERIALIZED(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_LOCK_INIT(ifsq)		lwkt_serialize_init(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_LOCK(ifsq) \
	lwkt_serialize_adaptive_enter(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_UNLOCK(ifsq)		lwkt_serialize_exit(&(ifsq)->ifsq_lock)

/* Assert (non-)ownership of the driver-supplied hardware serializer. */
#define ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq) \
	ASSERT_SERIALIZED((ifsq)->ifsq_hw_serialize)
#define ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq) \
	ASSERT_NOT_SERIALIZED((ifsq)->ifsq_hw_serialize)
108 
/* Increment the subqueue packet counter. */
#define ALTQ_SQ_PKTCNT_INC(ifsq) \
do { \
	(ifsq)->ifsq_len++; \
} while (0)

/* Decrement the subqueue packet counter; asserts it stays non-negative. */
#define ALTQ_SQ_PKTCNT_DEC(ifsq) \
do { \
	KASSERT((ifsq)->ifsq_len > 0, ("invalid packet count")); \
	(ifsq)->ifsq_len--; \
} while (0)

/* Account one enqueued packet of (bcnt) bytes. */
#define ALTQ_SQ_CNTR_INC(ifsq, bcnt) \
do { \
	ALTQ_SQ_PKTCNT_INC((ifsq)); \
	(ifsq)->ifsq_bcnt += (bcnt); \
} while (0)

/* Account one dequeued packet of (bcnt) bytes; asserts counters stay sane. */
#define ALTQ_SQ_CNTR_DEC(ifsq, bcnt) \
do { \
	ALTQ_SQ_PKTCNT_DEC((ifsq)); \
	KASSERT((ifsq)->ifsq_bcnt >= (bcnt), ("invalid byte count")); \
	(ifsq)->ifsq_bcnt -= (bcnt); \
} while (0)

/* Zero the packet and byte counters (e.g. after a queue purge). */
#define ALTQ_SQ_CNTR_RESET(ifsq) \
do { \
	(ifsq)->ifsq_len = 0; \
	(ifsq)->ifsq_bcnt = 0; \
} while (0)

/* Account one packet of (bcnt) bytes added to the priority list. */
#define ALTQ_SQ_PRIO_CNTR_INC(ifsq, bcnt) \
do { \
	(ifsq)->ifsq_prio_len++; \
	(ifsq)->ifsq_prio_bcnt += (bcnt); \
} while (0)

/* Account one packet of (bcnt) bytes removed from the priority list. */
#define ALTQ_SQ_PRIO_CNTR_DEC(ifsq, bcnt) \
do { \
	KASSERT((ifsq)->ifsq_prio_len > 0, \
	    ("invalid prio packet count")); \
	(ifsq)->ifsq_prio_len--; \
	KASSERT((ifsq)->ifsq_prio_bcnt >= (bcnt), \
	    ("invalid prio byte count")); \
	(ifsq)->ifsq_prio_bcnt -= (bcnt); \
} while (0)
154 
155 #endif	/* _KERNEL */
156 
157 /*
158  * Structure defining a queue for a network interface.
159  */
/*
 * Structure defining a queue for a network interface.
 * Holds the discipline state, the classifier hook, the optional token
 * bucket regulator, and the array of per-cpu/per-hw-ring subqueues.
 */
struct	ifaltq {
	/* alternate queueing related fields */
	int	altq_type;		/* discipline type */
	int	altq_flags;		/* ALTQF_* flags (e.g. ready, in-use) */
	void	*altq_disc;		/* for discipline-specific use */
	struct	ifnet *altq_ifp;	/* back pointer to interface */

	/* classifier fields */
	void	*altq_clfier;		/* classifier-specific use */
	void	*(*altq_classify)(struct ifaltq *, struct mbuf *,
				  struct altq_pktattr *);

	/* token bucket regulator (NULL when rate limiting is off) */
	struct	tb_regulator *altq_tbr;

	/* Sub-queues mapping: selects a subqueue index for a packet */
	altq_mapsubq_t altq_mapsubq;
	uint32_t altq_subq_mask;

	/* Sub-queues */
	int	altq_subq_cnt;		/* # of entries in altq_subq[] */
	struct ifaltq_subque *altq_subq;

	int	altq_maxlen;		/* per-subqueue default max length */
};
185 
186 #ifdef _KERNEL
/* COMPAT: lock only the default subqueue, for pre-multiqueue callers. */
#define ALTQ_LOCK(ifq) \
	ALTQ_SQ_LOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT])
/* COMPAT: unlock only the default subqueue. */
#define ALTQ_UNLOCK(ifq) \
	ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT])
193 #endif
194 
195 #ifdef _KERNEL
196 
197 /*
198  * packet attributes used by queueing disciplines.
199  * pattr_class is a discipline-dependent scheduling class that is
200  * set by a classifier.
201  * pattr_hdr and pattr_af may be used by a discipline to access
202  * the header within a mbuf.  (e.g. ECN needs to update the CE bit)
203  * note that pattr_hdr could be stale after m_pullup, though link
204  * layer output routines usually don't use m_pullup.  link-level
205  * compression also invalidates these fields.  thus, pattr_hdr needs
206  * to be verified when a discipline touches the header.
207  */
struct altq_pktattr {
	void	*pattr_class;		/* sched class set by classifier */
	int	pattr_af;		/* address family */
	caddr_t	pattr_hdr;		/* saved header position in mbuf;
					   may be stale after m_pullup --
					   verify before dereferencing */
};
213 
214 /*
215  * a token-bucket regulator limits the rate that a network driver can
216  * dequeue packets from the output queue.
217  * modern cards are able to buffer a large amount of packets and dequeue
218  * too many packets at a time.  this bursty dequeue behavior makes it
219  * impossible to schedule packets by queueing disciplines.
220  * a token-bucket is used to control the burst size in a device
221  * independent manner.
222  */
/*
 * Token-bucket regulator state; "scaled" fields share a common
 * fixed-point scaling applied by the tbr implementation.
 */
struct tb_regulator {
	int64_t		tbr_rate;	/* (scaled) token bucket rate */
	int64_t		tbr_depth;	/* (scaled) token bucket depth */

	int64_t		tbr_token;	/* (scaled) current token */
	int64_t		tbr_filluptime;	/* (scaled) time to fill up bucket */
	uint64_t	tbr_last;	/* last time token was updated */

	int		tbr_lastop;	/* last dequeue operation type
					   (ALTDQ_*), needed for
					   poll-and-dequeue */
};
234 
/* if_altqflags (altq_flags) */
#define	ALTQF_READY	 0x01	/* driver supports alternate queueing */
#define	ALTQF_ENABLED	 0x02	/* altq is in use */
#define	ALTQF_CLASSIFY	 0x04	/* classify packets */
#define	ALTQF_DRIVER1	 0x40	/* driver specific */

/* if_altqflags set internally only: */
#define	ALTQF_CANTCHANGE 	(ALTQF_READY)

/* altq_dequeue 2nd arg (also recorded in tbr_lastop) */
#define	ALTDQ_REMOVE		1	/* dequeue mbuf from the queue */
#define	ALTDQ_POLL		2	/* don't dequeue mbuf from the queue */

/* altq request types (currently only purge is defined) */
#define	ALTRQ_PURGE		1	/* purge all packets */
250 
/*
 * Attach a discipline: type, discipline state, subqueue mapper, the
 * per-subqueue enqueue/dequeue/request ops, classifier state and
 * classify function.  Returns 0 or an errno.
 */
int	altq_attach(struct ifaltq *, int, void *, altq_mapsubq_t,
	    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t, void *,
	    void *(*)(struct ifaltq *, struct mbuf *, struct altq_pktattr *));
int	altq_detach(struct ifaltq *);
int	altq_enable(struct ifaltq *);
int	altq_disable(struct ifaltq *);
/* Token-bucket regulated dequeue; op is ALTDQ_REMOVE or ALTDQ_POLL. */
struct mbuf *tbr_dequeue(struct ifaltq_subque *, int);
extern int	(*altq_input)(struct mbuf *, int);
259 #endif /* _KERNEL */
260 
261 #endif /* _NET_ALTQ_IF_ALTQ_H_ */
262