1 /* $NetBSD: npf_mbuf.c,v 1.16 2016/03/18 10:09:46 mrg Exp $ */
2
3 /*-
4 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NPF network buffer management interface.
34 *
35 * Network buffer in NetBSD is mbuf. Internal mbuf structures are
36 * abstracted within this source.
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.16 2016/03/18 10:09:46 mrg Exp $");
41
42 #include <sys/param.h>
43 #include <sys/mbuf.h>
44
45 #include "npf_impl.h"
46
/*
 * Alignment unit used when linearizing buffer data: at least one cache
 * line (COHERENCY_UNIT), and no less than 64 bytes.
 */
#define NBUF_ENSURE_ALIGN (MAX(COHERENCY_UNIT, 64))
#define NBUF_ENSURE_MASK (NBUF_ENSURE_ALIGN - 1)
/*
 * Round up to the next alignment boundary.  Note: this adds a full unit,
 * so an already-aligned value is bumped to the following boundary; the
 * caller clamps the result against the packet length.
 */
#define NBUF_ENSURE_ROUNDUP(x) (((x) + NBUF_ENSURE_ALIGN) & ~NBUF_ENSURE_MASK)
50
51 void
nbuf_init(nbuf_t * nbuf,struct mbuf * m,const ifnet_t * ifp)52 nbuf_init(nbuf_t *nbuf, struct mbuf *m, const ifnet_t *ifp)
53 {
54 u_int ifid = npf_ifmap_getid(ifp);
55
56 KASSERT((m->m_flags & M_PKTHDR) != 0);
57
58 nbuf->nb_mbuf0 = m;
59 nbuf->nb_ifp = ifp;
60 nbuf->nb_ifid = ifid;
61 nbuf_reset(nbuf);
62 }
63
64 void
nbuf_reset(nbuf_t * nbuf)65 nbuf_reset(nbuf_t *nbuf)
66 {
67 struct mbuf *m = nbuf->nb_mbuf0;
68
69 nbuf->nb_mbuf = m;
70 nbuf->nb_nptr = mtod(m, void *);
71 }
72
73 void *
nbuf_dataptr(nbuf_t * nbuf)74 nbuf_dataptr(nbuf_t *nbuf)
75 {
76 KASSERT(nbuf->nb_nptr);
77 return nbuf->nb_nptr;
78 }
79
80 size_t
nbuf_offset(const nbuf_t * nbuf)81 nbuf_offset(const nbuf_t *nbuf)
82 {
83 const struct mbuf *m = nbuf->nb_mbuf;
84 const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
85 const int poff = m_length(nbuf->nb_mbuf0) - m_length(m) + off;
86
87 return poff;
88 }
89
90 struct mbuf *
nbuf_head_mbuf(nbuf_t * nbuf)91 nbuf_head_mbuf(nbuf_t *nbuf)
92 {
93 return nbuf->nb_mbuf0;
94 }
95
96 bool
nbuf_flag_p(const nbuf_t * nbuf,int flag)97 nbuf_flag_p(const nbuf_t *nbuf, int flag)
98 {
99 return (nbuf->nb_flags & flag) != 0;
100 }
101
102 void
nbuf_unset_flag(nbuf_t * nbuf,int flag)103 nbuf_unset_flag(nbuf_t *nbuf, int flag)
104 {
105 nbuf->nb_flags &= ~flag;
106 }
107
108 /*
109 * nbuf_advance: advance in nbuf or chain by specified amount of bytes and,
110 * if requested, ensure that the area *after* advance is contiguous.
111 *
112 * => Returns new pointer to data in nbuf or NULL if offset is invalid.
113 * => Current nbuf and the offset is stored in the nbuf metadata.
114 */
115 void *
nbuf_advance(nbuf_t * nbuf,size_t len,size_t ensure)116 nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
117 {
118 struct mbuf *m = nbuf->nb_mbuf;
119 u_int off, wmark;
120 uint8_t *d;
121
122 /* Offset with amount to advance. */
123 off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
124 wmark = m->m_len;
125
126 /* Find the mbuf according to offset. */
127 while (__predict_false(wmark <= off)) {
128 m = m->m_next;
129 if (__predict_false(m == NULL)) {
130 /*
131 * If end of the chain, then the offset is
132 * higher than packet length.
133 */
134 return NULL;
135 }
136 wmark += m->m_len;
137 }
138 KASSERT(off < m_length(nbuf->nb_mbuf0));
139
140 /* Offset in mbuf data. */
141 d = mtod(m, uint8_t *);
142 KASSERT(off >= (wmark - m->m_len));
143 d += (off - (wmark - m->m_len));
144
145 nbuf->nb_mbuf = m;
146 nbuf->nb_nptr = d;
147
148 if (ensure) {
149 /* Ensure contiguousness (may change nbuf chain). */
150 d = nbuf_ensure_contig(nbuf, ensure);
151 }
152 return d;
153 }
154
/*
 * nbuf_ensure_contig: check whether the specified length from the current
 * point in the nbuf is contiguous.  If not, rearrange the chain to be so.
 *
 * => 'len' is counted from the current position (nb_nptr).
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure and nbuf becomes invalid.
 * => If the chain is rearranged, NBUF_DATAREF_RESET is set so callers
 *    know previously-taken data pointers must be re-fetched.
 */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
	const struct mbuf * const n = nbuf->nb_mbuf;
	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

	KASSERT(off <= n->m_len);

	if (__predict_false(n->m_len < (off + len))) {
		/* The requested span crosses an mbuf boundary. */
		struct mbuf *m = nbuf->nb_mbuf0;
		const size_t foff = nbuf_offset(nbuf);
		const size_t plen = m_length(m);
		const size_t mlen = m->m_len;
		size_t target;
		bool success;

		npf_stats_inc(NPF_STAT_NBUF_NONCONTIG);

		/* Attempt to round-up to NBUF_ENSURE_ALIGN bytes. */
		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
			/* Rounding exceeds the packet: use the exact need. */
			target = foff + len;
		}

		/* Rearrange the chain to be contiguous. */
		KASSERT((m->m_flags & M_PKTHDR) != 0);
		success = m_ensure_contig(&m, target);
		KASSERT(m != NULL);

		/* If no change in the chain: return what we have. */
		if (m == nbuf->nb_mbuf0 && m->m_len == mlen) {
			return success ? nbuf->nb_nptr : NULL;
		}

		/*
		 * The mbuf chain was re-arranged.  Update the pointers
		 * accordingly and indicate that the references to the data
		 * might need a reset.
		 */
		KASSERT((m->m_flags & M_PKTHDR) != 0);
		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;

		KASSERT(foff < m->m_len && foff < m_length(m));
		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
		nbuf->nb_flags |= NBUF_DATAREF_RESET;

		if (!success) {
			/* Linearization failed: account it and bail out. */
			npf_stats_inc(NPF_STAT_NBUF_CONTIG_FAIL);
			return NULL;
		}
	}
	return nbuf->nb_nptr;
}
215
216 void *
nbuf_ensure_writable(nbuf_t * nbuf,size_t len)217 nbuf_ensure_writable(nbuf_t *nbuf, size_t len)
218 {
219 struct mbuf *m = nbuf->nb_mbuf;
220 const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
221 const int tlen = off + len;
222 bool head_buf;
223
224 KASSERT(off < m_length(nbuf->nb_mbuf0));
225
226 if (!M_UNWRITABLE(m, tlen)) {
227 return nbuf->nb_nptr;
228 }
229 head_buf = (nbuf->nb_mbuf0 == m);
230 if (m_makewritable(&m, 0, tlen, M_NOWAIT)) {
231 memset(nbuf, 0, sizeof(nbuf_t));
232 return NULL;
233 }
234 if (head_buf) {
235 KASSERT((m->m_flags & M_PKTHDR) != 0);
236 KASSERT(off < m_length(m));
237 nbuf->nb_mbuf0 = m;
238 }
239 nbuf->nb_mbuf = m;
240 nbuf->nb_nptr = mtod(m, uint8_t *) + off;
241
242 return nbuf->nb_nptr;
243 }
244
245 bool
nbuf_cksum_barrier(nbuf_t * nbuf,int di)246 nbuf_cksum_barrier(nbuf_t *nbuf, int di)
247 {
248 struct mbuf *m;
249
250 if (di != PFIL_OUT) {
251 return false;
252 }
253 m = nbuf->nb_mbuf0;
254 KASSERT((m->m_flags & M_PKTHDR) != 0);
255
256 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
257 in_delayed_cksum(m);
258 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4 | M_CSUM_UDPv4);
259 return true;
260 }
261 #ifdef INET6
262 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
263 in6_delayed_cksum(m);
264 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv6 | M_CSUM_UDPv6);
265 return true;
266 }
267 #endif
268 return false;
269 }
270
271 /*
272 * nbuf_add_tag: add a tag to specified network buffer.
273 *
274 * => Returns 0 on success or errno on failure.
275 */
276 int
nbuf_add_tag(nbuf_t * nbuf,uint32_t key,uint32_t val)277 nbuf_add_tag(nbuf_t *nbuf, uint32_t key, uint32_t val)
278 {
279 struct mbuf *m = nbuf->nb_mbuf0;
280 struct m_tag *mt;
281 uint32_t *dat;
282
283 KASSERT((m->m_flags & M_PKTHDR) != 0);
284
285 mt = m_tag_get(PACKET_TAG_NPF, sizeof(uint32_t), M_NOWAIT);
286 if (mt == NULL) {
287 return ENOMEM;
288 }
289 dat = (uint32_t *)(mt + 1);
290 *dat = val;
291 m_tag_prepend(m, mt);
292 return 0;
293 }
294
295 /*
296 * nbuf_find_tag: find a tag in specified network buffer.
297 *
298 * => Returns 0 on success or errno on failure.
299 */
300 int
nbuf_find_tag(nbuf_t * nbuf,uint32_t key,void ** data)301 nbuf_find_tag(nbuf_t *nbuf, uint32_t key, void **data)
302 {
303 struct mbuf *m = nbuf->nb_mbuf0;
304 struct m_tag *mt;
305
306 KASSERT((m->m_flags & M_PKTHDR) != 0);
307
308 mt = m_tag_find(m, PACKET_TAG_NPF, NULL);
309 if (mt == NULL) {
310 return EINVAL;
311 }
312 *data = (void *)(mt + 1);
313 return 0;
314 }
315