1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 #include "opt_param.h"
34 #include "opt_mbuf_stress_test.h"
35 #include "opt_mbuf_profiling.h"
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/sysctl.h>
45 #include <sys/domain.h>
46 #include <sys/protosw.h>
47 #include <sys/uio.h>
48 #include <sys/vmmeter.h>
49 #include <sys/sbuf.h>
50 #include <sys/sdt.h>
51 #include <vm/vm.h>
52 #include <vm/vm_pageout.h>
53 #include <vm/vm_page.h>
54
55 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
56 "struct mbuf *", "mbufinfo_t *",
57 "uint32_t", "uint32_t",
58 "uint16_t", "uint16_t",
59 "uint32_t", "uint32_t",
60 "uint32_t", "uint32_t");
61
62 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr_raw,
63 "uint32_t", "uint32_t",
64 "uint16_t", "uint16_t",
65 "struct mbuf *", "mbufinfo_t *");
66
67 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
68 "uint32_t", "uint32_t",
69 "uint16_t", "uint16_t",
70 "struct mbuf *", "mbufinfo_t *");
71
72 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get_raw,
73 "uint32_t", "uint32_t",
74 "uint16_t", "uint16_t",
75 "struct mbuf *", "mbufinfo_t *");
76
77 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
78 "uint32_t", "uint32_t",
79 "uint16_t", "uint16_t",
80 "struct mbuf *", "mbufinfo_t *");
81
82 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
83 "uint32_t", "uint32_t",
84 "uint16_t", "uint16_t",
85 "uint32_t", "uint32_t",
86 "struct mbuf *", "mbufinfo_t *");
87
88 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__getjcl,
89 "uint32_t", "uint32_t",
90 "uint16_t", "uint16_t",
91 "uint32_t", "uint32_t",
92 "uint32_t", "uint32_t",
93 "struct mbuf *", "mbufinfo_t *");
94
95 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
96 "struct mbuf *", "mbufinfo_t *",
97 "uint32_t", "uint32_t",
98 "uint32_t", "uint32_t");
99
100 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
101 "struct mbuf *", "mbufinfo_t *",
102 "uint32_t", "uint32_t",
103 "uint32_t", "uint32_t",
104 "void*", "void*");
105
106 SDT_PROBE_DEFINE(sdt, , , m__cljset);
107
108 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
109 "struct mbuf *", "mbufinfo_t *");
110
111 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
112 "struct mbuf *", "mbufinfo_t *");
113
114 #include <security/mac/mac_framework.h>
115
116 /*
117 * Provide minimum possible defaults for link and protocol header space,
118 * assuming IPv4 over Ethernet. Enabling IPv6, IEEE802.11 or some other
119 * protocol may grow these values.
120 */
121 u_int max_linkhdr = 16;
122 u_int max_protohdr = 40;
123 u_int max_hdr = 16 + 40;
124 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
125 &max_linkhdr, 16, "Size of largest link layer header");
126 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
127 &max_protohdr, 40, "Size of largest protocol layer header");
128 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
129 &max_hdr, 16 + 40, "Size of largest link plus protocol header");
130
131 static void
132 max_hdr_grow(void)
133 {
134
135 max_hdr = max_linkhdr + max_protohdr;
136 MPASS(max_hdr <= MHLEN);
137 }
138
139 void
140 max_linkhdr_grow(u_int new)
141 {
142
143 if (new > max_linkhdr) {
144 max_linkhdr = new;
145 max_hdr_grow();
146 }
147 }
148
149 void
150 max_protohdr_grow(u_int new)
151 {
152
153 if (new > max_protohdr) {
154 max_protohdr = new;
155 max_hdr_grow();
156 }
157 }
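/*
 * Editor's illustrative sketch (not part of the original file): a protocol
 * module would typically grow these estimates once at initialization time.
 * The init function name here is hypothetical; the constants and structs
 * are the usual ones from net/ethernet.h and netinet headers.
 */
#if 0
static void
example_proto_init(void)
{
    /* Largest expected headers: IPv6 + TCP over VLAN-tagged Ethernet. */
    max_linkhdr_grow(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
    max_protohdr_grow(sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
}
#endif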
158
159 #ifdef MBUF_STRESS_TEST
160 int m_defragpackets;
161 int m_defragbytes;
162 int m_defraguseless;
163 int m_defragfailure;
164 int m_defragrandomfailures;
165
166 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
167 &m_defragpackets, 0, "");
168 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
169 &m_defragbytes, 0, "");
170 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
171 &m_defraguseless, 0, "");
172 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
173 &m_defragfailure, 0, "");
174 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
175 &m_defragrandomfailures, 0, "");
176 #endif
177
178 /*
179 * Ensure the correct size of various mbuf parameters. It could be off due
180 * to compiler-induced padding and alignment artifacts.
181 */
182 CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
183 CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);
184
185 /*
186 * mbuf data storage should be 64-bit aligned regardless of architectural
187 * pointer size; check this is the case with and without a packet header.
188 */
189 CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
190 CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
191
192 /*
193 * While the specific values here don't matter too much (i.e., +/- a few
194 * words), we do want to ensure that changes to these values are carefully
195 * reasoned about and properly documented. This is especially the case as
196 * network-protocol and device-driver modules encode these layouts, and must
197 * be recompiled if the structures change. Check these values at compile time
198 * against the ones documented in comments in mbuf.h.
199 *
200 * NB: Possibly they should be documented there via #define's and not just
201 * comments.
202 */
203 #if defined(__LP64__)
204 CTASSERT(offsetof(struct mbuf, m_dat) == 32);
205 CTASSERT(sizeof(struct pkthdr) == 64);
206 CTASSERT(sizeof(struct m_ext) == 160);
207 #else
208 CTASSERT(offsetof(struct mbuf, m_dat) == 24);
209 CTASSERT(sizeof(struct pkthdr) == 56);
210 #if defined(__powerpc__) && defined(BOOKE)
211 /* PowerPC booke has 64-bit physical pointers. */
212 CTASSERT(sizeof(struct m_ext) == 176);
213 #else
214 CTASSERT(sizeof(struct m_ext) == 172);
215 #endif
216 #endif
217
218 /*
219 * Assert that the queue(3) macros produce fields of the same size as
220 * an old plain pointer.
221 */
222 #ifdef INVARIANTS
223 static struct mbuf __used m_assertbuf;
224 CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
225 CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
226 CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
227 CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
228 #endif
229
230 /*
231 * Attach the cluster from *m to *n, set up m_ext in *n
232 * and bump the refcount of the cluster.
233 */
234 void
235 mb_dupcl(struct mbuf *n, struct mbuf *m)
236 {
237 volatile u_int *refcnt;
238
239 KASSERT(m->m_flags & (M_EXT | M_EXTPG),
240 ("%s: M_EXT | M_EXTPG not set on %p", __func__, m));
241 KASSERT(!(n->m_flags & (M_EXT | M_EXTPG)),
242 ("%s: M_EXT | M_EXTPG set on %p", __func__, n));
243
244 /*
245 * Cache access optimization.
246 *
247 * o Regular M_EXT storage doesn't need full copy of m_ext, since
248 * the holder of the 'ext_count' is responsible for carrying the
249 * free routine and its arguments.
250 * o M_EXTPG data is split between main part of mbuf and m_ext, the
251 * main part is copied in full, the m_ext part is similar to M_EXT.
252 * o EXT_EXTREF, where 'ext_cnt' doesn't point into mbuf at all, is
253 * special - it needs full copy of m_ext into each mbuf, since any
254 * copy could end up as the last to free.
255 */
256 if (m->m_flags & M_EXTPG) {
257 bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
258 __rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
259 bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
260 } else if (m->m_ext.ext_type == EXT_EXTREF)
261 bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
262 else
263 bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);
264
265 n->m_flags |= m->m_flags & (M_RDONLY | M_EXT | M_EXTPG);
266
267 /* See if this is the mbuf that holds the embedded refcount. */
268 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
269 refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
270 n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
271 } else {
272 KASSERT(m->m_ext.ext_cnt != NULL,
273 ("%s: no refcounting pointer on %p", __func__, m));
274 refcnt = m->m_ext.ext_cnt;
275 }
276
277 if (*refcnt == 1)
278 *refcnt += 1;
279 else
280 atomic_add_int(refcnt, 1);
281 }
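/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * mb_dupcl() pattern, as used by m_copym() and m_split(), for taking a
 * zero-copy reference to another mbuf's cluster.  The function name is
 * hypothetical.
 */
#if 0
static struct mbuf *
example_share_cluster(struct mbuf *m, int how)
{
    struct mbuf *n;

    n = m_get(how, m->m_type);
    if (n == NULL)
        return (NULL);
    n->m_data = m->m_data;    /* point at the same bytes */
    mb_dupcl(n, m);           /* set up m_ext, bump the refcount */
    n->m_len = m->m_len;
    return (n);               /* read-only: check M_WRITABLE() first */
}
#endif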
282
283 void
284 m_demote_pkthdr(struct mbuf *m)
285 {
286
287 M_ASSERTPKTHDR(m);
288 M_ASSERT_NO_SND_TAG(m);
289
290 m_tag_delete_chain(m, NULL);
291 m->m_flags &= ~M_PKTHDR;
292 bzero(&m->m_pkthdr, sizeof(struct pkthdr));
293 }
294
295 /*
296 * Clean up mbuf (chain) from any tags and packet headers.
297 * If "all" is set then the first mbuf in the chain will be
298 * cleaned too.
299 */
300 void
301 m_demote(struct mbuf *m0, int all, int flags)
302 {
303 struct mbuf *m;
304
305 flags |= M_DEMOTEFLAGS;
306
307 for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
308 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
309 __func__, m, m0));
310 if (m->m_flags & M_PKTHDR)
311 m_demote_pkthdr(m);
312 m->m_flags &= flags;
313 }
314 }
315
316 /*
317 * Sanity checks on an mbuf (chain) for use in KASSERT() and general
318 * debugging.
319 * Returns 1 if all tests pass; otherwise returns 0 or panics.
320 * "sanitize": 0 runs M_SANITY_ACTION on a failed check, 1 garbles the
321 * offending fields so that misuse blows up later.
322 */
323 int
324 m_sanity(struct mbuf *m0, int sanitize)
325 {
326 struct mbuf *m;
327 caddr_t a, b;
328 int pktlen = 0;
329
330 #ifdef INVARIANTS
331 #define M_SANITY_ACTION(s) panic("mbuf %p: " s, m)
332 #else
333 #define M_SANITY_ACTION(s) printf("mbuf %p: " s, m)
334 #endif
335
336 for (m = m0; m != NULL; m = m->m_next) {
337 /*
338 * Basic pointer checks. If any of these fails then some
339 * unrelated kernel memory before or after us is trashed.
340 * No way to recover from that.
341 */
342 a = M_START(m);
343 b = a + M_SIZE(m);
344 if ((caddr_t)m->m_data < a)
345 M_SANITY_ACTION("m_data outside mbuf data range left");
346 if ((caddr_t)m->m_data > b)
347 M_SANITY_ACTION("m_data outside mbuf data range right");
348 if ((caddr_t)m->m_data + m->m_len > b)
349 M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
350
351 /* m->m_nextpkt may only be set on first mbuf in chain. */
352 if (m != m0 && m->m_nextpkt != NULL) {
353 if (sanitize) {
354 m_freem(m->m_nextpkt);
355 m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
356 } else
357 M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
358 }
359
360 /* packet length (not mbuf length!) calculation */
361 if (m0->m_flags & M_PKTHDR)
362 pktlen += m->m_len;
363
364 /* m_tags may only be attached to first mbuf in chain. */
365 if (m != m0 && m->m_flags & M_PKTHDR &&
366 !SLIST_EMPTY(&m->m_pkthdr.tags)) {
367 if (sanitize) {
368 m_tag_delete_chain(m, NULL);
369 /* put in 0xDEADC0DE perhaps? */
370 } else
371 M_SANITY_ACTION("m_tags on in-chain mbuf");
372 }
373
374 /* M_PKTHDR may only be set on first mbuf in chain */
375 if (m != m0 && m->m_flags & M_PKTHDR) {
376 if (sanitize) {
377 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
378 m->m_flags &= ~M_PKTHDR;
379 /* put in 0xDEADC0DE and leave hdr flag in */
380 } else
381 M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
382 }
383 }
384 m = m0;
385 if (pktlen && pktlen != m->m_pkthdr.len) {
386 if (sanitize)
387 m->m_pkthdr.len = 0;
388 else
389 M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
390 }
391 return 1;
392
393 #undef M_SANITY_ACTION
394 }
395
396 /*
397 * Non-inlined part of m_init().
398 */
399 int
400 m_pkthdr_init(struct mbuf *m, int how)
401 {
402 #ifdef MAC
403 int error;
404 #endif
405 m->m_data = m->m_pktdat;
406 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
407 #ifdef NUMA
408 m->m_pkthdr.numa_domain = M_NODOM;
409 #endif
410 #ifdef MAC
411 /* If the label init fails, fail the alloc */
412 error = mac_mbuf_init(m, how);
413 if (error)
414 return (error);
415 #endif
416
417 return (0);
418 }
419
420 /*
421 * "Move" mbuf pkthdr from "from" to "to".
422 * "from" must have M_PKTHDR set, and "to" must be empty.
423 */
424 void
425 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
426 {
427
428 #if 0
429 /* see below for why these are not enabled */
430 M_ASSERTPKTHDR(to);
431 /* Note: with MAC, this may not be a good assertion. */
432 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
433 ("m_move_pkthdr: to has tags"));
434 #endif
435 #ifdef MAC
436 /*
437 * XXXMAC: Should this also occur for non-MAC builds?
438 */
439 if (to->m_flags & M_PKTHDR)
440 m_tag_delete_chain(to, NULL);
441 #endif
442 to->m_flags = (from->m_flags & M_COPYFLAGS) |
443 (to->m_flags & (M_EXT | M_EXTPG));
444 if ((to->m_flags & M_EXT) == 0)
445 to->m_data = to->m_pktdat;
446 to->m_pkthdr = from->m_pkthdr; /* especially tags */
447 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
448 from->m_flags &= ~M_PKTHDR;
449 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
450 from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
451 from->m_pkthdr.snd_tag = NULL;
452 }
453 }
454
455 /*
456 * Duplicate "from"'s mbuf pkthdr in "to".
457 * "from" must have M_PKTHDR set, and "to" must be empty.
458 * In particular, this does a deep copy of the packet tags.
459 */
460 int
461 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
462 {
463
464 #if 0
465 /*
466 * The mbuf allocator only initializes the pkthdr
467 * when the mbuf is allocated with m_gethdr(). Many users
468 * (e.g. m_copy*, m_prepend) use m_get() and then
469 * smash the pkthdr as needed causing these
470 * assertions to trip. For now just disable them.
471 */
472 M_ASSERTPKTHDR(to);
473 /* Note: with MAC, this may not be a good assertion. */
474 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
475 #endif
476 MBUF_CHECKSLEEP(how);
477 #ifdef MAC
478 if (to->m_flags & M_PKTHDR)
479 m_tag_delete_chain(to, NULL);
480 #endif
481 to->m_flags = (from->m_flags & M_COPYFLAGS) |
482 (to->m_flags & (M_EXT | M_EXTPG));
483 if ((to->m_flags & M_EXT) == 0)
484 to->m_data = to->m_pktdat;
485 to->m_pkthdr = from->m_pkthdr;
486 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
487 m_snd_tag_ref(from->m_pkthdr.snd_tag);
488 SLIST_INIT(&to->m_pkthdr.tags);
489 return (m_tag_copy_chain(to, from, how));
490 }
491
492 /*
493 * Lesser-used path for M_PREPEND:
494 * allocate new mbuf to prepend to chain,
495 * copy junk along.
496 */
497 struct mbuf *
498 m_prepend(struct mbuf *m, int len, int how)
499 {
500 struct mbuf *mn;
501
502 if (m->m_flags & M_PKTHDR)
503 mn = m_gethdr(how, m->m_type);
504 else
505 mn = m_get(how, m->m_type);
506 if (mn == NULL) {
507 m_freem(m);
508 return (NULL);
509 }
510 if (m->m_flags & M_PKTHDR)
511 m_move_pkthdr(mn, m);
512 mn->m_next = m;
513 m = mn;
514 if (len < M_SIZE(m))
515 M_ALIGN(m, len);
516 m->m_len = len;
517 return (m);
518 }
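/*
 * Editor's illustrative sketch (not part of the original file): callers
 * normally use the M_PREPEND() macro, which only falls back to m_prepend()
 * when there is no leading space in the first mbuf.
 */
#if 0
    M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT);
    if (m == NULL)
        return (ENOBUFS);    /* the chain was freed for us */
#endif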
519
520 /*
521 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
522 * continuing for "len" bytes. If len is M_COPYALL, copy to end of chain.
523 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
524 * Note that the copy is read-only, because clusters are not copied,
525 * only their reference counts are incremented.
526 */
527 struct mbuf *
528 m_copym(struct mbuf *m, int off0, int len, int wait)
529 {
530 struct mbuf *n, **np;
531 int off = off0;
532 struct mbuf *top;
533 int copyhdr = 0;
534
535 KASSERT(off >= 0, ("m_copym, negative off %d", off));
536 KASSERT(len >= 0, ("m_copym, negative len %d", len));
537 MBUF_CHECKSLEEP(wait);
538 if (off == 0 && m->m_flags & M_PKTHDR)
539 copyhdr = 1;
540 while (off > 0) {
541 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
542 if (off < m->m_len)
543 break;
544 off -= m->m_len;
545 m = m->m_next;
546 }
547 np = ⊤
548 top = NULL;
549 while (len > 0) {
550 if (m == NULL) {
551 KASSERT(len == M_COPYALL,
552 ("m_copym, length > size of mbuf chain"));
553 break;
554 }
555 if (copyhdr)
556 n = m_gethdr(wait, m->m_type);
557 else
558 n = m_get(wait, m->m_type);
559 *np = n;
560 if (n == NULL)
561 goto nospace;
562 if (copyhdr) {
563 if (!m_dup_pkthdr(n, m, wait))
564 goto nospace;
565 if (len == M_COPYALL)
566 n->m_pkthdr.len -= off0;
567 else
568 n->m_pkthdr.len = len;
569 copyhdr = 0;
570 }
571 n->m_len = min(len, m->m_len - off);
572 if (m->m_flags & (M_EXT | M_EXTPG)) {
573 n->m_data = m->m_data + off;
574 mb_dupcl(n, m);
575 } else
576 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
577 (u_int)n->m_len);
578 if (len != M_COPYALL)
579 len -= n->m_len;
580 off = 0;
581 m = m->m_next;
582 np = &n->m_next;
583 }
584
585 return (top);
586 nospace:
587 m_freem(top);
588 return (NULL);
589 }
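/*
 * Editor's illustrative sketch (not part of the original file): a copy made
 * by m_copym() shares clusters with the original, so a caller that intends
 * to modify the data must check M_WRITABLE() or use m_dup() instead.
 */
#if 0
    n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
    if (n != NULL && !M_WRITABLE(n))
        ;    /* n aliases m's clusters; treat the data as read-only */
#endif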
590
591 /*
592 * Copy an entire packet, including header (which must be present).
593 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
594 * Note that the copy is read-only, because clusters are not copied,
595 * only their reference counts are incremented.
596 * Preserve alignment of the first mbuf so if the creator has left
597 * some room at the beginning (e.g. for inserting protocol headers)
598 * the copies still have the room available.
599 */
600 struct mbuf *
601 m_copypacket(struct mbuf *m, int how)
602 {
603 struct mbuf *top, *n, *o;
604
605 MBUF_CHECKSLEEP(how);
606 n = m_get(how, m->m_type);
607 top = n;
608 if (n == NULL)
609 goto nospace;
610
611 if (!m_dup_pkthdr(n, m, how))
612 goto nospace;
613 n->m_len = m->m_len;
614 if (m->m_flags & (M_EXT | M_EXTPG)) {
615 n->m_data = m->m_data;
616 mb_dupcl(n, m);
617 } else {
618 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
619 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
620 }
621
622 m = m->m_next;
623 while (m) {
624 o = m_get(how, m->m_type);
625 if (o == NULL)
626 goto nospace;
627
628 n->m_next = o;
629 n = n->m_next;
630
631 n->m_len = m->m_len;
632 if (m->m_flags & (M_EXT | M_EXTPG)) {
633 n->m_data = m->m_data;
634 mb_dupcl(n, m);
635 } else {
636 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
637 }
638
639 m = m->m_next;
640 }
641 return top;
642 nospace:
643 m_freem(top);
644 return (NULL);
645 }
646
647 static void
648 m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
649 {
650 struct iovec iov;
651 struct uio uio;
652 int error __diagused;
653
654 KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
655 KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
656 KASSERT(off < m->m_len,
657 ("m_copyfromunmapped: off exceeds mbuf length"));
658 iov.iov_base = cp;
659 iov.iov_len = len;
660 uio.uio_resid = len;
661 uio.uio_iov = &iov;
662 uio.uio_segflg = UIO_SYSSPACE;
663 uio.uio_iovcnt = 1;
664 uio.uio_offset = 0;
665 uio.uio_rw = UIO_READ;
666 error = m_unmapped_uiomove(m, off, &uio, len);
667 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
668 len));
669 }
670
671 /*
672 * Copy data from an mbuf chain starting "off" bytes from the beginning,
673 * continuing for "len" bytes, into the indicated buffer.
674 */
675 void
676 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
677 {
678 u_int count;
679
680 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
681 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
682 while (off > 0) {
683 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
684 if (off < m->m_len)
685 break;
686 off -= m->m_len;
687 m = m->m_next;
688 }
689 while (len > 0) {
690 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
691 count = min(m->m_len - off, len);
692 if ((m->m_flags & M_EXTPG) != 0)
693 m_copyfromunmapped(m, off, count, cp);
694 else
695 bcopy(mtod(m, caddr_t) + off, cp, count);
696 len -= count;
697 cp += count;
698 off = 0;
699 m = m->m_next;
700 }
701 }
702
703 /*
704 * Copy a packet header mbuf chain into a completely new chain, including
705 * copying any mbuf clusters. Use this instead of m_copypacket() when
706 * you need a writable copy of an mbuf chain.
707 */
708 struct mbuf *
709 m_dup(const struct mbuf *m, int how)
710 {
711 struct mbuf **p, *top = NULL;
712 int remain, moff, nsize;
713
714 MBUF_CHECKSLEEP(how);
715 /* Sanity check */
716 if (m == NULL)
717 return (NULL);
718 M_ASSERTPKTHDR(m);
719
720 /* While there's more data, get a new mbuf, tack it on, and fill it */
721 remain = m->m_pkthdr.len;
722 moff = 0;
723 p = ⊤
724 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
725 struct mbuf *n;
726
727 /* Get the next new mbuf */
728 if (remain >= MINCLSIZE) {
729 n = m_getcl(how, m->m_type, 0);
730 nsize = MCLBYTES;
731 } else {
732 n = m_get(how, m->m_type);
733 nsize = MLEN;
734 }
735 if (n == NULL)
736 goto nospace;
737
738 if (top == NULL) { /* First one, must be PKTHDR */
739 if (!m_dup_pkthdr(n, m, how)) {
740 m_free(n);
741 goto nospace;
742 }
743 if ((n->m_flags & M_EXT) == 0)
744 nsize = MHLEN;
745 n->m_flags &= ~M_RDONLY;
746 }
747 n->m_len = 0;
748
749 /* Link it into the new chain */
750 *p = n;
751 p = &n->m_next;
752
753 /* Copy data from original mbuf(s) into new mbuf */
754 while (n->m_len < nsize && m != NULL) {
755 int chunk = min(nsize - n->m_len, m->m_len - moff);
756
757 m_copydata(m, moff, chunk, n->m_data + n->m_len);
758 moff += chunk;
759 n->m_len += chunk;
760 remain -= chunk;
761 if (moff == m->m_len) {
762 m = m->m_next;
763 moff = 0;
764 }
765 }
766
767 /* Check correct total mbuf length */
768 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
769 ("%s: bogus m_pkthdr.len", __func__));
770 }
771 return (top);
772
773 nospace:
774 m_freem(top);
775 return (NULL);
776 }
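/*
 * Editor's illustrative sketch (not part of the original file): m_dup() is
 * the deep-copy counterpart of m_copypacket(), suitable when the copy must
 * be modified in place.
 */
#if 0
    writable = m_dup(m, M_NOWAIT);    /* NULL on allocation failure */
    if (writable != NULL)
        bzero(mtod(writable, char *), sizeof(struct ip));    /* safe */
#endif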
777
778 /*
779 * Concatenate mbuf chain n to m.
780 * Both chains must be of the same type (e.g. MT_DATA).
781 * Any m_pkthdr is not updated.
782 */
783 void
784 m_cat(struct mbuf *m, struct mbuf *n)
785 {
786 while (m->m_next)
787 m = m->m_next;
788 while (n) {
789 if (!M_WRITABLE(m) ||
790 (n->m_flags & M_EXTPG) != 0 ||
791 M_TRAILINGSPACE(m) < n->m_len) {
792 /* just join the two chains */
793 m->m_next = n;
794 return;
795 }
796 /* splat the data from one into the other */
797 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
798 (u_int)n->m_len);
799 m->m_len += n->m_len;
800 n = m_free(n);
801 }
802 }
803
804 /*
805 * Concatenate two pkthdr mbuf chains.
806 */
807 void
808 m_catpkt(struct mbuf *m, struct mbuf *n)
809 {
810
811 M_ASSERTPKTHDR(m);
812 M_ASSERTPKTHDR(n);
813
814 m->m_pkthdr.len += n->m_pkthdr.len;
815 m_demote(n, 1, 0);
816
817 m_cat(m, n);
818 }
819
820 void
821 m_adj(struct mbuf *mp, int req_len)
822 {
823 int len = req_len;
824 struct mbuf *m;
825 int count;
826
827 if ((m = mp) == NULL)
828 return;
829 if (len >= 0) {
830 /*
831 * Trim from head.
832 */
833 while (m != NULL && len > 0) {
834 if (m->m_len <= len) {
835 len -= m->m_len;
836 m->m_len = 0;
837 m = m->m_next;
838 } else {
839 m->m_len -= len;
840 m->m_data += len;
841 len = 0;
842 }
843 }
844 if (mp->m_flags & M_PKTHDR)
845 mp->m_pkthdr.len -= (req_len - len);
846 } else {
847 /*
848 * Trim from tail. Scan the mbuf chain,
849 * calculating its length and finding the last mbuf.
850 * If the adjustment only affects this mbuf, then just
851 * adjust and return. Otherwise, rescan and truncate
852 * after the remaining size.
853 */
854 len = -len;
855 count = 0;
856 for (;;) {
857 count += m->m_len;
858 if (m->m_next == (struct mbuf *)0)
859 break;
860 m = m->m_next;
861 }
862 if (m->m_len >= len) {
863 m->m_len -= len;
864 if (mp->m_flags & M_PKTHDR)
865 mp->m_pkthdr.len -= len;
866 return;
867 }
868 count -= len;
869 if (count < 0)
870 count = 0;
871 /*
872 * Correct length for chain is "count".
873 * Find the mbuf with last data, adjust its length,
874 * and toss data from remaining mbufs on chain.
875 */
876 m = mp;
877 if (m->m_flags & M_PKTHDR)
878 m->m_pkthdr.len = count;
879 for (; m; m = m->m_next) {
880 if (m->m_len >= count) {
881 m->m_len = count;
882 if (m->m_next != NULL) {
883 m_freem(m->m_next);
884 m->m_next = NULL;
885 }
886 break;
887 }
888 count -= m->m_len;
889 }
890 }
891 }
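/*
 * Editor's illustrative sketch (not part of the original file): positive
 * lengths trim from the head of the chain, negative lengths from the tail.
 */
#if 0
    m_adj(m, ETHER_HDR_LEN);     /* strip the Ethernet header */
    m_adj(m, -ETHER_CRC_LEN);    /* strip a trailing FCS */
#endif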
892
893 void
894 m_adj_decap(struct mbuf *mp, int len)
895 {
896 uint8_t rsstype;
897
898 m_adj(mp, len);
899 if ((mp->m_flags & M_PKTHDR) != 0) {
900 /*
901 * If flowid was calculated by card from the inner
902 * headers, move flowid to the decapsulated mbuf
903 * chain, otherwise clear. This depends on the
904 * internals of m_adj, which keeps pkthdr as is, in
905 * particular not changing rsstype and flowid.
906 */
907 rsstype = mp->m_pkthdr.rsstype;
908 if ((rsstype & M_HASHTYPE_INNER) != 0) {
909 M_HASHTYPE_SET(mp, rsstype & ~M_HASHTYPE_INNER);
910 } else {
911 M_HASHTYPE_CLEAR(mp);
912 }
913 }
914 }
915
916 /*
917 * Rearrange an mbuf chain so that len bytes are contiguous
918 * and in the data area of an mbuf (so that mtod will work
919 * for a structure of size len). Returns the resulting
920 * mbuf chain on success, frees it and returns null on failure.
921 * If there is room, it will add up to max_protohdr-len extra bytes to the
922 * contiguous region in an attempt to avoid being called next time.
923 */
924 struct mbuf *
925 m_pullup(struct mbuf *n, int len)
926 {
927 struct mbuf *m;
928 int count;
929 int space;
930
931 KASSERT((n->m_flags & M_EXTPG) == 0,
932 ("%s: unmapped mbuf %p", __func__, n));
933
934 /*
935 * If first mbuf has no cluster, and has room for len bytes
936 * without shifting current data, pullup into it,
937 * otherwise allocate a new mbuf to prepend to the chain.
938 */
939 if ((n->m_flags & M_EXT) == 0 &&
940 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
941 if (n->m_len >= len)
942 return (n);
943 m = n;
944 n = n->m_next;
945 len -= m->m_len;
946 } else {
947 if (len > MHLEN)
948 goto bad;
949 m = m_get(M_NOWAIT, n->m_type);
950 if (m == NULL)
951 goto bad;
952 if (n->m_flags & M_PKTHDR)
953 m_move_pkthdr(m, n);
954 }
955 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
956 do {
957 count = min(min(max(len, max_protohdr), space), n->m_len);
958 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
959 (u_int)count);
960 len -= count;
961 m->m_len += count;
962 n->m_len -= count;
963 space -= count;
964 if (n->m_len)
965 n->m_data += count;
966 else
967 n = m_free(n);
968 } while (len > 0 && n);
969 if (len > 0) {
970 (void) m_free(m);
971 goto bad;
972 }
973 m->m_next = n;
974 return (m);
975 bad:
976 m_freem(n);
977 return (NULL);
978 }
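/*
 * Editor's illustrative sketch (not part of the original file): the
 * canonical m_pullup() idiom makes a header contiguous before mtod().
 * Note that on failure the chain has already been freed.
 */
#if 0
    if (m->m_len < sizeof(struct ip) &&
        (m = m_pullup(m, sizeof(struct ip))) == NULL)
        return;    /* chain already freed by m_pullup() */
    ip = mtod(m, struct ip *);
#endif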
979
980 /*
981 * Like m_pullup(), except a new mbuf is always allocated, and we allow
982 * the amount of empty space before the data in the new mbuf to be specified
983 * (in the event that the caller expects to prepend later).
984 */
985 struct mbuf *
986 m_copyup(struct mbuf *n, int len, int dstoff)
987 {
988 struct mbuf *m;
989 int count, space;
990
991 if (len > (MHLEN - dstoff))
992 goto bad;
993 m = m_get(M_NOWAIT, n->m_type);
994 if (m == NULL)
995 goto bad;
996 if (n->m_flags & M_PKTHDR)
997 m_move_pkthdr(m, n);
998 m->m_data += dstoff;
999 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1000 do {
1001 count = min(min(max(len, max_protohdr), space), n->m_len);
1002 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
1003 (unsigned)count);
1004 len -= count;
1005 m->m_len += count;
1006 n->m_len -= count;
1007 space -= count;
1008 if (n->m_len)
1009 n->m_data += count;
1010 else
1011 n = m_free(n);
1012 } while (len > 0 && n);
1013 if (len > 0) {
1014 (void) m_free(m);
1015 goto bad;
1016 }
1017 m->m_next = n;
1018 return (m);
1019 bad:
1020 m_freem(n);
1021 return (NULL);
1022 }
1023
1024 /*
1025 * Partition an mbuf chain in two pieces, returning the tail --
1026 * all but the first len0 bytes. In case of failure, it returns NULL and
1027 * attempts to restore the chain to its original state.
1028 *
1029 * Note that the resulting mbufs might be read-only, because the new
1030 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1031 * the "breaking point" happens to lie within a cluster mbuf. Use the
1032 * M_WRITABLE() macro to check for this case.
1033 */
1034 struct mbuf *
1035 m_split(struct mbuf *m0, int len0, int wait)
1036 {
1037 struct mbuf *m, *n;
1038 u_int len = len0, remain;
1039
1040 MBUF_CHECKSLEEP(wait);
1041 for (m = m0; m && len > m->m_len; m = m->m_next)
1042 len -= m->m_len;
1043 if (m == NULL)
1044 return (NULL);
1045 remain = m->m_len - len;
1046 if (m0->m_flags & M_PKTHDR && remain == 0) {
1047 n = m_gethdr(wait, m0->m_type);
1048 if (n == NULL)
1049 return (NULL);
1050 n->m_next = m->m_next;
1051 m->m_next = NULL;
1052 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1053 n->m_pkthdr.snd_tag =
1054 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
1055 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
1056 } else
1057 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1058 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1059 m0->m_pkthdr.len = len0;
1060 return (n);
1061 } else if (m0->m_flags & M_PKTHDR) {
1062 n = m_gethdr(wait, m0->m_type);
1063 if (n == NULL)
1064 return (NULL);
1065 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1066 n->m_pkthdr.snd_tag =
1067 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
1068 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
1069 } else
1070 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1071 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1072 m0->m_pkthdr.len = len0;
1073 if (m->m_flags & (M_EXT | M_EXTPG))
1074 goto extpacket;
1075 if (remain > MHLEN) {
1076 /* m can't be the lead packet */
1077 M_ALIGN(n, 0);
1078 n->m_next = m_split(m, len, wait);
1079 if (n->m_next == NULL) {
1080 (void) m_free(n);
1081 return (NULL);
1082 } else {
1083 n->m_len = 0;
1084 return (n);
1085 }
1086 } else
1087 M_ALIGN(n, remain);
1088 } else if (remain == 0) {
1089 n = m->m_next;
1090 m->m_next = NULL;
1091 return (n);
1092 } else {
1093 n = m_get(wait, m->m_type);
1094 if (n == NULL)
1095 return (NULL);
1096 M_ALIGN(n, remain);
1097 }
1098 extpacket:
1099 if (m->m_flags & (M_EXT | M_EXTPG)) {
1100 n->m_data = m->m_data + len;
1101 mb_dupcl(n, m);
1102 } else {
1103 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1104 }
1105 n->m_len = remain;
1106 m->m_len = len;
1107 n->m_next = m->m_next;
1108 m->m_next = NULL;
1109 return (n);
1110 }
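/*
 * Editor's illustrative sketch (not part of the original file): splitting a
 * packet at a segment boundary.  On success 'rest' owns everything past
 * 'mss' bytes and m0 is truncated; both names are hypothetical locals.
 */
#if 0
    rest = m_split(m0, mss, M_NOWAIT);
    if (rest == NULL)
        goto drop;    /* m0 restored as far as possible */
#endif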
1111
1112 /*
1113 * Partition mchain in two pieces, keeping len0 bytes in head and transferring
1114 * remainder to tail. In case of failure, both chains are left untouched.
1115 * M_EOR is observed correctly.
1116 * Resulting mbufs might be read-only.
1117 */
1118 int
1119 mc_split(struct mchain *head, struct mchain *tail, u_int len0, int wait)
1120 {
1121 struct mbuf *m, *n;
1122 u_int len, mlen, remain;
1123
1124 MPASS(!(mc_first(head)->m_flags & M_PKTHDR));
1125 MBUF_CHECKSLEEP(wait);
1126
1127 mlen = 0;
1128 len = len0;
1129 STAILQ_FOREACH(m, &head->mc_q, m_stailq) {
1130 mlen += MSIZE;
1131 if (m->m_flags & M_EXT)
1132 mlen += m->m_ext.ext_size;
1133 if (len > m->m_len)
1134 len -= m->m_len;
1135 else
1136 break;
1137 }
1138 if (__predict_false(m == NULL)) {
1139 *tail = MCHAIN_INITIALIZER(tail);
1140 return (0);
1141 }
1142 remain = m->m_len - len;
1143 if (remain > 0) {
1144 if (__predict_false((n = m_get(wait, m->m_type)) == NULL))
1145 return (ENOMEM);
1146 m_align(n, remain);
1147 if (m->m_flags & M_EXT) {
1148 n->m_data = m->m_data + len;
1149 mb_dupcl(n, m);
1150 } else
1151 bcopy(mtod(m, char *) + len, mtod(n, char *), remain);
1152 }
1153
1154 /* XXXGL: need STAILQ_SPLIT */
1155 STAILQ_FIRST(&tail->mc_q) = STAILQ_NEXT(m, m_stailq);
1156 tail->mc_q.stqh_last = head->mc_q.stqh_last;
1157 tail->mc_len = head->mc_len - len0;
1158 tail->mc_mlen = head->mc_mlen - mlen;
1159 if (remain > 0) {
1160 MPASS(n->m_len == 0);
1161 mc_prepend(tail, n);
1162 n->m_len = remain;
1163 m->m_len -= remain;
1164 if (m->m_flags & M_EOR) {
1165 m->m_flags &= ~M_EOR;
1166 n->m_flags |= M_EOR;
1167 }
1168 }
1169 head->mc_q.stqh_last = &STAILQ_NEXT(m, m_stailq);
1170 STAILQ_NEXT(m, m_stailq) = NULL;
1171 head->mc_len = len0;
1172 head->mc_mlen = mlen;
1173
1174 return (0);
1175 }
1176
1177 /*
1178 * Routine to copy from device local memory into mbufs.
1179 * Note that the `off' argument is the offset into the first mbuf of the
1180 * target chain at which to begin copying the data.
1181 */
1182 struct mbuf *
1183 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
1184 void (*copy)(char *from, caddr_t to, u_int len))
1185 {
1186 struct mbuf *m;
1187 struct mbuf *top = NULL, **mp = ⊤
1188 int len;
1189
1190 if (off < 0 || off > MHLEN)
1191 return (NULL);
1192
1193 while (totlen > 0) {
1194 if (top == NULL) { /* First one, must be PKTHDR */
1195 if (totlen + off >= MINCLSIZE) {
1196 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1197 len = MCLBYTES;
1198 } else {
1199 m = m_gethdr(M_NOWAIT, MT_DATA);
1200 len = MHLEN;
1201
1202 /* Place initial small packet/header at end of mbuf */
1203 if (m && totlen + off + max_linkhdr <= MHLEN) {
1204 m->m_data += max_linkhdr;
1205 len -= max_linkhdr;
1206 }
1207 }
1208 if (m == NULL)
1209 return NULL;
1210 m->m_pkthdr.rcvif = ifp;
1211 m->m_pkthdr.len = totlen;
1212 } else {
1213 if (totlen + off >= MINCLSIZE) {
1214 m = m_getcl(M_NOWAIT, MT_DATA, 0);
1215 len = MCLBYTES;
1216 } else {
1217 m = m_get(M_NOWAIT, MT_DATA);
1218 len = MLEN;
1219 }
1220 if (m == NULL) {
1221 m_freem(top);
1222 return NULL;
1223 }
1224 }
1225 if (off) {
1226 m->m_data += off;
1227 len -= off;
1228 off = 0;
1229 }
1230 m->m_len = len = min(totlen, len);
1231 if (copy)
1232 copy(buf, mtod(m, caddr_t), (u_int)len);
1233 else
1234 bcopy(buf, mtod(m, caddr_t), (u_int)len);
1235 buf += len;
1236 *mp = m;
1237 mp = &m->m_next;
1238 totlen -= len;
1239 }
1240 return (top);
1241 }
1242
1243 static void
1244 m_copytounmapped(const struct mbuf *m, int off, int len, c_caddr_t cp)
1245 {
1246 struct iovec iov;
1247 struct uio uio;
1248 int error __diagused;
1249
1250 KASSERT(off >= 0, ("m_copytounmapped: negative off %d", off));
1251 KASSERT(len >= 0, ("m_copytounmapped: negative len %d", len));
1252 KASSERT(off < m->m_len, ("m_copytounmapped: len exceeds mbuf length"));
1253 iov.iov_base = __DECONST(caddr_t, cp);
1254 iov.iov_len = len;
1255 uio.uio_resid = len;
1256 uio.uio_iov = &iov;
1257 uio.uio_segflg = UIO_SYSSPACE;
1258 uio.uio_iovcnt = 1;
1259 uio.uio_offset = 0;
1260 uio.uio_rw = UIO_WRITE;
1261 error = m_unmapped_uiomove(m, off, &uio, len);
1262 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
1263 len));
1264 }
1265
1266 /*
1267 * Copy data from a buffer back into the indicated mbuf chain,
1268 * starting "off" bytes from the beginning, extending the mbuf
1269 * chain if necessary.
1270 */
1271 void
1272 m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1273 {
1274 int mlen;
1275 struct mbuf *m = m0, *n;
1276 int totlen = 0;
1277
1278 if (m0 == NULL)
1279 return;
1280 while (off > (mlen = m->m_len)) {
1281 off -= mlen;
1282 totlen += mlen;
1283 if (m->m_next == NULL) {
1284 n = m_get(M_NOWAIT, m->m_type);
1285 if (n == NULL)
1286 goto out;
1287 bzero(mtod(n, caddr_t), MLEN);
1288 n->m_len = min(MLEN, len + off);
1289 m->m_next = n;
1290 }
1291 m = m->m_next;
1292 }
1293 while (len > 0) {
1294 if (m->m_next == NULL && (len > m->m_len - off)) {
1295 m->m_len += min(len - (m->m_len - off),
1296 M_TRAILINGSPACE(m));
1297 }
1298 mlen = min (m->m_len - off, len);
1299 if ((m->m_flags & M_EXTPG) != 0)
1300 m_copytounmapped(m, off, mlen, cp);
1301 else
1302 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1303 cp += mlen;
1304 len -= mlen;
1305 mlen += off;
1306 off = 0;
1307 totlen += mlen;
1308 if (len == 0)
1309 break;
1310 if (m->m_next == NULL) {
1311 n = m_get(M_NOWAIT, m->m_type);
1312 if (n == NULL)
1313 break;
1314 n->m_len = min(MLEN, len);
1315 m->m_next = n;
1316 }
1317 m = m->m_next;
1318 }
1319 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1320 m->m_pkthdr.len = totlen;
1321 }
1322
1323 /*
1324 * Append the specified data to the indicated mbuf chain.
1325 * Extend the mbuf chain if the new data does not fit in
1326 * existing space.
1327 *
1328 * Return 1 if able to complete the job; otherwise 0.
1329 */
1330 int
1331 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1332 {
1333 struct mbuf *m, *n;
1334 int remainder, space;
1335
1336 for (m = m0; m->m_next != NULL; m = m->m_next)
1337 ;
1338 remainder = len;
1339 space = M_TRAILINGSPACE(m);
1340 if (space > 0) {
1341 /*
1342 * Copy into available space.
1343 */
1344 if (space > remainder)
1345 space = remainder;
1346 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1347 m->m_len += space;
1348 cp += space, remainder -= space;
1349 }
1350 while (remainder > 0) {
1351 /*
1352 * Allocate a new mbuf; could check space
1353 * and allocate a cluster instead.
1354 */
1355 n = m_get(M_NOWAIT, m->m_type);
1356 if (n == NULL)
1357 break;
1358 n->m_len = min(MLEN, remainder);
1359 bcopy(cp, mtod(n, caddr_t), n->m_len);
1360 cp += n->m_len, remainder -= n->m_len;
1361 m->m_next = n;
1362 m = n;
1363 }
1364 if (m0->m_flags & M_PKTHDR)
1365 m0->m_pkthdr.len += len - remainder;
1366 return (remainder == 0);
1367 }
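/*
 * Editor's illustrative sketch (not part of the original file): m_append()
 * extends the chain at its tail, while m_copyback() writes at an arbitrary
 * offset, growing the chain if needed.  'trailer' is a hypothetical local.
 */
#if 0
    if (!m_append(m, sizeof(trailer), (c_caddr_t)&trailer))
        goto drop;    /* allocation failed mid-append */
#endif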
1368
1369 static int
1370 m_apply_extpg_one(struct mbuf *m, int off, int len,
1371 int (*f)(void *, void *, u_int), void *arg)
1372 {
1373 void *p;
1374 u_int i, count, pgoff, pglen;
1375 int rval;
1376
1377 KASSERT(PMAP_HAS_DMAP,
1378 ("m_apply_extpg_one does not support unmapped mbufs"));
1379 off += mtod(m, vm_offset_t);
1380 if (off < m->m_epg_hdrlen) {
1381 count = min(m->m_epg_hdrlen - off, len);
1382 rval = f(arg, m->m_epg_hdr + off, count);
1383 if (rval)
1384 return (rval);
1385 len -= count;
1386 off = 0;
1387 } else
1388 off -= m->m_epg_hdrlen;
1389 pgoff = m->m_epg_1st_off;
1390 for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
1391 pglen = m_epg_pagelen(m, i, pgoff);
1392 if (off < pglen) {
1393 count = min(pglen - off, len);
1394 p = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff + off);
1395 rval = f(arg, p, count);
1396 if (rval)
1397 return (rval);
1398 len -= count;
1399 off = 0;
1400 } else
1401 off -= pglen;
1402 pgoff = 0;
1403 }
1404 if (len > 0) {
1405 KASSERT(off < m->m_epg_trllen,
1406 ("m_apply_extpg_one: offset beyond trailer"));
1407 KASSERT(len <= m->m_epg_trllen - off,
1408 ("m_apply_extpg_one: length beyond trailer"));
1409 return (f(arg, m->m_epg_trail + off, len));
1410 }
1411 return (0);
1412 }
1413
1414 /* Apply function f to the data in a single mbuf. */
1415 static int
1416 m_apply_one(struct mbuf *m, int off, int len,
1417 int (*f)(void *, void *, u_int), void *arg)
1418 {
1419 if ((m->m_flags & M_EXTPG) != 0)
1420 return (m_apply_extpg_one(m, off, len, f, arg));
1421 else
1422 return (f(arg, mtod(m, caddr_t) + off, len));
1423 }
1424
1425 /*
1426 * Apply function f to the data in an mbuf chain starting "off" bytes from
1427 * the beginning, continuing for "len" bytes.
1428 */
1429 int
1430 m_apply(struct mbuf *m, int off, int len,
1431 int (*f)(void *, void *, u_int), void *arg)
1432 {
1433 u_int count;
1434 int rval;
1435
1436 KASSERT(off >= 0, ("m_apply, negative off %d", off));
1437 KASSERT(len >= 0, ("m_apply, negative len %d", len));
1438 while (off > 0) {
1439 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1440 if (off < m->m_len)
1441 break;
1442 off -= m->m_len;
1443 m = m->m_next;
1444 }
1445 while (len > 0) {
1446 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1447 count = min(m->m_len - off, len);
1448 rval = m_apply_one(m, off, count, f, arg);
1449 if (rval)
1450 return (rval);
1451 len -= count;
1452 off = 0;
1453 m = m->m_next;
1454 }
1455 return (0);
1456 }
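/*
 * Editor's illustrative sketch (not part of the original file): m_apply()
 * visits each contiguous region in turn, which suits incremental digests
 * and checksums.  The callback name below is hypothetical.
 */
#if 0
static int
example_sum_cb(void *arg, void *data, u_int len)
{
    uint32_t *sum = arg;
    u_char *p = data;

    while (len-- > 0)
        *sum += *p++;
    return (0);    /* a non-zero return aborts the walk */
}

/* Then, over a whole chain: */
/* error = m_apply(m, 0, m_length(m, NULL), example_sum_cb, &sum); */
#endif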
1457
1458 /*
1459 * Return a pointer to mbuf/offset of location in mbuf chain.
1460 */
1461 struct mbuf *
1462 m_getptr(struct mbuf *m, int loc, int *off)
1463 {
1464
1465 while (loc >= 0) {
1466 /* Normal end of search. */
1467 if (m->m_len > loc) {
1468 *off = loc;
1469 return (m);
1470 } else {
1471 loc -= m->m_len;
1472 if (m->m_next == NULL) {
1473 if (loc == 0) {
1474 /* Point at the end of valid data. */
1475 *off = m->m_len;
1476 return (m);
1477 }
1478 return (NULL);
1479 }
1480 m = m->m_next;
1481 }
1482 }
1483 return (NULL);
1484 }
1485
1486 void
1487 m_print(const struct mbuf *m, int maxlen)
1488 {
1489 int len;
1490 int pdata;
1491 const struct mbuf *m2;
1492
1493 if (m == NULL) {
1494 printf("mbuf: %p\n", m);
1495 return;
1496 }
1497
1498 if (m->m_flags & M_PKTHDR)
1499 len = m->m_pkthdr.len;
1500 else
1501 len = -1;
1502 m2 = m;
1503 while (m2 != NULL && (len == -1 || len)) {
1504 pdata = m2->m_len;
1505 if (maxlen != -1 && pdata > maxlen)
1506 pdata = maxlen;
1507 printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1508 m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1509 "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1510 "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1511 if (pdata)
1512 printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1513 if (len != -1)
1514 len -= m2->m_len;
1515 m2 = m2->m_next;
1516 }
1517 if (len > 0)
1518 printf("%d bytes unaccounted for.\n", len);
1519 return;
1520 }
1521
1522 u_int
1523 m_fixhdr(struct mbuf *m0)
1524 {
1525 u_int len;
1526
1527 len = m_length(m0, NULL);
1528 m0->m_pkthdr.len = len;
1529 return (len);
1530 }
1531
1532 u_int
1533 m_length(struct mbuf *m0, struct mbuf **last)
1534 {
1535 struct mbuf *m;
1536 u_int len;
1537
1538 len = 0;
1539 for (m = m0; m != NULL; m = m->m_next) {
1540 len += m->m_len;
1541 if (m->m_next == NULL)
1542 break;
1543 }
1544 if (last != NULL)
1545 *last = m;
1546 return (len);
1547 }
1548
1549 /*
1550 * Defragment an mbuf chain, returning the shortest possible
1551 * chain of mbufs and clusters. If allocation fails and
1552 * this cannot be completed, NULL will be returned, but
1553 * the passed in chain will be unchanged. Upon success,
1554 * the original chain will be freed, and the new chain
1555 * will be returned.
1556 *
1557 * If a chain without a packet header is passed in, the original
1558 * mbuf chain will be returned unharmed.
1559 */
1560 struct mbuf *
1561 m_defrag(struct mbuf *m0, int how)
1562 {
1563 struct mbuf *m_new = NULL, *m_final = NULL;
1564 int progress = 0, length;
1565
1566 MBUF_CHECKSLEEP(how);
1567 if (!(m0->m_flags & M_PKTHDR))
1568 return (m0);
1569
1570 m_fixhdr(m0); /* Needed sanity check */
1571
1572 #ifdef MBUF_STRESS_TEST
1573 if (m_defragrandomfailures) {
1574 int temp = arc4random() & 0xff;
1575 if (temp == 0xba)
1576 goto nospace;
1577 }
1578 #endif
1579
1580 if (m0->m_pkthdr.len > MHLEN)
1581 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1582 else
1583 m_final = m_gethdr(how, MT_DATA);
1584
1585 if (m_final == NULL)
1586 goto nospace;
1587
1588 if (m_dup_pkthdr(m_final, m0, how) == 0)
1589 goto nospace;
1590
1591 m_new = m_final;
1592
1593 while (progress < m0->m_pkthdr.len) {
1594 length = m0->m_pkthdr.len - progress;
1595 if (length > MCLBYTES)
1596 length = MCLBYTES;
1597
1598 if (m_new == NULL) {
1599 if (length > MLEN)
1600 m_new = m_getcl(how, MT_DATA, 0);
1601 else
1602 m_new = m_get(how, MT_DATA);
1603 if (m_new == NULL)
1604 goto nospace;
1605 }
1606
1607 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1608 progress += length;
1609 m_new->m_len = length;
1610 if (m_new != m_final)
1611 m_cat(m_final, m_new);
1612 m_new = NULL;
1613 }
1614 #ifdef MBUF_STRESS_TEST
1615 if (m0->m_next == NULL)
1616 m_defraguseless++;
1617 #endif
1618 m_freem(m0);
1619 m0 = m_final;
1620 #ifdef MBUF_STRESS_TEST
1621 m_defragpackets++;
1622 m_defragbytes += m0->m_pkthdr.len;
1623 #endif
1624 return (m0);
1625 nospace:
1626 #ifdef MBUF_STRESS_TEST
1627 m_defragfailure++;
1628 #endif
1629 if (m_final)
1630 m_freem(m_final);
1631 return (NULL);
1632 }
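/*
 * Editor's illustrative sketch (not part of the original file): drivers
 * typically defragment only after the DMA load reports too many segments.
 * 'tag', 'map', 'segs', and 'nsegs' are hypothetical driver state.
 */
#if 0
    error = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs, 0);
    if (error == EFBIG) {
        struct mbuf *n = m_defrag(m, M_NOWAIT);
        if (n == NULL)
            goto drop;    /* the original chain m is still valid */
        m = n;
        error = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs, 0);
    }
#endif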
1633
1634 /*
1635 * Return the number of fragments an mbuf will use. This is usually
1636 * used as a proxy for the number of scatter/gather elements needed by
1637 * a DMA engine to access an mbuf. In general mapped mbufs are
1638 * assumed to be backed by physically contiguous buffers that only
1639 * need a single fragment. Unmapped mbufs, on the other hand, can
1640 * span disjoint physical pages.
1641 */
1642 static int
1643 frags_per_mbuf(struct mbuf *m)
1644 {
1645 int frags;
1646
1647 if ((m->m_flags & M_EXTPG) == 0)
1648 return (1);
1649
1650 /*
1651 * The header and trailer are counted as a single fragment
1652 * each when present.
1653 *
1654 * XXX: This overestimates the number of fragments by assuming
1655 * all the backing physical pages are disjoint.
1656 */
1657 frags = 0;
1658 if (m->m_epg_hdrlen != 0)
1659 frags++;
1660 frags += m->m_epg_npgs;
1661 if (m->m_epg_trllen != 0)
1662 frags++;
1663
1664 return (frags);
1665 }
1666
1667 /*
1668 * Defragment an mbuf chain, returning at most maxfrags separate
1669 * mbufs+clusters. If this is not possible NULL is returned and
1670 * the original mbuf chain is left in its present (potentially
1671 * modified) state. We use two techniques: collapsing consecutive
1672 * mbufs and replacing consecutive mbufs by a cluster.
1673 *
1674 * NB: this should really be named m_defrag but that name is taken
1675 */
1676 struct mbuf *
1677 m_collapse(struct mbuf *m0, int how, int maxfrags)
1678 {
1679 struct mbuf *m, *n, *n2, **prev;
1680 u_int curfrags;
1681
1682 /*
1683 * Calculate the current number of frags.
1684 */
1685 curfrags = 0;
1686 for (m = m0; m != NULL; m = m->m_next)
1687 curfrags += frags_per_mbuf(m);
1688 /*
1689 * First, try to collapse mbufs. Note that we always collapse
1690 * towards the front so we don't need to deal with moving the
1691 * pkthdr. This may be suboptimal if the first mbuf has much
1692 * less data than the following.
1693 */
1694 m = m0;
1695 again:
1696 for (;;) {
1697 n = m->m_next;
1698 if (n == NULL)
1699 break;
1700 if (M_WRITABLE(m) &&
1701 n->m_len < M_TRAILINGSPACE(m)) {
1702 m_copydata(n, 0, n->m_len,
1703 mtod(m, char *) + m->m_len);
1704 m->m_len += n->m_len;
1705 m->m_next = n->m_next;
1706 curfrags -= frags_per_mbuf(n);
1707 m_free(n);
1708 if (curfrags <= maxfrags)
1709 return m0;
1710 } else
1711 m = n;
1712 }
1713 KASSERT(maxfrags > 1,
1714 ("maxfrags %u, but normal collapse failed", maxfrags));
1715 /*
1716 * Collapse consecutive mbufs to a cluster.
1717 */
1718 prev = &m0->m_next; /* NB: not the first mbuf */
1719 while ((n = *prev) != NULL) {
1720 if ((n2 = n->m_next) != NULL &&
1721 n->m_len + n2->m_len < MCLBYTES) {
1722 m = m_getcl(how, MT_DATA, 0);
1723 if (m == NULL)
1724 goto bad;
1725 m_copydata(n, 0, n->m_len, mtod(m, char *));
1726 m_copydata(n2, 0, n2->m_len,
1727 mtod(m, char *) + n->m_len);
1728 m->m_len = n->m_len + n2->m_len;
1729 m->m_next = n2->m_next;
1730 *prev = m;
1731 curfrags += 1; /* For the new cluster */
1732 curfrags -= frags_per_mbuf(n);
1733 curfrags -= frags_per_mbuf(n2);
1734 m_free(n);
1735 m_free(n2);
1736 if (curfrags <= maxfrags)
1737 return m0;
1738 /*
1739 * Still not there, try the normal collapse
1740 * again before we allocate another cluster.
1741 */
1742 goto again;
1743 }
1744 prev = &n->m_next;
1745 }
1746 /*
1747 * No place where we can collapse to a cluster; punt.
1748 * This can occur if, for example, you request 2 frags
1749 * but the packet requires that both be clusters (we
1750 * never reallocate the first mbuf to avoid moving the
1751 * packet header).
1752 */
1753 bad:
1754 return NULL;
1755 }
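/*
 * Editor's illustrative sketch (not part of the original file): when a DMA
 * engine has a hard segment limit, m_collapse() targets that limit
 * directly.  MAX_DMA_SEGS is a hypothetical per-driver constant.
 */
#if 0
    n = m_collapse(m, M_NOWAIT, MAX_DMA_SEGS);
    if (n == NULL)
        goto drop;    /* m survives, possibly partially collapsed */
    m = n;
#endif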
1756
1757 #ifdef MBUF_STRESS_TEST
1758
1759 /*
1760 * Fragment an mbuf chain. There's no reason you'd ever want to do
1761 * this in normal usage, but it's great for stress testing various
1762 * mbuf consumers.
1763 *
1764 * If fragmentation is not possible, the original chain will be
1765 * returned.
1766 *
1767 * Possible length values:
1768 * 0 no fragmentation will occur
1769 * > 0 each fragment will be of the specified length
1770 * -1 each fragment will be the same random value in length
1771 * -2 each fragment's length will be entirely random
1772 * (Random values range from 1 to 256)
1773 */
1774 struct mbuf *
1775 m_fragment(struct mbuf *m0, int how, int length)
1776 {
1777 struct mbuf *m_first, *m_last;
1778 int divisor = 255, progress = 0, fraglen;
1779
1780 if (!(m0->m_flags & M_PKTHDR))
1781 return (m0);
1782
1783 if (length == 0 || length < -2)
1784 return (m0);
1785 if (length > MCLBYTES)
1786 length = MCLBYTES;
1787 if (length < 0 && divisor > MCLBYTES)
1788 divisor = MCLBYTES;
1789 if (length == -1)
1790 length = 1 + (arc4random() % divisor);
1791 if (length > 0)
1792 fraglen = length;
1793
1794 m_fixhdr(m0); /* Needed sanity check */
1795
1796 m_first = m_getcl(how, MT_DATA, M_PKTHDR);
1797 if (m_first == NULL)
1798 goto nospace;
1799
1800 if (m_dup_pkthdr(m_first, m0, how) == 0)
1801 goto nospace;
1802
1803 m_last = m_first;
1804
1805 while (progress < m0->m_pkthdr.len) {
1806 if (length == -2)
1807 fraglen = 1 + (arc4random() % divisor);
1808 if (fraglen > m0->m_pkthdr.len - progress)
1809 fraglen = m0->m_pkthdr.len - progress;
1810
1811 if (progress != 0) {
1812 struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
1813 if (m_new == NULL)
1814 goto nospace;
1815
1816 m_last->m_next = m_new;
1817 m_last = m_new;
1818 }
1819
1820 m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
1821 progress += fraglen;
1822 m_last->m_len = fraglen;
1823 }
1824 m_freem(m0);
1825 m0 = m_first;
1826 return (m0);
1827 nospace:
1828 if (m_first)
1829 m_freem(m_first);
1830 /* Return the original chain on failure */
1831 return (m0);
1832 }
1833
1834 #endif
1835
1836 /*
1837 * Free pages from mbuf_ext_pgs, assuming they were allocated via
1838 * vm_page_alloc() and aren't associated with any object.  This is the
1839 * complement to the allocator in m_uiotombuf_nomap().
1840 */
1841 void
1842 mb_free_mext_pgs(struct mbuf *m)
1843 {
1844 vm_page_t pg;
1845
1846 M_ASSERTEXTPG(m);
1847 for (int i = 0; i < m->m_epg_npgs; i++) {
1848 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1849 vm_page_unwire_noq(pg);
1850 vm_page_free(pg);
1851 }
1852 }
1853
1854 static struct mbuf *
1855 m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
1856 {
1857 struct mbuf *m, *mb, *prev;
1858 vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
1859 int error, length, i, needed;
1860 ssize_t total;
1861 int pflags = malloc2vm_flags(how) | VM_ALLOC_NODUMP | VM_ALLOC_WIRED;
1862
1863 MPASS((flags & M_PKTHDR) == 0);
1864 MPASS((how & M_ZERO) == 0);
1865
1866 /*
1867 * len can be zero or an arbitrarily large value bounded by
1868 * the total data supplied by the uio.
1869 */
1870 if (len > 0)
1871 total = MIN(uio->uio_resid, len);
1872 else
1873 total = uio->uio_resid;
1874
1875 if (maxseg == 0)
1876 maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;
1877
1878 /*
1879 * If total is zero, return an empty mbuf. This can occur
1880 * for TLS 1.0 connections which send empty fragments as
1881 * a countermeasure against the known-IV weakness in CBC
1882 * ciphersuites.
1883 */
1884 if (__predict_false(total == 0)) {
1885 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1886 if (mb == NULL)
1887 return (NULL);
1888 mb->m_epg_flags = EPG_FLAG_ANON;
1889 return (mb);
1890 }
1891
1892 /*
1893 * Allocate the pages
1894 */
1895 m = NULL;
1896 while (total > 0) {
1897 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1898 if (mb == NULL)
1899 goto failed;
1900 if (m == NULL)
1901 m = mb;
1902 else
1903 prev->m_next = mb;
1904 prev = mb;
1905 mb->m_epg_flags = EPG_FLAG_ANON;
1906 needed = length = MIN(maxseg, total);
1907 for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
1908 retry_page:
1909 pg_array[i] = vm_page_alloc_noobj(pflags);
1910 if (pg_array[i] == NULL) {
1911 if (how & M_NOWAIT) {
1912 goto failed;
1913 } else {
1914 vm_wait(NULL);
1915 goto retry_page;
1916 }
1917 }
1918 mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
1919 mb->m_epg_npgs++;
1920 }
1921 mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
1922 MBUF_EXT_PGS_ASSERT_SANITY(mb);
1923 total -= length;
1924 error = uiomove_fromphys(pg_array, 0, length, uio);
1925 if (error != 0)
1926 goto failed;
1927 mb->m_len = length;
1928 mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
1929 if (flags & M_PKTHDR)
1930 m->m_pkthdr.len += length;
1931 }
1932 return (m);
1933
1934 failed:
1935 m_freem(m);
1936 return (NULL);
1937 }
1938
1939 /*
1940 * Copy the contents of uio into a properly sized mbuf chain.
1941 * A compatibility KPI.  Users are encouraged to call the backing
1942 * functions directly.
1943 */
1944 struct mbuf *
1945 m_uiotombuf(struct uio *uio, int how, int len, int lspace, int flags)
1946 {
1947
1948 if (flags & M_EXTPG) {
1949 /* XXX: 'lspace' magically becomes maxseg! */
1950 return (m_uiotombuf_nomap(uio, how, len, lspace, flags));
1951 } else if (__predict_false(uio->uio_resid == 0)) {
1952 struct mbuf *m;
1953
1954 /*
1955 * m_uiotombuf() is known to return a zero-length buffer; keep this
1956 * behavior for compatibility.  mc_uiotomc() won't do that.
1957 */
1958 if (flags & M_PKTHDR) {
1959 m = m_gethdr(how, MT_DATA);
1960 m->m_pkthdr.memlen = MSIZE;
1961 } else
1962 m = m_get(how, MT_DATA);
1963 if (m != NULL)
1964 m->m_data += lspace;
1965 return (m);
1966 } else {
1967 struct mchain mc;
1968 int error;
1969
1970 error = mc_uiotomc(&mc, uio, len, lspace, how, flags);
1971 if (__predict_true(error == 0)) {
1972 if (flags & M_PKTHDR) {
1973 mc_first(&mc)->m_pkthdr.len = mc.mc_len;
1974 mc_first(&mc)->m_pkthdr.memlen = mc.mc_mlen;
1975 }
1976 return (mc_first(&mc));
1977 } else
1978 return (NULL);
1979 }
1980 }
1981
1982 /*
1983 * Copy the contents of uio into a properly sized mbuf chain.
1984 * In case of failure the state of the mchain is inconsistent.
1985 * @param length Limit copyout length. If 0 entire uio_resid is copied.
1986 * @param lspace Provide leading space in the first mbuf in the chain.
1987 */
1988 int
1989 mc_uiotomc(struct mchain *mc, struct uio *uio, u_int length, u_int lspace,
    int how, int flags)
{
	struct mbuf *mb;
	u_int total;
	int error;

	MPASS(lspace < MHLEN);
	MPASS(UINT_MAX - lspace >= length);
	MPASS(uio->uio_rw == UIO_WRITE);
	MPASS(uio->uio_resid >= 0);

	if (length > 0) {
		if (uio->uio_resid > length) {
			total = length;
			flags &= ~M_EOR;
		} else
			total = uio->uio_resid;
	} else if (__predict_false(uio->uio_resid + lspace > UINT_MAX))
		return (EOVERFLOW);
	else
		total = uio->uio_resid;

	if (__predict_false(total + lspace == 0)) {
		*mc = MCHAIN_INITIALIZER(mc);
		return (0);
	}

	error = mc_get(mc, total + lspace, how, MT_DATA, flags);
	if (__predict_false(error))
		return (error);
	mc_first(mc)->m_data += lspace;

	/* Fill all mbufs with uio data and update header information. */
	STAILQ_FOREACH(mb, &mc->mc_q, m_stailq) {
		u_int mlen;

		mlen = min(M_TRAILINGSPACE(mb), total - mc->mc_len);
		error = uiomove(mtod(mb, void *), mlen, uio);
		if (__predict_false(error)) {
			mc_freem(mc);
			return (error);
		}
		mb->m_len = mlen;
		mc->mc_len += mlen;
	}
	MPASS(mc->mc_len == total);

	return (0);
}
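
/*
 * Illustrative sketch (not compiled): using 'lspace' to reserve room
 * in the first mbuf so a link/protocol header can be laid down later
 * without another allocation.  The header length is hypothetical.
 */
#if 0
static int
example_copyin_with_header_room(struct mchain *mc, struct uio *uio, int how)
{
	const u_int hdrlen = 16;	/* hypothetical, must be < MHLEN */
	int error;

	error = mc_uiotomc(mc, uio, 0, hdrlen, how, 0);
	if (error != 0)
		return (error);
	/* m_data of the first mbuf now sits hdrlen bytes past the start. */
	return (0);
}
#endif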

/*
 * Copy data to/from an unmapped mbuf and a uio, limited to len bytes
 * when len is set.
 */
int
m_unmapped_uiomove(const struct mbuf *m, int m_off, struct uio *uio, int len)
{
	vm_page_t pg;
	int error, i, off, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	off += m_off;
	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = uiomove(__DECONST(void *,
			    &m->m_epg_hdr[segoff]), seglen, uio);
		}
	}
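	/* Copy out of the unmapped pages, consuming 'off' leading bytes. */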
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
		error = uiomove_fromphys(&pg, segoff, seglen, uio);
		pgoff = 0;
	}
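	/* Any residual bytes must come from the inline trailer. */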
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
		    m->m_epg_trllen, m_off));
		error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
		    len, uio);
	}
	return (error);
}

/*
 * Copy an mbuf chain into a uio, limited to len bytes when len is set.
 */
int
m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		if ((m->m_flags & M_EXTPG) != 0)
			error = m_unmapped_uiomove(m, 0, uio, length);
		else
			error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}
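
/*
 * Illustrative sketch (not compiled): draining part of a chain into a
 * caller-supplied uio, as a receive path might.  The byte limit is
 * hypothetical; passing 0 would copy up to the full uio_resid.
 */
#if 0
static int
example_chain_to_uio(const struct mbuf *m, struct uio *uio)
{
	return (m_mbuftouio(uio, m, 512));
}
#endif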

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of them that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
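
/*
 * Illustrative sketch (not compiled): a typical m_unshare() call site,
 * making a chain safe to modify in place before an IPsec-style
 * transform.  Note that m_unshare() frees the chain on failure.
 */
#if 0
static struct mbuf *
example_make_writable(struct mbuf *m)
{
	m = m_unshare(m, M_NOWAIT);
	if (m == NULL)
		return (NULL);	/* original chain already freed */
	/* Every mbuf in the resulting chain passes M_WRITABLE(). */
	return (m);
}
#endif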

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32	/* Don't just change this; see the bucket notes below. */
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;
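
/*
 * Bucket layout note: segments[] is indexed directly by chain length,
 * clamped to MP_BUCKETS - 1; used[] and wasted[] are log2 histograms
 * indexed by fls() of the byte counts, which the 100000-byte clamp in
 * m_profile() keeps at 17 or less, safely below MP_BUCKETS.
 */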

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid... it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	char buf[256];
	struct sbuf sb;
	int error;
	uint64_t *p;

	sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);

	p = &mbprof.wasted[0];
	sbuf_printf(&sb,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	sbuf_printf(&sb,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	sbuf_printf(&sb,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	sbuf_printf(&sb,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	sbuf_printf(&sb,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	sbuf_printf(&sb,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    mbprof_handler, "A",
    "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    mbprof_clr_handler, "I",
    "clear mbuf profiling statistics");
#endif
