1 /*-
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 */
31
32 /*
33 * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
34 * We are initializing two zones for Mbufs and Clusters.
35 *
36 */
37
38 #include <stdio.h>
39 #include <string.h>
40 /* #include <sys/param.h> This defines MSIZE 256 */
41 #if !defined(SCTP_SIMPLE_ALLOCATOR)
42 #include "umem.h"
43 #endif
44 #include "user_mbuf.h"
45 #include "user_environment.h"
46 #include "user_atomic.h"
47 #include "netinet/sctp_pcb.h"
48
49 struct mbstat mbstat;
50 #define KIPC_MAX_LINKHDR 4 /* int: max length of link header (see sys/sysctl.h) */
51 #define KIPC_MAX_PROTOHDR 5 /* int: max length of network header (see sys/sysctl.h) */
52 int max_linkhdr = KIPC_MAX_LINKHDR;
53 int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
54
55 /*
56 * Zones from which we allocate.
57 */
58 sctp_zone_t zone_mbuf;
59 sctp_zone_t zone_clust;
60 sctp_zone_t zone_ext_refcnt;
61
62 /* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
63 * and mb_dtor_clust.
64 * Note: struct clust_args is used to wrap the mbuf pointer, because passing a
65 * bare struct mbuf * as the callback data does not work.
66 */
67 struct clust_args clust_mb_args;
68
69
70 /* __Userspace__
71 * Local prototypes.
72 */
73 static int mb_ctor_mbuf(void *, void *, int);
74 static int mb_ctor_clust(void *, void *, int);
75 static void mb_dtor_mbuf(void *, void *);
76 static void mb_dtor_clust(void *, void *);
77
78
79 /***************** Functions taken from user_mbuf.h *************/
80
81 static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
82 {
83 int flags = pkthdr;
84 if (type == MT_NOINIT)
85 return (0);
86
87 m->m_next = NULL;
88 m->m_nextpkt = NULL;
89 m->m_len = 0;
90 m->m_flags = flags;
91 m->m_type = type;
92 if (flags & M_PKTHDR) {
93 m->m_data = m->m_pktdat;
94 m->m_pkthdr.rcvif = NULL;
95 m->m_pkthdr.len = 0;
96 m->m_pkthdr.header = NULL;
97 m->m_pkthdr.csum_flags = 0;
98 m->m_pkthdr.csum_data = 0;
99 m->m_pkthdr.tso_segsz = 0;
100 m->m_pkthdr.ether_vtag = 0;
101 SLIST_INIT(&m->m_pkthdr.tags);
102 } else
103 m->m_data = m->m_dat;
104
105 return (0);
106 }
107
108 /* __Userspace__ */
109 struct mbuf *
110 m_get(int how, short type)
111 {
112 struct mbuf *mret;
113 #if defined(SCTP_SIMPLE_ALLOCATOR)
114 struct mb_args mbuf_mb_args;
115
116 /* The following assignments are not yet enclosed within
117 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
118 * thoroughly tested. See the comment there.
119 */
120 mbuf_mb_args.flags = 0;
121 mbuf_mb_args.type = type;
122 #endif
123 /* Mbuf master zone, zone_mbuf, has already been
124 * created in mbuf_initialize() */
125 mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
126 #if defined(SCTP_SIMPLE_ALLOCATOR)
127 mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
128 #endif
129 /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
130
131 /* There are cases when an object is available in the current CPU's
132 * loaded magazine, and in those cases the object's constructor is not applied.
133 * If that is the case, then we duplicate the constructor initialization here,
134 * so that the mbuf is properly constructed before it is returned.
135 */
136 if (mret) {
137 #if USING_MBUF_CONSTRUCTOR
138 if (! (mret->m_type == type) ) {
139 mbuf_constructor_dup(mret, 0, type);
140 }
141 #else
142 mbuf_constructor_dup(mret, 0, type);
143 #endif
144
145 }
146 return mret;
147 }
148
149
150 /* __Userspace__ */
151 struct mbuf *
152 m_gethdr(int how, short type)
153 {
154 struct mbuf *mret;
155 #if defined(SCTP_SIMPLE_ALLOCATOR)
156 struct mb_args mbuf_mb_args;
157
158 /* The following assignments are not yet enclosed within
159 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
160 * thoroughly tested. See the comment there.
161 */
162 mbuf_mb_args.flags = M_PKTHDR;
163 mbuf_mb_args.type = type;
164 #endif
165 mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
166 #if defined(SCTP_SIMPLE_ALLOCATOR)
167 mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
168 #endif
169 /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
170 /* There are cases when an object is available in the current CPU's
171 * loaded magazine, and in those cases the object's constructor is not applied.
172 * If that is the case, then we duplicate the constructor initialization here,
173 * so that the mbuf is properly constructed before it is returned.
174 */
175 if (mret) {
176 #if USING_MBUF_CONSTRUCTOR
177 if (! ((mret->m_flags & M_PKTHDR) && (mret->m_type == type)) ) {
178 mbuf_constructor_dup(mret, M_PKTHDR, type);
179 }
180 #else
181 mbuf_constructor_dup(mret, M_PKTHDR, type);
182 #endif
183 }
184 return mret;
185 }
186
187 /* __Userspace__ */
188 struct mbuf *
189 m_free(struct mbuf *m)
190 {
191
192 struct mbuf *n = m->m_next;
193
194 if (m->m_flags & M_EXT)
195 mb_free_ext(m);
196 else if ((m->m_flags & M_NOFREE) == 0) {
197 #if defined(SCTP_SIMPLE_ALLOCATOR)
198 mb_dtor_mbuf(m, NULL);
199 #endif
200 SCTP_ZONE_FREE(zone_mbuf, m);
201 }
202 /*umem_cache_free(zone_mbuf, m);*/
203 return (n);
204 }
205
206
207 static void
208 clust_constructor_dup(caddr_t m_clust, struct mbuf* m)
209 {
210 u_int *refcnt;
211 int type, size;
212
213 if (m == NULL) {
214 return;
215 }
216 /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
217 type = EXT_CLUSTER;
218 size = MCLBYTES;
219
220 refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
221 /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
222 #if !defined(SCTP_SIMPLE_ALLOCATOR)
223 if (refcnt == NULL) {
224 umem_reap();
225 refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
226 /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
227 }
228 #endif
229 *refcnt = 1;
230 m->m_ext.ext_buf = (caddr_t)m_clust;
231 m->m_data = m->m_ext.ext_buf;
232 m->m_flags |= M_EXT;
233 m->m_ext.ext_free = NULL;
234 m->m_ext.ext_args = NULL;
235 m->m_ext.ext_size = size;
236 m->m_ext.ext_type = type;
237 m->m_ext.ref_cnt = refcnt;
238 return;
239 }
240
241
242 /* __Userspace__ */
243 void
244 m_clget(struct mbuf *m, int how)
245 {
246 caddr_t mclust_ret;
247 #if defined(SCTP_SIMPLE_ALLOCATOR)
248 struct clust_args clust_mb_args_l;
249 #endif
250 if (m->m_flags & M_EXT) {
251 SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
252 }
253 m->m_ext.ext_buf = (char *)NULL;
254 #if defined(SCTP_SIMPLE_ALLOCATOR)
255 clust_mb_args_l.parent_mbuf = m;
256 #endif
257 mclust_ret = SCTP_ZONE_GET(zone_clust, char);
258 #if defined(SCTP_SIMPLE_ALLOCATOR)
259 mb_ctor_clust(mclust_ret, &clust_mb_args_l, 0);
260 #endif
261 /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
262 /*
263 On a cluster allocation failure, call umem_reap() and retry.
264 */
265
266 if (mclust_ret == NULL) {
267 #if !defined(SCTP_SIMPLE_ALLOCATOR)
268 /* mclust_ret = SCTP_ZONE_GET(zone_clust, char);
269 mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
270 #else*/
271 umem_reap();
272 mclust_ret = SCTP_ZONE_GET(zone_clust, char);
273 #endif
274 /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
275 if (NULL == mclust_ret) {
276 SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
277 }
278 }
279
280 #if USING_MBUF_CONSTRUCTOR
281 if ((m->m_ext.ext_buf == NULL)) {
282 clust_constructor_dup(mclust_ret, m);
283 }
284 #else
285 clust_constructor_dup(mclust_ret, m);
286 #endif
287 }
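
/* Usage sketch (not part of the original code): allocate a packet-header
 * mbuf, attach a cluster, and copy a caller-supplied payload into it.
 * MT_DATA, M_NOWAIT, MCLBYTES and mtod() are assumed to come from
 * user_mbuf.h; the function and its parameters are illustrative only.
 */
#if 0
static struct mbuf *
example_alloc_packet(const char *payload, int payload_len)
{
	struct mbuf *m;

	if (payload_len > MCLBYTES)
		return (NULL);
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_clget(m, M_NOWAIT);
	if (m->m_ext.ext_buf == NULL) {
		/* cluster allocation failed */
		m_freem(m);
		return (NULL);
	}
	memcpy(mtod(m, caddr_t), payload, payload_len);
	m->m_len = m->m_pkthdr.len = payload_len;
	return (m);
}
#endif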
288
289 /*
290 * Unlink a tag from the list of tags associated with an mbuf.
291 */
292 static __inline void
293 m_tag_unlink(struct mbuf *m, struct m_tag *t)
294 {
295
296 SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
297 }
298
299 /*
300 * Reclaim resources associated with a tag.
301 */
302 static __inline void
303 m_tag_free(struct m_tag *t)
304 {
305
306 (*t->m_tag_free)(t);
307 }
308
309 /*
310 * Set up the contents of a tag. Note that this does not fill in the free
311 * method; the caller is expected to do that.
312 *
313 * XXX probably should be called m_tag_init, but that was already taken.
314 */
315 static __inline void
316 m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
317 {
318
319 t->m_tag_id = type;
320 t->m_tag_len = len;
321 t->m_tag_cookie = cookie;
322 }
323
324 /************ End functions from user_mbuf.h ******************/
325
326
327
328 /************ End functions to substitute umem_cache_alloc and umem_cache_free **************/
329
330 void
331 mbuf_initialize(void *dummy)
332 {
333
334 /*
335 * __Userspace__ Configure UMA zones for Mbufs and Clusters.
336 * (TODO: m_getcl() - using packet secondary zone).
337 * There is no provision for trash_init and trash_fini in umem.
338 *
339 */
340 /* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
341 mb_ctor_mbuf, mb_dtor_mbuf, NULL,
342 &mbuf_mb_args,
343 NULL, 0);
344 zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
345 #if defined(SCTP_SIMPLE_ALLOCATOR)
346 SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
347 #else
348 zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
349 mb_ctor_mbuf, mb_dtor_mbuf, NULL,
350 NULL,
351 NULL, 0);
352 #endif
353 /*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
354 NULL, NULL, NULL,
355 NULL,
356 NULL, 0);*/
357 SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);
358
359 /*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
360 mb_ctor_clust, mb_dtor_clust, NULL,
361 &clust_mb_args,
362 NULL, 0);
363 zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL,0);*/
364 #if defined(SCTP_SIMPLE_ALLOCATOR)
365 SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
366 #else
367 zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
368 mb_ctor_clust, mb_dtor_clust, NULL,
369 &clust_mb_args,
370 NULL, 0);
371 #endif
372
373 /* uma_prealloc() goes here... */
374
375 /* __Userspace__ Add umem_reap here for low memory situation?
376 *
377 */
378
379
380 /*
381 * [Re]set counters and local statistics knobs.
382 *
383 */
384
385 mbstat.m_mbufs = 0;
386 mbstat.m_mclusts = 0;
387 mbstat.m_drain = 0;
388 mbstat.m_msize = MSIZE;
389 mbstat.m_mclbytes = MCLBYTES;
390 mbstat.m_minclsize = MINCLSIZE;
391 mbstat.m_mlen = MLEN;
392 mbstat.m_mhlen = MHLEN;
393 mbstat.m_numtypes = MT_NTYPES;
394
395 mbstat.m_mcfail = mbstat.m_mpfail = 0;
396 mbstat.sf_iocnt = 0;
397 mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
398
399 }
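
/* Note (illustrative): mbuf_initialize(NULL) is expected to run once during
 * stack startup, before the first m_get()/m_gethdr()/m_clget() call, since
 * those routines allocate from the zones created above.
 */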
400
401
402
403 /*
404 * __Userspace__
405 *
406 * Constructor for Mbuf master zone. We have a different constructor
407 * for allocating the cluster.
408 *
409 * The 'arg' pointer points to a mb_args structure which
410 * contains call-specific information required to support the
411 * mbuf allocation API. See user_mbuf.h.
412 *
413 * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
414 * was passed when umem_cache_alloc was called.
415 * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
416 * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines.
417 * The advantage of UMEM_NOFAIL is that we don't have to check whether umem_cache_alloc
418 * succeeded; the failure handler takes care of that.
420 *
421 * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc)
422 * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
423 * It also mentions that umem_nofail_callback is Evolving.
424 *
425 */
426 static int
427 mb_ctor_mbuf(void *mem, void *arg, int flgs)
428 {
429 #if USING_MBUF_CONSTRUCTOR
430 struct mbuf *m;
431 struct mb_args *args;
432
433 int flags;
434 short type;
435
436 m = (struct mbuf *)mem;
437 args = (struct mb_args *)arg;
438 flags = args->flags;
439 type = args->type;
440
441 /*
442 * The mbuf is initialized later.
443 *
444 */
445 if (type == MT_NOINIT)
446 return (0);
447
448 m->m_next = NULL;
449 m->m_nextpkt = NULL;
450 m->m_len = 0;
451 m->m_flags = flags;
452 m->m_type = type;
453 if (flags & M_PKTHDR) {
454 m->m_data = m->m_pktdat;
455 m->m_pkthdr.rcvif = NULL;
456 m->m_pkthdr.len = 0;
457 m->m_pkthdr.header = NULL;
458 m->m_pkthdr.csum_flags = 0;
459 m->m_pkthdr.csum_data = 0;
460 m->m_pkthdr.tso_segsz = 0;
461 m->m_pkthdr.ether_vtag = 0;
462 SLIST_INIT(&m->m_pkthdr.tags);
463 } else
464 m->m_data = m->m_dat;
465 #endif
466 return (0);
467 }
468
469
470 /*
471 * __Userspace__
472 * The Mbuf master zone destructor.
473 * This would be called in response to umem_cache_destroy
474 * TODO: Recheck if this is what we want to do in this destructor.
475 * (Note: the number of times mb_dtor_mbuf is called is equal to the
476 * number of individual mbufs allocated from zone_mbuf.)
477 */
478 static void
479 mb_dtor_mbuf(void *mem, void *arg)
480 {
481 struct mbuf *m;
482
483 m = (struct mbuf *)mem;
484 if ((m->m_flags & M_PKTHDR) != 0) {
485 m_tag_delete_chain(m, NULL);
486 }
487 }
488
489
490 /* __Userspace__
491 * The Cluster zone constructor.
492 *
493 * Here the 'arg' pointer points to the Mbuf which we
494 * are configuring cluster storage for. If the mbuf in
495 * 'arg' is NULL, we allocate just the cluster without
496 * attaching it to an mbuf. See mbuf.h.
497 */
498 static int
499 mb_ctor_clust(void *mem, void *arg, int flgs)
500 {
501
502 #if USING_MBUF_CONSTRUCTOR
503 struct mbuf *m;
504 struct clust_args * cla;
505 u_int *refcnt;
506 int type, size;
507 sctp_zone_t zone;
508
509 /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
510 type = EXT_CLUSTER;
511 zone = zone_clust;
512 size = MCLBYTES;
513
514 cla = (struct clust_args *)arg;
515 m = cla->parent_mbuf;
516
517 refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
518 /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
519 *refcnt = 1;
520
521 if (m != NULL) {
522 m->m_ext.ext_buf = (caddr_t)mem;
523 m->m_data = m->m_ext.ext_buf;
524 m->m_flags |= M_EXT;
525 m->m_ext.ext_free = NULL;
526 m->m_ext.ext_args = NULL;
527 m->m_ext.ext_size = size;
528 m->m_ext.ext_type = type;
529 m->m_ext.ref_cnt = refcnt;
530 }
531 #endif
532 return (0);
533 }
534
535 /* __Userspace__ */
536 static void
537 mb_dtor_clust(void *mem, void *arg)
538 {
539
540 /* mem is of type caddr_t. In sys/types.h we have typedef char * caddr_t; */
541 /* mb_dtor_clust is called at time of umem_cache_destroy() (the number of times
542 * mb_dtor_clust is called is equal to the number of individual mbufs allocated
543 * from zone_clust. Similarly for mb_dtor_mbuf).
544 * At this point the following:
545 * struct mbuf *m;
546 * m = (struct mbuf *)arg;
547 * assert (*(m->m_ext.ref_cnt) == 0); is not meaningful since m->m_ext.ref_cnt = NULL;
548 * has been done in mb_free_ext().
549 */
550
551 }
552
553
554
555
556 /* Unlink and free a packet tag. */
557 void
558 m_tag_delete(struct mbuf *m, struct m_tag *t)
559 {
560 KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
561 m_tag_unlink(m, t);
562 m_tag_free(t);
563 }
564
565
566 /* Unlink and free a packet tag chain, starting from given tag. */
567 void
568 m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
569 {
570
571 struct m_tag *p, *q;
572
573 KASSERT(m, ("m_tag_delete_chain: null mbuf"));
574 if (t != NULL)
575 p = t;
576 else
577 p = SLIST_FIRST(&m->m_pkthdr.tags);
578 if (p == NULL)
579 return;
580 while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
581 m_tag_delete(m, q);
582 m_tag_delete(m, p);
583 }
584
585 #if 0
586 static void
587 sctp_print_mbuf_chain(struct mbuf *m)
588 {
589 SCTP_DEBUG_USR(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
590 for(; m; m=m->m_next) {
591 SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
592 if (m->m_flags & M_EXT)
593 SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
594 }
595 }
596 #endif
597
598 /*
599 * Free an entire chain of mbufs and associated external buffers, if
600 * applicable.
601 */
602 void
603 m_freem(struct mbuf *mb)
604 {
605 while (mb != NULL)
606 mb = m_free(mb);
607 }
608
609 /*
610 * __Userspace__
611 * clean mbufs with M_EXT storage attached to them
612 * if the reference count hits 1.
613 */
614 void
615 mb_free_ext(struct mbuf *m)
616 {
617
618 int skipmbuf;
619
620 KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
621 KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
622
623 /*
624 * check if the header is embedded in the cluster
625 */
626 skipmbuf = (m->m_flags & M_NOFREE);
627
628 /* Free the external attached storage if this
629 * mbuf is the only reference to it.
630 *__Userspace__ TODO: jumbo frames
631 *
632 */
633 /* NOTE: We used to have the code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
634 reduces to here, but the IPHONE malloc commit changed it to compare
635 against 0 instead of 1 (see next line). Why?
636 This caused a huge memory leak on Linux.
637 */
638 #ifdef IPHONE
639 if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
640 #else
641 if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
642 #endif
643 {
644 if (m->m_ext.ext_type == EXT_CLUSTER){
645 #if defined(SCTP_SIMPLE_ALLOCATOR)
646 mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
647 #endif
648 SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
649 SCTP_ZONE_FREE(zone_ext_refcnt, (u_int*)m->m_ext.ref_cnt);
650 m->m_ext.ref_cnt = NULL;
651 }
652 }
653
654 if (skipmbuf)
655 return;
656
657
658 /* __Userspace__ Also freeing the storage for ref_cnt
659 * Free this mbuf back to the mbuf zone with all m_ext
660 * information purged.
661 */
662 m->m_ext.ext_buf = NULL;
663 m->m_ext.ext_free = NULL;
664 m->m_ext.ext_args = NULL;
665 m->m_ext.ref_cnt = NULL;
666 m->m_ext.ext_size = 0;
667 m->m_ext.ext_type = 0;
668 m->m_flags &= ~M_EXT;
669 #if defined(SCTP_SIMPLE_ALLOCATOR)
670 mb_dtor_mbuf(m, NULL);
671 #endif
672 SCTP_ZONE_FREE(zone_mbuf, m);
673
674 /*umem_cache_free(zone_mbuf, m);*/
675 }
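
/* Reference-count lifecycle (summary, for illustration): m_clget() sets
 * *ref_cnt to 1, mb_dupcl() (used by m_copym() and m_split()) increments it
 * for every mbuf that shares the cluster, and each mb_free_ext() decrements
 * it; the cluster and the counter are returned to their zones only when the
 * count reaches zero.
 */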
676
677 /*
678 * "Move" mbuf pkthdr from "from" to "to".
679 * "from" must have M_PKTHDR set, and "to" must be empty.
680 */
681 void
682 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
683 {
684
685 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
686 if ((to->m_flags & M_EXT) == 0)
687 to->m_data = to->m_pktdat;
688 to->m_pkthdr = from->m_pkthdr; /* especially tags */
689 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
690 from->m_flags &= ~M_PKTHDR;
691 }
692
693
694 /*
695 * Rearrange an mbuf chain so that len bytes are contiguous
696 * and in the data area of an mbuf (so that mtod and dtom
697 * will work for a structure of size len). Returns the resulting
698 * mbuf chain on success, frees it and returns null on failure.
699 * If there is room, it will add up to max_protohdr-len extra bytes to the
700 * contiguous region in an attempt to avoid being called next time.
701 */
702 struct mbuf *
703 m_pullup(struct mbuf *n, int len)
704 {
705 struct mbuf *m;
706 int count;
707 int space;
708
709 /*
710 * If first mbuf has no cluster, and has room for len bytes
711 * without shifting current data, pullup into it,
712 * otherwise allocate a new mbuf to prepend to the chain.
713 */
714 if ((n->m_flags & M_EXT) == 0 &&
715 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
716 if (n->m_len >= len)
717 return (n);
718 m = n;
719 n = n->m_next;
720 len -= m->m_len;
721 } else {
722 if (len > MHLEN)
723 goto bad;
724 MGET(m, M_NOWAIT, n->m_type);
725 if (m == NULL)
726 goto bad;
727 m->m_len = 0;
728 if (n->m_flags & M_PKTHDR)
729 M_MOVE_PKTHDR(m, n);
730 }
731 space = (int)(&m->m_dat[MLEN] - (m->m_data + m->m_len));
732 do {
733 count = min(min(max(len, max_protohdr), space), n->m_len);
734 memcpy(mtod(m, caddr_t) + m->m_len,mtod(n, caddr_t), (u_int)count);
735 len -= count;
736 m->m_len += count;
737 n->m_len -= count;
738 space -= count;
739 if (n->m_len)
740 n->m_data += count;
741 else
742 n = m_free(n);
743 } while (len > 0 && n);
744 if (len > 0) {
745 (void) m_free(m);
746 goto bad;
747 }
748 m->m_next = n;
749 return (m);
750 bad:
751 m_freem(n);
752 mbstat.m_mpfail++; /* XXX: No consistency. */
753 return (NULL);
754 }
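
/* Usage sketch (illustrative only): make the first 'hdr_len' bytes of a
 * chain contiguous before casting them to a header structure; 'hdr_len' and
 * 'struct foo' are placeholders.
 *
 *	m = m_pullup(m, hdr_len);
 *	if (m == NULL)
 *		return;				(chain was freed by m_pullup)
 *	hdr = mtod(m, struct foo *);
 */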
755
756
757 static struct mbuf *
758 m_dup1(struct mbuf *m, int off, int len, int wait)
759 {
760 struct mbuf *n = NULL;
761 int copyhdr;
762
763 if (len > MCLBYTES)
764 return NULL;
765 if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
766 copyhdr = 1;
767 else
768 copyhdr = 0;
769 if (len >= MINCLSIZE) {
770 if (copyhdr == 1) {
771 m_clget(n, wait); /* TODO: include code for copying the header */
772 m_dup_pkthdr(n, m, wait);
773 } else
774 m_clget(n, wait);
775 } else {
776 if (copyhdr == 1)
777 n = m_gethdr(wait, m->m_type);
778 else
779 n = m_get(wait, m->m_type);
780 }
781 if (!n)
782 return NULL; /* ENOBUFS */
783
784 if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
785 m_free(n);
786 return NULL;
787 }
788 m_copydata(m, off, len, mtod(n, caddr_t));
789 n->m_len = len;
790 return n;
791 }
792
793
794 /* Taken from sys/kern/uipc_mbuf2.c */
795 struct mbuf *
796 m_pulldown(struct mbuf *m, int off, int len, int *offp)
797 {
798 struct mbuf *n, *o;
799 int hlen, tlen, olen;
800 int writable;
801
802 /* check invalid arguments. */
803 KASSERT(m, ("m == NULL in m_pulldown()"));
804 if (len > MCLBYTES) {
805 m_freem(m);
806 return NULL; /* impossible */
807 }
808
809 #ifdef PULLDOWN_DEBUG
810 {
811 struct mbuf *t;
812 SCTP_DEBUG_USR(SCTP_DEBUG_USR, "before:");
813 for (t = m; t; t = t->m_next)
814 SCTP_DEBUG_USR(SCTP_DEBUG_USR, " %d", t->m_len);
815 SCTP_DEBUG_USR(SCTP_DEBUG_USR, "\n");
816 }
817 #endif
818 n = m;
819 while (n != NULL && off > 0) {
820 if (n->m_len > off)
821 break;
822 off -= n->m_len;
823 n = n->m_next;
824 }
825 /* be sure to point to a non-empty mbuf */
826 while (n != NULL && n->m_len == 0)
827 n = n->m_next;
828 if (!n) {
829 m_freem(m);
830 return NULL; /* mbuf chain too short */
831 }
832
833 writable = 0;
834 if ((n->m_flags & M_EXT) == 0 ||
835 (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
836 writable = 1;
837
838 /*
839 * the target data is on <n, off>.
840 * if we got enough data on the mbuf "n", we're done.
841 */
842 if ((off == 0 || offp) && len <= n->m_len - off && writable)
843 goto ok;
844
845 /*
846 * when len <= n->m_len - off and off != 0, it is a special case.
847 * len bytes from <n, off> sits in single mbuf, but the caller does
848 * not like the starting position (off).
849 * chop the current mbuf into two pieces, set off to 0.
850 */
851 if (len <= n->m_len - off) {
852 o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
853 if (o == NULL) {
854 m_freem(m);
855 return NULL; /* ENOBUFS */
856 }
857 n->m_len = off;
858 o->m_next = n->m_next;
859 n->m_next = o;
860 n = n->m_next;
861 off = 0;
862 goto ok;
863 }
864 /*
865 * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
866 * and construct contiguous mbuf with m_len == len.
867 * note that hlen + tlen == len, and tlen > 0.
868 */
869 hlen = n->m_len - off;
870 tlen = len - hlen;
871
872 /*
873 * ensure that we have enough trailing data on mbuf chain.
874 * if not, we can do nothing about the chain.
875 */
876 olen = 0;
877 for (o = n->m_next; o != NULL; o = o->m_next)
878 olen += o->m_len;
879 if (hlen + olen < len) {
880 m_freem(m);
881 return NULL; /* mbuf chain too short */
882 }
883
884 /*
885 * easy cases first.
886 * we need to use m_copydata() to get data from <n->m_next, 0>.
887 */
888 if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
889 && writable) {
890 m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
891 n->m_len += tlen;
892 m_adj(n->m_next, tlen);
893 goto ok;
894 }
895
896 if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
897 && writable) {
898 n->m_next->m_data -= hlen;
899 n->m_next->m_len += hlen;
900 memcpy( mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off,hlen);
901 n->m_len -= hlen;
902 n = n->m_next;
903 off = 0;
904 goto ok;
905 }
906
907 /*
908 * now, we need to do it the hard way. don't m_copy as there's no room
909 * on either end.
910 */
911 if (len > MLEN)
912 m_clget(o, M_NOWAIT);
913 /* o = m_getcl(M_NOWAIT, m->m_type, 0);*/
914 else
915 o = m_get(M_NOWAIT, m->m_type);
916 if (!o) {
917 m_freem(m);
918 return NULL; /* ENOBUFS */
919 }
920 /* get hlen from <n, off> into <o, 0> */
921 o->m_len = hlen;
922 memcpy(mtod(o, caddr_t), mtod(n, caddr_t) + off, hlen);
923 n->m_len -= hlen;
924 /* get tlen from <n->m_next, 0> into <o, hlen> */
925 m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
926 o->m_len += tlen;
927 m_adj(n->m_next, tlen);
928 o->m_next = n->m_next;
929 n->m_next = o;
930 n = o;
931 off = 0;
932 ok:
933 #ifdef PULLDOWN_DEBUG
934 {
935 struct mbuf *t;
936 SCTP_DEBUG_USR(SCTP_DEBUG_USR, "after:");
937 for (t = m; t; t = t->m_next)
938 SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
939 SCTP_DEBUG_USR(SCTP_DEBUG_USR, " (off=%d)\n", off);
940 }
941 #endif
942 if (offp)
943 *offp = off;
944 return n;
945 }
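
/* Usage sketch (illustrative only): obtain a contiguous view of 'len' bytes
 * starting 'off' bytes into the chain, without requiring them to begin at
 * the front of an mbuf; 'off', 'len' and 'p' are placeholders.
 *
 *	int newoff;
 *	n = m_pulldown(m, off, len, &newoff);
 *	if (n == NULL)
 *		return;				(chain was freed on failure)
 *	p = mtod(n, caddr_t) + newoff;		(points at the requested bytes)
 */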
946
947 /*
948 * Attach the cluster from *m to *n, set up m_ext in *n
949 * and bump the refcount of the cluster.
950 */
951 static void
952 mb_dupcl(struct mbuf *n, struct mbuf *m)
953 {
954 KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
955 KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
956 KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
957
958 if (*(m->m_ext.ref_cnt) == 1)
959 *(m->m_ext.ref_cnt) += 1;
960 else
961 atomic_add_int(m->m_ext.ref_cnt, 1);
962 n->m_ext.ext_buf = m->m_ext.ext_buf;
963 n->m_ext.ext_free = m->m_ext.ext_free;
964 n->m_ext.ext_args = m->m_ext.ext_args;
965 n->m_ext.ext_size = m->m_ext.ext_size;
966 n->m_ext.ref_cnt = m->m_ext.ref_cnt;
967 n->m_ext.ext_type = m->m_ext.ext_type;
968 n->m_flags |= M_EXT;
969 }
970
971
972 /*
973 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
974 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
975 * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller.
976 * Note that the copy is read-only, because clusters are not copied,
977 * only their reference counts are incremented.
978 */
979
980 struct mbuf *
981 m_copym(struct mbuf *m, int off0, int len, int wait)
982 {
983 struct mbuf *n, **np;
984 int off = off0;
985 struct mbuf *top;
986 int copyhdr = 0;
987
988 KASSERT(off >= 0, ("m_copym, negative off %d", off));
989 KASSERT(len >= 0, ("m_copym, negative len %d", len));
990 KASSERT(m != NULL, ("m_copym, m is NULL"));
991
992 #if !defined(INVARIANTS)
993 if (m == NULL) {
994 return (NULL);
995 }
996 #endif
997
998 if (off == 0 && m->m_flags & M_PKTHDR)
999 copyhdr = 1;
1000 while (off > 0) {
1001 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1002 if (off < m->m_len)
1003 break;
1004 off -= m->m_len;
1005 m = m->m_next;
1006 }
1007 np = &top;
1008 top = 0;
1009 while (len > 0) {
1010 if (m == NULL) {
1011 KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
1012 break;
1013 }
1014 if (copyhdr)
1015 MGETHDR(n, wait, m->m_type);
1016 else
1017 MGET(n, wait, m->m_type);
1018 *np = n;
1019 if (n == NULL)
1020 goto nospace;
1021 if (copyhdr) {
1022 if (!m_dup_pkthdr(n, m, wait))
1023 goto nospace;
1024 if (len == M_COPYALL)
1025 n->m_pkthdr.len -= off0;
1026 else
1027 n->m_pkthdr.len = len;
1028 copyhdr = 0;
1029 }
1030 n->m_len = min(len, m->m_len - off);
1031 if (m->m_flags & M_EXT) {
1032 n->m_data = m->m_data + off;
1033 mb_dupcl(n, m);
1034 } else
1035 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (u_int)n->m_len);
1036 if (len != M_COPYALL)
1037 len -= n->m_len;
1038 off = 0;
1039 m = m->m_next;
1040 np = &n->m_next;
1041 }
1042 if (top == NULL)
1043 mbstat.m_mcfail++; /* XXX: No consistency. */
1044
1045 return (top);
1046 nospace:
1047 m_freem(top);
1048 mbstat.m_mcfail++; /* XXX: No consistency. */
1049 return (NULL);
1050 }
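
/* Usage sketch (illustrative only): take a read-only, zero-copy reference to
 * an entire packet, e.g. before queueing it for retransmission; clusters are
 * shared by bumping ref_cnt rather than copied.
 *
 *	struct mbuf *copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (copy == NULL)
 *		... allocation failed; the original chain is untouched ...
 */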
1051
1052
1053 int
1054 m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
1055 {
1056 struct m_tag *p, *t, *tprev = NULL;
1057
1058 KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
1059 m_tag_delete_chain(to, NULL);
1060 SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
1061 t = m_tag_copy(p, how);
1062 if (t == NULL) {
1063 m_tag_delete_chain(to, NULL);
1064 return 0;
1065 }
1066 if (tprev == NULL)
1067 SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
1068 else
1069 SLIST_INSERT_AFTER(tprev, t, m_tag_link);
1070 tprev = t;
1071 }
1072 return 1;
1073 }
1074
1075 /*
1076 * Duplicate "from"'s mbuf pkthdr in "to".
1077 * "from" must have M_PKTHDR set, and "to" must be empty.
1078 * In particular, this does a deep copy of the packet tags.
1079 */
1080 int
1081 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
1082 {
1083
1084 KASSERT(to, ("m_dup_pkthdr: to is NULL"));
1085 KASSERT(from, ("m_dup_pkthdr: from is NULL"));
1086 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
1087 if ((to->m_flags & M_EXT) == 0)
1088 to->m_data = to->m_pktdat;
1089 to->m_pkthdr = from->m_pkthdr;
1090 SLIST_INIT(&to->m_pkthdr.tags);
1091 return (m_tag_copy_chain(to, from, MBTOM(how)));
1092 }
1093
1094 /* Copy a single tag. */
1095 struct m_tag *
1096 m_tag_copy(struct m_tag *t, int how)
1097 {
1098 struct m_tag *p;
1099
1100 KASSERT(t, ("m_tag_copy: null tag"));
1101 p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
1102 if (p == NULL)
1103 return (NULL);
1104 memcpy(p + 1, t + 1, t->m_tag_len); /* Copy the data */
1105 return p;
1106 }
1107
1108 /* Get a packet tag structure along with specified data following. */
1109 struct m_tag *
1110 m_tag_alloc(u_int32_t cookie, int type, int len, int wait)
1111 {
1112 struct m_tag *t;
1113
1114 if (len < 0)
1115 return NULL;
1116 t = malloc(len + sizeof(struct m_tag));
1117 if (t == NULL)
1118 return NULL;
1119 m_tag_setup(t, cookie, type, len);
1120 t->m_tag_free = m_tag_free_default;
1121 return t;
1122 }
1123
1124 /* Free a packet tag. */
1125 void
1126 m_tag_free_default(struct m_tag *t)
1127 {
1128 free(t);
1129 }
1130
1131 /*
1132 * Copy data from a buffer back into the indicated mbuf chain,
1133 * starting "off" bytes from the beginning, extending the mbuf
1134 * chain if necessary.
1135 */
1136 void
1137 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1138 {
1139 int mlen;
1140 struct mbuf *m = m0, *n;
1141 int totlen = 0;
1142
1143 if (m0 == NULL)
1144 return;
1145 while (off > (mlen = m->m_len)) {
1146 off -= mlen;
1147 totlen += mlen;
1148 if (m->m_next == NULL) {
1149 n = m_get(M_NOWAIT, m->m_type);
1150 if (n == NULL)
1151 goto out;
1152 memset(mtod(n, caddr_t), 0, MLEN);
1153 n->m_len = min(MLEN, len + off);
1154 m->m_next = n;
1155 }
1156 m = m->m_next;
1157 }
1158 while (len > 0) {
1159 mlen = min (m->m_len - off, len);
1160 memcpy(off + mtod(m, caddr_t), cp, (u_int)mlen);
1161 cp += mlen;
1162 len -= mlen;
1163 mlen += off;
1164 off = 0;
1165 totlen += mlen;
1166 if (len == 0)
1167 break;
1168 if (m->m_next == NULL) {
1169 n = m_get(M_NOWAIT, m->m_type);
1170 if (n == NULL)
1171 break;
1172 n->m_len = min(MLEN, len);
1173 m->m_next = n;
1174 }
1175 m = m->m_next;
1176 }
1177 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1178 m->m_pkthdr.len = totlen;
1179 }
1180
1181
1182 /*
1183 * Lesser-used path for M_PREPEND:
1184 * allocate new mbuf to prepend to chain,
1185 * copy junk along.
1186 */
1187 struct mbuf *
1188 m_prepend(struct mbuf *m, int len, int how)
1189 {
1190 struct mbuf *mn;
1191
1192 if (m->m_flags & M_PKTHDR)
1193 MGETHDR(mn, how, m->m_type);
1194 else
1195 MGET(mn, how, m->m_type);
1196 if (mn == NULL) {
1197 m_freem(m);
1198 return (NULL);
1199 }
1200 if (m->m_flags & M_PKTHDR)
1201 M_MOVE_PKTHDR(mn, m);
1202 mn->m_next = m;
1203 m = mn;
1204 if (m->m_flags & M_PKTHDR) {
1205 if (len < MHLEN)
1206 MH_ALIGN(m, len);
1207 } else {
1208 if (len < MLEN)
1209 M_ALIGN(m, len);
1210 }
1211 m->m_len = len;
1212 return (m);
1213 }
1214
1215 /*
1216 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1217 * continuing for "len" bytes, into the indicated buffer.
1218 */
1219 void
1220 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1221 {
1222 u_int count;
1223
1224 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1225 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1226 while (off > 0) {
1227 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1228 if (off < m->m_len)
1229 break;
1230 off -= m->m_len;
1231 m = m->m_next;
1232 }
1233 while (len > 0) {
1234 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1235 count = min(m->m_len - off, len);
1236 memcpy(cp, mtod(m, caddr_t) + off, count);
1237 len -= count;
1238 cp += count;
1239 off = 0;
1240 m = m->m_next;
1241 }
1242 }
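
/* Usage sketch (illustrative only): linearize the first bytes of a chain into
 * a local buffer without modifying the chain; assumes 'm' carries a packet
 * header so m_pkthdr.len is valid.
 *
 *	char hdr[32];
 *	if (m->m_pkthdr.len >= (int)sizeof(hdr))
 *		m_copydata(m, 0, sizeof(hdr), hdr);
 */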
1243
1244
1245 /*
1246 * Concatenate mbuf chain n to m.
1247 * Both chains must be of the same type (e.g. MT_DATA).
1248 * Any m_pkthdr is not updated.
1249 */
1250 void
1251 m_cat(struct mbuf *m, struct mbuf *n)
1252 {
1253 while (m->m_next)
1254 m = m->m_next;
1255 while (n) {
1256 if (m->m_flags & M_EXT ||
1257 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1258 /* just join the two chains */
1259 m->m_next = n;
1260 return;
1261 }
1262 /* splat the data from one into the other */
1263 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)n->m_len);
1264 m->m_len += n->m_len;
1265 n = m_free(n);
1266 }
1267 }
1268
1269
1270 void
1271 m_adj(struct mbuf *mp, int req_len)
1272 {
1273 int len = req_len;
1274 struct mbuf *m;
1275 int count;
1276
1277 if ((m = mp) == NULL)
1278 return;
1279 if (len >= 0) {
1280 /*
1281 * Trim from head.
1282 */
1283 while (m != NULL && len > 0) {
1284 if (m->m_len <= len) {
1285 len -= m->m_len;
1286 m->m_len = 0;
1287 m = m->m_next;
1288 } else {
1289 m->m_len -= len;
1290 m->m_data += len;
1291 len = 0;
1292 }
1293 }
1294 m = mp;
1295 if (mp->m_flags & M_PKTHDR)
1296 m->m_pkthdr.len -= (req_len - len);
1297 } else {
1298 /*
1299 * Trim from tail. Scan the mbuf chain,
1300 * calculating its length and finding the last mbuf.
1301 * If the adjustment only affects this mbuf, then just
1302 * adjust and return. Otherwise, rescan and truncate
1303 * after the remaining size.
1304 */
1305 len = -len;
1306 count = 0;
1307 for (;;) {
1308 count += m->m_len;
1309 if (m->m_next == (struct mbuf *)0)
1310 break;
1311 m = m->m_next;
1312 }
1313 if (m->m_len >= len) {
1314 m->m_len -= len;
1315 if (mp->m_flags & M_PKTHDR)
1316 mp->m_pkthdr.len -= len;
1317 return;
1318 }
1319 count -= len;
1320 if (count < 0)
1321 count = 0;
1322 /*
1323 * Correct length for chain is "count".
1324 * Find the mbuf with last data, adjust its length,
1325 * and toss data from remaining mbufs on chain.
1326 */
1327 m = mp;
1328 if (m->m_flags & M_PKTHDR)
1329 m->m_pkthdr.len = count;
1330 for (; m; m = m->m_next) {
1331 if (m->m_len >= count) {
1332 m->m_len = count;
1333 if (m->m_next != NULL) {
1334 m_freem(m->m_next);
1335 m->m_next = NULL;
1336 }
1337 break;
1338 }
1339 count -= m->m_len;
1340 }
1341 }
1342 }
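
/* Usage sketch (illustrative only): a positive length trims from the head of
 * the chain, a negative one from the tail.
 *
 *	m_adj(m, 12);		(drop a 12-byte header from the front)
 *	m_adj(m, -4);		(drop a 4-byte trailer from the end)
 */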
1343
1344
1345 /* m_split is used within sctp_handle_cookie_echo. */
1346
1347 /*
1348 * Partition an mbuf chain in two pieces, returning the tail --
1349 * all but the first len0 bytes. In case of failure, it returns NULL and
1350 * attempts to restore the chain to its original state.
1351 *
1352 * Note that the resulting mbufs might be read-only, because the new
1353 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1354 * the "breaking point" happens to lie within a cluster mbuf. Use the
1355 * M_WRITABLE() macro to check for this case.
1356 */
1357 struct mbuf *
1358 m_split(struct mbuf *m0, int len0, int wait)
1359 {
1360 struct mbuf *m, *n;
1361 u_int len = len0, remain;
1362
1363 /* MBUF_CHECKSLEEP(wait); */
1364 for (m = m0; m && (int)len > m->m_len; m = m->m_next)
1365 len -= m->m_len;
1366 if (m == NULL)
1367 return (NULL);
1368 remain = m->m_len - len;
1369 if (m0->m_flags & M_PKTHDR) {
1370 MGETHDR(n, wait, m0->m_type);
1371 if (n == NULL)
1372 return (NULL);
1373 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1374 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1375 m0->m_pkthdr.len = len0;
1376 if (m->m_flags & M_EXT)
1377 goto extpacket;
1378 if (remain > MHLEN) {
1379 /* m can't be the lead packet */
1380 MH_ALIGN(n, 0);
1381 n->m_next = m_split(m, len, wait);
1382 if (n->m_next == NULL) {
1383 (void) m_free(n);
1384 return (NULL);
1385 } else {
1386 n->m_len = 0;
1387 return (n);
1388 }
1389 } else
1390 MH_ALIGN(n, remain);
1391 } else if (remain == 0) {
1392 n = m->m_next;
1393 m->m_next = NULL;
1394 return (n);
1395 } else {
1396 MGET(n, wait, m->m_type);
1397 if (n == NULL)
1398 return (NULL);
1399 M_ALIGN(n, remain);
1400 }
1401 extpacket:
1402 if (m->m_flags & M_EXT) {
1403 n->m_data = m->m_data + len;
1404 mb_dupcl(n, m);
1405 } else {
1406 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
1407 }
1408 n->m_len = remain;
1409 m->m_len = len;
1410 n->m_next = m->m_next;
1411 m->m_next = NULL;
1412 return (n);
1413 }
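
/* Usage sketch (illustrative only): split a packet after 'hdr_len' bytes
 * (a placeholder); 'm0' keeps the first hdr_len bytes and 'tail' gets the
 * rest, possibly sharing a cluster with 'm0' (check M_WRITABLE() before
 * modifying either piece in place).
 *
 *	struct mbuf *tail = m_split(m0, hdr_len, M_NOWAIT);
 *	if (tail == NULL)
 *		... split failed; m_split tries to leave the chain as it was ...
 */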
1414
1415
1416
1417
1418 int
1419 pack_send_buffer(caddr_t buffer, struct mbuf* mb){
1420
1421 int count_to_copy;
1422 int total_count_copied = 0;
1423 int offset = 0;
1424
1425 do {
1426 count_to_copy = mb->m_len;
1427 memcpy(buffer+offset, mtod(mb, caddr_t), count_to_copy);
1428 offset += count_to_copy;
1429 total_count_copied += count_to_copy;
1430 mb = mb->m_next;
1431 } while(mb);
1432
1433 return (total_count_copied);
1434 }
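
/* Note (illustrative): pack_send_buffer() copies every mbuf in the chain, so
 * the caller must supply a 'buffer' at least as large as the total chain
 * length (the sum of m_len over the chain, i.e. m_pkthdr.len for a
 * packet-header chain).
 */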
1435