xref: /original-bsd/sys/kern/uipc_mbuf.c (revision 990ad5b1)
1 /*
2  * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)uipc_mbuf.c	7.19 (Berkeley) 04/20/91
8  */
9 
10 #include "param.h"
11 #include "proc.h"
12 #include "malloc.h"
13 #include "map.h"
14 #define MBTYPES
15 #include "mbuf.h"
16 #include "kernel.h"
17 #include "syslog.h"
18 #include "domain.h"
19 #include "protosw.h"
20 #include "vm/vm.h"
21 
22 extern	vm_map_t mb_map;
23 struct	mbuf *mbutl;
24 char	*mclrefcnt;
25 
26 mbinit()
27 {
28 	int s;
29 
30 #if CLBYTES < 4096
31 #define NCL_INIT	(4096/CLBYTES)
32 #else
33 #define NCL_INIT	1
34 #endif
35 	s = splimp();
36 	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
37 		goto bad;
38 	splx(s);
39 	return;
40 bad:
41 	panic("mbinit");
42 }
43 
44 /*
45  * Allocate some number of mbuf clusters
46  * and place on cluster free list.
47  * Must be called at splimp.
48  */
49 /* ARGSUSED */
50 m_clalloc(ncl, canwait)
51 	register int ncl;
52 {
53 	int npg, mbx;
54 	register caddr_t p;
55 	register int i;
56 	static int logged;
57 
58 	npg = ncl * CLSIZE;
59 	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), canwait);
60 	if (p == NULL) {
61 		if (logged == 0) {
62 			logged++;
63 			log(LOG_ERR, "mb_map full\n");
64 		}
65 		return (0);
66 	}
67 	ncl = ncl * CLBYTES / MCLBYTES;
68 	for (i = 0; i < ncl; i++) {
69 		((union mcluster *)p)->mcl_next = mclfree;
70 		mclfree = (union mcluster *)p;
71 		p += MCLBYTES;
72 		mbstat.m_clfree++;
73 	}
74 	mbstat.m_clusters += ncl;
75 	return (1);
76 }
77 
78 /*
 * When MGET fails, ask protocols to free space when short of memory,
80  * then re-attempt to allocate an mbuf.
81  */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Drain the protocols to recover mbufs, then try the
	 * allocation once more.  MGET normally expands to code that
	 * calls m_retry() on failure; temporarily redefining m_retry
	 * to a null-pointer constant keeps the expanded macro from
	 * recursing back into this function.
	 */
	m_reclaim();
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}
94 
95 /*
96  * As above; retry an MGETHDR.
97  */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Same recursion-guard trick as m_retry(): MGETHDR expands
	 * to code that calls m_retryhdr() on failure, so redefine it
	 * to a null-pointer constant for the duration of the macro
	 * expansion.
	 */
	m_reclaim();
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}
110 
111 m_reclaim()
112 {
113 	register struct domain *dp;
114 	register struct protosw *pr;
115 	int s = splimp();
116 
117 	for (dp = domains; dp; dp = dp->dom_next)
118 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
119 			if (pr->pr_drain)
120 				(*pr->pr_drain)();
121 	splx(s);
122 	mbstat.m_drain++;
123 }
124 
125 /*
126  * Space allocation routines.
127  * These are also available as macros
128  * for critical paths.
129  */
/*
 * Function form of the MGET macro: allocate one mbuf of the
 * given type, waiting if canwait allows it.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *mb;

	MGET(mb, canwait, type);
	return (mb);
}
139 
/*
 * Function form of the MGETHDR macro: allocate one packet-header
 * mbuf of the given type.
 */
struct mbuf *
m_gethdr(canwait, type)
	int canwait, type;
{
	register struct mbuf *mb;

	MGETHDR(mb, canwait, type);
	return (mb);
}
149 
150 struct mbuf *
151 m_getclr(canwait, type)
152 	int canwait, type;
153 {
154 	register struct mbuf *m;
155 
156 	MGET(m, canwait, type);
157 	if (m == 0)
158 		return (0);
159 	bzero(mtod(m, caddr_t), MLEN);
160 	return (m);
161 }
162 
/*
 * Function form of the MFREE macro: free one mbuf and return
 * its successor in the chain (possibly null).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *nxt;

	MFREE(m, nxt);
	return (nxt);
}
172 
173 m_freem(m)
174 	register struct mbuf *m;
175 {
176 	register struct mbuf *n;
177 
178 	if (m == NULL)
179 		return;
180 	do {
181 		MFREE(m, n);
182 	} while (m = n);
183 }
184 
185 /*
186  * Mbuffer utility routines.
187  */
188 
189 /*
190  * Lesser-used path for M_PREPEND:
191  * allocate new mbuf to prepend to chain,
192  * copy junk along.
193  */
194 struct mbuf *
195 m_prepend(m, len, how)
196 	register struct mbuf *m;
197 	int len, how;
198 {
199 	struct mbuf *mn;
200 
201 	MGET(mn, how, m->m_type);
202 	if (mn == (struct mbuf *)NULL) {
203 		m_freem(m);
204 		return ((struct mbuf *)NULL);
205 	}
206 	if (m->m_flags & M_PKTHDR) {
207 		M_COPY_PKTHDR(mn, m);
208 		m->m_flags &= ~M_PKTHDR;
209 	}
210 	mn->m_next = m;
211 	m = mn;
212 	if (len < MHLEN)
213 		MH_ALIGN(m, len);
214 	m->m_len = len;
215 	return (m);
216 }
217 
218 /*
219  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
220  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
221  * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
222  */
int MCFail;		/* times m_copym came up empty for lack of mbufs */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;	/* bytes still to skip in the source */
	struct mbuf *top;		/* head of the copy being built */
	int copyhdr = 0;		/* copy pkthdr into first new mbuf? */

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip over the leading off0 bytes of the source chain. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Running off the end is legal only for M_COPYALL. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/*
			 * Cluster data is shared by reference: bump the
			 * cluster refcount instead of copying the bytes.
			 */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	/* Allocation failed mid-copy: discard the partial copy. */
	m_freem(top);
	MCFail++;
	return (0);
}
291 
292 /*
293  * Copy data from an mbuf chain starting "off" bytes from the beginning,
294  * continuing for "len" bytes, into the indicated buffer.
295  */
296 m_copydata(m, off, len, cp)
297 	register struct mbuf *m;
298 	register int off;
299 	register int len;
300 	caddr_t cp;
301 {
302 	register unsigned count;
303 
304 	if (off < 0 || len < 0)
305 		panic("m_copydata");
306 	while (off > 0) {
307 		if (m == 0)
308 			panic("m_copydata");
309 		if (off < m->m_len)
310 			break;
311 		off -= m->m_len;
312 		m = m->m_next;
313 	}
314 	while (len > 0) {
315 		if (m == 0)
316 			panic("m_copydata");
317 		count = MIN(m->m_len - off, len);
318 		bcopy(mtod(m, caddr_t) + off, cp, count);
319 		len -= count;
320 		cp += count;
321 		off = 0;
322 		m = m->m_next;
323 	}
324 }
325 
326 /*
327  * Concatenate mbuf chain n to m.
328  * Both chains must be of the same type (e.g. MT_DATA).
329  * Any m_pkthdr is not updated.
330  */
331 m_cat(m, n)
332 	register struct mbuf *m, *n;
333 {
334 	while (m->m_next)
335 		m = m->m_next;
336 	while (n) {
337 		if (m->m_flags & M_EXT ||
338 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
339 			/* just join the two chains */
340 			m->m_next = n;
341 			return;
342 		}
343 		/* splat the data from one into the other */
344 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
345 		    (u_int)n->m_len);
346 		m->m_len += n->m_len;
347 		n = m_free(n);
348 	}
349 }
350 
/*
 * Trim req_len bytes of data from the chain: from the head if
 * req_len is positive, from the tail if it is negative.  Emptied
 * mbufs are not freed; they are left on the chain with m_len = 0.
 */
m_adj(mp, req_len)
	struct mbuf *mp;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* This mbuf is entirely consumed. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		/* (req_len - len) is the number of bytes actually trimmed. */
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/*
			 * Whole trim fits in the last mbuf.
			 * NOTE(review): the pkthdr is tested/updated on the
			 * LAST mbuf here, but a packet header normally lives
			 * in the first mbuf of a chain — looks suspect for
			 * multi-mbuf chains; confirm against mbuf.h usage.
			 */
			m->m_len -= len;
			if ((mp = m)->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero out everything past the new end of data. */
		while (m = m->m_next)
			m->m_len = 0;
	}
}
422 
423 /*
 * Rearrange an mbuf chain so that len bytes are contiguous
425  * and in the data area of an mbuf (so that mtod and dtom
426  * will work for a structure of size len).  Returns the resulting
427  * mbuf chain on success, frees it and returns null on failure.
428  * If there is room, it will add up to max_protohdr-len extra bytes to the
429  * contiguous region in an attempt to avoid being called next time.
430  */
int MPFail;		/* times m_pullup failed and dropped the chain */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;	/* mbuf receiving the contiguous data */
	register int count;
	int space;			/* bytes of room left in m's data area */

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		/* A single mbuf can hold at most MHLEN contiguous bytes. */
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy up to max_protohdr bytes beyond the requested len
		 * (space permitting), hoping to avoid another pullup on
		 * the next header access.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* source mbuf emptied; free it */
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain ran out before len bytes were gathered. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
491