xref: /original-bsd/sys/kern/uipc_mbuf.c (revision dd262573)
1 /*
2  * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)uipc_mbuf.c	7.18 (Berkeley) 12/05/90
8  */
9 
10 #include "param.h"
11 #include "user.h"
12 #include "proc.h"
13 #include "cmap.h"
14 #include "malloc.h"
15 #include "map.h"
16 #define MBTYPES
17 #include "mbuf.h"
18 #include "kernel.h"
19 #include "syslog.h"
20 #include "domain.h"
21 #include "protosw.h"
22 #include "../vm/vm_param.h"
23 #include "../vm/vm_map.h"
24 
25 extern vm_map_t mb_map;
26 struct mbuf *mbutl;
27 char *mclrefcnt;
28 
29 mbinit()
30 {
31 	int s;
32 
33 #if CLBYTES < 4096
34 #define NCL_INIT	(4096/CLBYTES)
35 #else
36 #define NCL_INIT	1
37 #endif
38 	s = splimp();
39 	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
40 		goto bad;
41 	splx(s);
42 	return;
43 bad:
44 	panic("mbinit");
45 }
46 
47 /*
48  * Allocate some number of mbuf clusters
49  * and place on cluster free list.
50  * Must be called at splimp.
51  */
52 /* ARGSUSED */
53 m_clalloc(ncl, canwait)
54 	register int ncl;
55 {
56 	int npg, mbx;
57 	register caddr_t p;
58 	register int i;
59 	static int logged;
60 
61 	npg = ncl * CLSIZE;
62 	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), canwait);
63 	if (p == NULL) {
64 		if (logged == 0) {
65 			logged++;
66 			log(LOG_ERR, "mb_map full\n");
67 		}
68 		return (0);
69 	}
70 	ncl = ncl * CLBYTES / MCLBYTES;
71 	for (i = 0; i < ncl; i++) {
72 		((union mcluster *)p)->mcl_next = mclfree;
73 		mclfree = (union mcluster *)p;
74 		p += MCLBYTES;
75 		mbstat.m_clfree++;
76 	}
77 	mbstat.m_clusters += ncl;
78 	return (1);
79 }
80 
81 /*
82  * When MGET fails, ask protocols to free space when short of memory,
83  * then re-attempt to allocate an mbuf.
84  */
/*
 * m_retry(canwait, type): slow path taken when MGET comes up empty.
 * Ask the protocols to release cached mbufs, then attempt the
 * allocation exactly once more.  Returns null if still no memory.
 */
85 struct mbuf *
86 m_retry(i, t)
87 	int i, t;
88 {
89 	register struct mbuf *m;
90 
91 	m_reclaim();
	/*
	 * MGET's failure path expands to a call to m_retry() itself.
	 * The temporary #define below makes that embedded call expand
	 * to a null pointer constant instead, so a second failure
	 * yields NULL rather than infinite recursion.
	 */
92 #define m_retry(i, t)	(struct mbuf *)0
93 	MGET(m, i, t);
94 #undef m_retry
95 	return (m);
96 }
97 
98 /*
99  * As above; retry an MGETHDR.
100  */
/*
 * m_retryhdr(canwait, type): as m_retry() above, but for MGETHDR —
 * reclaim protocol space, then retry the packet-header allocation once.
 */
101 struct mbuf *
102 m_retryhdr(i, t)
103 	int i, t;
104 {
105 	register struct mbuf *m;
106 
107 	m_reclaim();
	/*
	 * Break MGETHDR's embedded recursion: while this #define is in
	 * effect, the m_retryhdr() call inside the macro expands to a
	 * null pointer, so a second failure returns NULL.
	 */
108 #define m_retryhdr(i, t) (struct mbuf *)0
109 	MGETHDR(m, i, t);
110 #undef m_retryhdr
111 	return (m);
112 }
113 
114 m_reclaim()
115 {
116 	register struct domain *dp;
117 	register struct protosw *pr;
118 	int s = splimp();
119 
120 	for (dp = domains; dp; dp = dp->dom_next)
121 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
122 			if (pr->pr_drain)
123 				(*pr->pr_drain)();
124 	splx(s);
125 	mbstat.m_drain++;
126 }
127 
128 /*
129  * Space allocation routines.
130  * These are also available as macros
131  * for critical paths.
132  */
/*
 * Function form of the MGET macro, for use off the critical paths.
 * Returns a fresh mbuf of the given type, or null on failure.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	struct mbuf *mp;

	MGET(mp, canwait, type);
	return (mp);
}
142 
/*
 * Function form of the MGETHDR macro: allocate an mbuf with room
 * for a packet header.  Returns null on failure.
 */
struct mbuf *
m_gethdr(canwait, type)
	int canwait, type;
{
	struct mbuf *mp;

	MGETHDR(mp, canwait, type);
	return (mp);
}
152 
153 struct mbuf *
154 m_getclr(canwait, type)
155 	int canwait, type;
156 {
157 	register struct mbuf *m;
158 
159 	MGET(m, canwait, type);
160 	if (m == 0)
161 		return (0);
162 	bzero(mtod(m, caddr_t), MLEN);
163 	return (m);
164 }
165 
/*
 * Function form of the MFREE macro: free a single mbuf and
 * return its successor in the chain (possibly null).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *next;

	MFREE(m, next);
	return (next);
}
175 
176 m_freem(m)
177 	register struct mbuf *m;
178 {
179 	register struct mbuf *n;
180 
181 	if (m == NULL)
182 		return;
183 	do {
184 		MFREE(m, n);
185 	} while (m = n);
186 }
187 
188 /*
189  * Mbuffer utility routines.
190  */
191 
192 /*
193  * Lesser-used path for M_PREPEND:
194  * allocate new mbuf to prepend to chain,
195  * copy junk along.
196  */
197 struct mbuf *
198 m_prepend(m, len, how)
199 	register struct mbuf *m;
200 	int len, how;
201 {
202 	struct mbuf *mn;
203 
204 	MGET(mn, how, m->m_type);
205 	if (mn == (struct mbuf *)NULL) {
206 		m_freem(m);
207 		return ((struct mbuf *)NULL);
208 	}
209 	if (m->m_flags & M_PKTHDR) {
210 		M_COPY_PKTHDR(mn, m);
211 		m->m_flags &= ~M_PKTHDR;
212 	}
213 	mn->m_next = m;
214 	m = mn;
215 	if (len < MHLEN)
216 		MH_ALIGN(m, len);
217 	m->m_len = len;
218 	return (m);
219 }
220 
221 /*
222  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
223  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
224  * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
225  */
226 int MCFail;		/* statistics: number of failed m_copym calls */
227 
228 struct mbuf *
229 m_copym(m, off0, len, wait)
230 	register struct mbuf *m;
231 	int off0, wait;
232 	register int len;
233 {
234 	register struct mbuf *n, **np;
235 	register int off = off0;
236 	struct mbuf *top;
237 	int copyhdr = 0;
238 
239 	if (off < 0 || len < 0)
240 		panic("m_copym");
	/* copying from the front of a packet: duplicate its header too */
241 	if (off == 0 && m->m_flags & M_PKTHDR)
242 		copyhdr = 1;
	/* skip the mbufs wholly before the starting offset */
243 	while (off > 0) {
244 		if (m == 0)
245 			panic("m_copym");
246 		if (off < m->m_len)
247 			break;
248 		off -= m->m_len;
249 		m = m->m_next;
250 	}
251 	np = &top;
252 	top = 0;
253 	while (len > 0) {
254 		if (m == 0) {
			/* running off the end is only legal for M_COPYALL */
255 			if (len != M_COPYALL)
256 				panic("m_copym");
257 			break;
258 		}
259 		MGET(n, wait, m->m_type);
260 		*np = n;
261 		if (n == 0)
262 			goto nospace;
263 		if (copyhdr) {
264 			M_COPY_PKTHDR(n, m);
265 			if (len == M_COPYALL)
266 				n->m_pkthdr.len -= off0;
267 			else
268 				n->m_pkthdr.len = len;
269 			copyhdr = 0;
270 		}
271 		n->m_len = MIN(len, m->m_len - off);
272 		if (m->m_flags & M_EXT) {
			/* share the cluster: bump its reference count
			   instead of copying the data */
273 			n->m_data = m->m_data + off;
274 			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
275 			n->m_ext = m->m_ext;
276 			n->m_flags |= M_EXT;
277 		} else
278 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
279 			    (unsigned)n->m_len);
280 		if (len != M_COPYALL)
281 			len -= n->m_len;
282 		off = 0;		/* only the first source mbuf has an offset */
283 		m = m->m_next;
284 		np = &n->m_next;
285 	}
286 	if (top == 0)
287 		MCFail++;
288 	return (top);
289 nospace:
290 	m_freem(top);
291 	MCFail++;
292 	return (0);
293 }
294 
295 /*
296  * Copy data from an mbuf chain starting "off" bytes from the beginning,
297  * continuing for "len" bytes, into the indicated buffer.
298  */
299 m_copydata(m, off, len, cp)
300 	register struct mbuf *m;
301 	register int off;
302 	register int len;
303 	caddr_t cp;
304 {
305 	register unsigned count;
306 
307 	if (off < 0 || len < 0)
308 		panic("m_copydata");
309 	while (off > 0) {
310 		if (m == 0)
311 			panic("m_copydata");
312 		if (off < m->m_len)
313 			break;
314 		off -= m->m_len;
315 		m = m->m_next;
316 	}
317 	while (len > 0) {
318 		if (m == 0)
319 			panic("m_copydata");
320 		count = MIN(m->m_len - off, len);
321 		bcopy(mtod(m, caddr_t) + off, cp, count);
322 		len -= count;
323 		cp += count;
324 		off = 0;
325 		m = m->m_next;
326 	}
327 }
328 
329 /*
330  * Concatenate mbuf chain n to m.
331  * Both chains must be of the same type (e.g. MT_DATA).
332  * Any m_pkthdr is not updated.
333  */
334 m_cat(m, n)
335 	register struct mbuf *m, *n;
336 {
337 	while (m->m_next)
338 		m = m->m_next;
339 	while (n) {
340 		if (m->m_flags & M_EXT ||
341 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
342 			/* just join the two chains */
343 			m->m_next = n;
344 			return;
345 		}
346 		/* splat the data from one into the other */
347 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
348 		    (u_int)n->m_len);
349 		m->m_len += n->m_len;
350 		n = m_free(n);
351 	}
352 }
353 
354 m_adj(mp, req_len)
355 	struct mbuf *mp;
356 {
357 	register int len = req_len;
358 	register struct mbuf *m;
359 	register count;
360 
361 	if ((m = mp) == NULL)
362 		return;
363 	if (len >= 0) {
364 		/*
365 		 * Trim from head.
366 		 */
367 		while (m != NULL && len > 0) {
368 			if (m->m_len <= len) {
369 				len -= m->m_len;
370 				m->m_len = 0;
371 				m = m->m_next;
372 			} else {
373 				m->m_len -= len;
374 				m->m_data += len;
375 				len = 0;
376 			}
377 		}
378 		m = mp;
379 		if (mp->m_flags & M_PKTHDR)
380 			m->m_pkthdr.len -= (req_len - len);
381 	} else {
382 		/*
383 		 * Trim from tail.  Scan the mbuf chain,
384 		 * calculating its length and finding the last mbuf.
385 		 * If the adjustment only affects this mbuf, then just
386 		 * adjust and return.  Otherwise, rescan and truncate
387 		 * after the remaining size.
388 		 */
389 		len = -len;
390 		count = 0;
391 		for (;;) {
392 			count += m->m_len;
393 			if (m->m_next == (struct mbuf *)0)
394 				break;
395 			m = m->m_next;
396 		}
397 		if (m->m_len >= len) {
398 			m->m_len -= len;
399 			if ((mp = m)->m_flags & M_PKTHDR)
400 				m->m_pkthdr.len -= len;
401 			return;
402 		}
403 		count -= len;
404 		if (count < 0)
405 			count = 0;
406 		/*
407 		 * Correct length for chain is "count".
408 		 * Find the mbuf with last data, adjust its length,
409 		 * and toss data from remaining mbufs on chain.
410 		 */
411 		m = mp;
412 		if (m->m_flags & M_PKTHDR)
413 			m->m_pkthdr.len = count;
414 		for (; m; m = m->m_next) {
415 			if (m->m_len >= count) {
416 				m->m_len = count;
417 				break;
418 			}
419 			count -= m->m_len;
420 		}
421 		while (m = m->m_next)
422 			m->m_len = 0;
423 	}
424 }
425 
426 /*
427  * Rearrange an mbuf chain so that len bytes are contiguous
428  * and in the data area of an mbuf (so that mtod and dtom
429  * will work for a structure of size len).  Returns the resulting
430  * mbuf chain on success, frees it and returns null on failure.
431  * If there is room, it will add up to max_protohdr-len extra bytes to the
432  * contiguous region in an attempt to avoid being called next time.
433  */
434 int MPFail;		/* statistics: number of failed m_pullup calls */
435 
436 struct mbuf *
437 m_pullup(n, len)
438 	register struct mbuf *n;
439 	int len;
440 {
441 	register struct mbuf *m;
442 	register int count;
443 	int space;
444 
445 	/*
446 	 * If first mbuf has no cluster, and has room for len bytes
447 	 * without shifting current data, pullup into it,
448 	 * otherwise allocate a new mbuf to prepend to the chain.
449 	 */
450 	if ((n->m_flags & M_EXT) == 0 &&
451 	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
452 		if (n->m_len >= len)
453 			return (n);
		/* m is the destination; pull the rest of the chain into it */
454 		m = n;
455 		n = n->m_next;
456 		len -= m->m_len;
457 	} else {
		/* a fresh header mbuf must hold all len bytes by itself */
458 		if (len > MHLEN)
459 			goto bad;
460 		MGET(m, M_DONTWAIT, n->m_type);
461 		if (m == 0)
462 			goto bad;
463 		m->m_len = 0;
464 		if (n->m_flags & M_PKTHDR) {
465 			M_COPY_PKTHDR(m, n);
466 			n->m_flags &= ~M_PKTHDR;
467 		}
468 	}
	/* bytes of data area remaining in m after its current data */
469 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
470 	do {
		/*
		 * Copy up to max_protohdr bytes beyond what was asked for,
		 * space permitting, to try to avoid another pullup soon.
		 */
471 		count = min(min(max(len, max_protohdr), space), n->m_len);
472 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
473 		  (unsigned)count);
474 		len -= count;
475 		m->m_len += count;
476 		n->m_len -= count;
477 		space -= count;
478 		if (n->m_len)
479 			n->m_data += count;
480 		else
481 			n = m_free(n);	/* source mbuf drained; free it */
482 	} while (len > 0 && n);
483 	if (len > 0) {
		/* chain was shorter than len bytes */
484 		(void) m_free(m);
485 		goto bad;
486 	}
487 	m->m_next = n;
488 	return (m);
489 bad:
490 	m_freem(n);
491 	MPFail++;
492 	return (0);
493 }
494