xref: /original-bsd/sys/kern/uipc_mbuf.c (revision 6131e5cb)
/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)uipc_mbuf.c	7.10 (Berkeley) 06/29/88
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"

/*
 * Mbuf pool initialization: allocate an initial supply of small mbufs
 * and mbuf clusters at boot time.
 */
mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl clusters of kernel memory for the mbuf pool and carve
 * them into small mbufs or mbuf clusters, as requested.
 * Must be called at splimp.
 */
/* ARGSUSED */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
	int canwait;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	m = cltom(mbx * NBPG / MCLBYTES);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		ncl = ncl * CLBYTES / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += MCLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

/*
 * Add more small mbufs to the pool.  If the first attempt fails and the
 * caller is willing to wait, ask each protocol to release whatever
 * space it can and try once more.
 * Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}
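
/*
 * Example (illustrative sketch only): the function forms above and the
 * MGET/MFREE macros from mbuf.h are interchangeable; critical paths use
 * the macros to avoid the call overhead.  The routine below is
 * hypothetical and exists only to show the calling convention.
 */
#ifdef notdef
example_mget()
{
	register struct mbuf *m, *n, *nn;

	m = m_get(M_DONTWAIT, MT_DATA);		/* function form */
	MGET(n, M_DONTWAIT, MT_DATA);		/* macro form, same effect */
	if (m)
		(void) m_free(m);		/* function form */
	if (n)
		MFREE(n, nn);			/* macro form; nn gets n->m_next */
}
#endif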

/*
 * Get more mbufs; called from MGET macro if mfree list is empty.
 * Must be called at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			mbstat.m_wait++;
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
			if (mfree)
				break;
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
	/*
	 * The MGET below would call m_more again if the free list were
	 * still empty; redefine it temporarily so that case panics
	 * rather than recursing.
	 */
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

/*
 * Free an entire mbuf chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}
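
/*
 * Example (illustrative sketch only): m_free releases a single mbuf and
 * hands back its successor, so a chain can be walked and freed one mbuf
 * at a time; m_freem releases an entire chain in one call.  The routine
 * and its argument are hypothetical.
 */
#ifdef notdef
example_free(m0)
	struct mbuf *m0;
{
	register struct mbuf *m;

	for (m = m0; m; )
		m = m_free(m);		/* frees m, returns m->m_next */
	/* equivalently: m_freem(m0); */
}
#endif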

/*
 * Mbuffer utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the chain.  Should get M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster data: share the page and bump its reference count */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}
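
/*
 * Example (illustrative sketch only): a protocol that may have to
 * retransmit keeps a duplicate of the chain rather than of the data;
 * with M_COPYALL the copy runs to the end of the chain, and cluster
 * pages are shared by reference rather than copied.  m_copy allocates
 * with M_DONTWAIT, so the result must be checked.  The routine and
 * "m0" are hypothetical.
 */
#ifdef notdef
struct mbuf *
example_dup(m0)
	struct mbuf *m0;
{
	register struct mbuf *n;

	n = m_copy(m0, 0, (int)M_COPYALL);
	if (n == 0)
		return (0);		/* out of mbufs; caller must recover */
	return (n);
}
#endif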

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
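
/*
 * Example (illustrative sketch only): m_copydata flattens part of a
 * chain into ordinary memory, which is handy for reading a fixed-size
 * header into a local structure; it panics if the chain is shorter
 * than off + len.  "struct examplehdr" and the routine are hypothetical.
 */
#ifdef notdef
example_peekhdr(m0)
	struct mbuf *m0;
{
	struct examplehdr h;

	m_copydata(m0, 0, sizeof (h), (caddr_t)&h);
	/* h now holds the first sizeof (h) bytes of the chain */
}
#endif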

/*
 * Concatenate mbuf chain n to chain m; data is copied into m's last
 * mbuf when it fits, otherwise the chains are simply linked.
 * Chain n must not be used by the caller afterwards.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
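
/*
 * Example (illustrative sketch only): gluing successive pieces of a
 * message together, as fragment reassembly does.  After the call the
 * combined data hangs off "m" and "n" must not be touched again; the
 * routine and its arguments are hypothetical.
 */
#ifdef notdef
example_append(m, n)
	struct mbuf *m, *n;
{
	m_cat(m, n);		/* m now heads the combined chain */
}
#endif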

/*
 * Trim "len" bytes of data from an mbuf chain: from the head if
 * len is positive, from the tail if len is negative.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}
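
/*
 * Example (illustrative sketch only): a positive count trims from the
 * front of the chain, a negative count from the tail; protocols use
 * this to strip a header they are done with or to drop a trailer.
 * The lengths below are hypothetical.
 */
#ifdef notdef
example_trim(m)
	struct mbuf *m;
{
	m_adj(m, 20);		/* drop 20 bytes from the front */
	m_adj(m, -4);		/* drop 4 bytes from the tail */
}
#endif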

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
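
/*
 * Example (illustrative sketch only): the usual idiom before treating
 * the front of a chain as a structure.  If the first mbuf is too short,
 * m_pullup either returns a chain whose first mbuf holds at least the
 * requested bytes contiguously, or frees the chain and returns 0.
 * "struct examplehdr" and the routine are hypothetical.
 */
#ifdef notdef
struct mbuf *
example_pullhdr(m)
	register struct mbuf *m;
{
	if (m->m_len < sizeof (struct examplehdr) &&
	    (m = m_pullup(m, sizeof (struct examplehdr))) == 0)
		return (0);		/* chain was freed by m_pullup */
	/* mtod(m, struct examplehdr *) is now safe to use */
	return (m);
}
#endif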