xref: /original-bsd/sys/kern/uipc_mbuf.c (revision f4ab8a4c)
1 /*	uipc_mbuf.c	1.32	82/03/13	*/
2 
3 #include "../h/param.h"
4 #include "../h/dir.h"
5 #include "../h/user.h"
6 #include "../h/proc.h"
7 #include "../h/pte.h"
8 #include "../h/cmap.h"
9 #include "../h/map.h"
10 #include "../h/mbuf.h"
11 #include "../net/in_systm.h"		/* XXX */
12 #include "../h/vm.h"
13 
14 mbinit()
15 {
16 
17 COUNT(MBINIT);
18 	if (m_reserve(32) == 0)
19 		goto bad;
20 	if (m_clalloc(4, MPG_MBUFS) == 0)
21 		goto bad;
22 	if (m_clalloc(32, MPG_CLUSTERS) == 0)
23 		goto bad;
24 	return;
25 bad:
26 	panic("mbinit");
27 }
28 
29 caddr_t
30 m_clalloc(ncl, how)
31 	register int ncl;
32 	int how;
33 {
34 	int npg, mbx;
35 	register struct mbuf *m;
36 	register int i;
37 	int s;
38 
39 COUNT(M_CLALLOC);
40 	npg = ncl * CLSIZE;
41 	s = splimp();		/* careful: rmalloc isn't reentrant */
42 	mbx = rmalloc(mbmap, npg);
43 	splx(s);
44 	if (mbx == 0)
45 		return (0);
46 	m = cltom(mbx / CLSIZE);
47 	if (memall(&Mbmap[mbx], ncl * CLSIZE, proc, CSYS) == 0)
48 		return (0);
49 	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
50 	switch (how) {
51 
52 	case MPG_CLUSTERS:
53 		s = splimp();
54 		for (i = 0; i < ncl; i++) {
55 			m->m_off = 0;
56 			m->m_next = mclfree;
57 			mclfree = m;
58 			m += CLBYTES / sizeof (*m);
59 			nmclfree++;
60 		}
61 		mbstat.m_clusters += ncl;
62 		splx(s);
63 		break;
64 
65 	case MPG_MBUFS:
66 		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
67 			m->m_off = 0;
68 			m->m_free = 0;
69 			(void) m_free(m);
70 			m++;
71 		}
72 		mbstat.m_clusters += ncl;
73 		break;
74 	}
75 	return ((caddr_t)m);
76 }
77 
/*
 * Release n pages of mbuf memory starting at addr back to the system.
 * Not yet implemented -- see the "NEED SOME WAY TO RELEASE SPACE"
 * note below; once allocated, mbuf pages are never reclaimed.
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

COUNT(M_PGFREE);
#ifdef lint
	addr = addr; n = n;	/* silence "unused argument" from lint */
#endif
}
88 
89 m_expand()
90 {
91 
92 COUNT(M_EXPAND);
93 	if (mbstat.m_bufs >= mbstat.m_hiwat)
94 		return (0);
95 	if (m_clalloc(1, MPG_MBUFS) == 0)
96 		goto steal;
97 	return (1);
98 steal:
99 	/* should ask protocols to free code */
100 	return (0);
101 }
102 
103 /* NEED SOME WAY TO RELEASE SPACE */
104 
105 /*
106  * Space reservation routines
107  */
108 m_reserve(mbufs)
109 	int mbufs;
110 {
111 
112 	if (mbstat.m_lowat + (mbufs>>1) > (NMBCLUSTERS-32) * CLBYTES)
113 		return (0);
114 	mbstat.m_hiwat += mbufs;
115 	mbstat.m_lowat = mbstat.m_hiwat >> 1;
116 	return (1);
117 }
118 
119 m_release(mbufs)
120 	int mbufs;
121 {
122 
123 	mbstat.m_hiwat -= mbufs;
124 	mbstat.m_lowat = mbstat.m_hiwat >> 1;
125 }
126 
127 /*
128  * Space allocation routines.
129  * These are also available as macros
130  * for critical paths.
131  */
/*
 * Allocate an mbuf; function form of the MGET macro for
 * non-critical paths.  "canwait" is passed through to MGET
 * (whether the caller may block for space).  Returns the new
 * mbuf, or 0 if none could be had.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GET);
	MGET(m, canwait);
	return (m);
}
142 
143 struct mbuf *
144 m_getclr(canwait)
145 	int canwait;
146 {
147 	register struct mbuf *m;
148 
149 COUNT(M_GETCLR);
150 	m = m_get(canwait);
151 	if (m == 0)
152 		return (0);
153 	m->m_off = MMINOFF;
154 	bzero(mtod(m, caddr_t), MLEN);
155 	return (m);
156 }
157 
/*
 * Free a single mbuf; function form of the MFREE macro for
 * non-critical paths.  Returns the next mbuf in the chain
 * (m->m_next as captured by MFREE), which may be 0.
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

COUNT(M_FREE);
	MFREE(m, n);
	return (n);
}
168 
/*
 * Called (from the MGET macro) when the free list is empty:
 * try to expand the mbuf pool and retry the allocation.
 * Returns a fresh mbuf, or NULL if no space could be obtained
 * (counted in mbstat.m_drops).
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
	/*
	 * MGET expands to code that calls m_more() when the free list
	 * is empty.  Temporarily redefine m_more so that any such
	 * recursion -- which would mean m_expand() lied about having
	 * added buffers -- panics instead of looping forever.
	 */
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
#undef m_more
	return (m);
}
186 
187 m_freem(m)
188 	register struct mbuf *m;
189 {
190 	register struct mbuf *n;
191 	register int s;
192 
193 COUNT(M_FREEM);
194 	if (m == NULL)
195 		return;
196 	s = splimp();
197 	do {
198 		MFREE(m, n);
199 	} while (m = n);
200 	splx(s);
201 }
202 
203 /*
204  * Mbuffer utility routines.
205  */
/*
 * Make a copy of an mbuf chain starting "off" bytes from the
 * beginning, continuing for "len" bytes (or to the end of the
 * chain if len is M_COPYALL).  Small-mbuf data is physically
 * copied; cluster data is shared by bumping the cluster's
 * reference count rather than copying.
 * Returns the head of the new chain, or 0 on failure (or len==0).
 * Panics on a malformed request (negative off/len, or a request
 * that runs past the end of the chain without M_COPYALL).
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* skip ahead to the mbuf containing the starting offset */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* running off the end is only legal for M_COPYALL */
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/*
			 * Source data lives in a cluster: share it by
			 * pointing the copy into the same cluster and
			 * bumping its reference count -- no data copy.
			 */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else {
			/* small mbuf: copy the bytes */
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;	/* offset applies to the first mbuf only */
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}
261 
/*
 * Concatenate chain n onto the end of chain m, compacting data
 * from n into m's last mbuf while it fits below MMAXOFF.  When
 * the tail of m is a cluster, or the next piece of n won't fit,
 * the remaining chains are simply linked together.
 * Mbufs of n that are fully copied are freed.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	/* find the last mbuf of m */
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);	/* advances to n->m_next */
	}
}
281 
/*
 * Trim "len" bytes of data from the chain at mp: from the head if
 * len is positive, from the tail if negative.  Trimmed mbufs are
 * left on the chain with m_len == 0 rather than being freed.
 * NOTE(review): the tail-trim rescan stops at the first mbuf with
 * m_len == 0, so it appears to assume zero-length mbufs occur only
 * at the tail of the chain -- confirm against callers.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* trim from the head of the chain */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* consume this mbuf entirely */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				/* partial: shrink and advance the data offset */
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* trim from the tail of the chain */
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			/* walk to the last mbuf holding data; n trails m */
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				/* consume the whole tail mbuf, rescan from the head */
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}
322 
/*
 * Rearrange the chain m0 so that its first "len" bytes of data are
 * contiguous in a single small mbuf (so a header may be accessed
 * with a simple pointer cast).  len must not exceed MLEN.
 * On success returns the new chain head (a fresh mbuf holding the
 * pulled-up data, linked to the remainder); the original head
 * mbufs that were drained are freed.  On failure the entire chain
 * is freed and 0 is returned.
 */
struct mbuf *
m_pullup(m0, len)
	struct mbuf *m0;
	int len;
{
	register struct mbuf *m, *n;
	int count;

	n = m0;
	if (len > MLEN)
		goto bad;
	MGET(m, 0);		/* 0: don't wait for space */
	if (m == 0)
		goto bad;
	m->m_off = MMINOFF;
	m->m_len = 0;
	do {
		/* copy at most: room left in m, bytes still wanted, bytes in n */
		count = MIN(MLEN - m->m_len, len);
		if (count > n->m_len)
			count = n->m_len;
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_off += count;
		n->m_len -= count;
		if (n->m_len)
			break;	/* n still has data, so len must be satisfied */
		n = m_free(n);	/* drained; advance to the next mbuf */
	} while (n);
	if (len) {
		/* chain ran out before len bytes were gathered */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
363