xref: /original-bsd/sys/kern/uipc_mbuf.c (revision 424c3edd)
/*	uipc_mbuf.c	1.30	82/03/09	*/

#include "../h/param.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/pte.h"
#include "../h/cmap.h"
#include "../h/map.h"
#include "../h/mbuf.h"
#include "../net/in_systm.h"		/* XXX */
#include "../h/vm.h"

/*
 * Initialize the mbuf system: reserve space for the initial
 * complement of mbufs, then allocate the first pages of small
 * mbufs and of clusters.  Failure here is fatal.
 */
mbinit()
{

COUNT(MBINIT);
	if (m_reserve(32) == 0)
		goto bad;
	if (m_clalloc(4, MPG_MBUFS) == 0)
		goto bad;
	if (m_clalloc(32, MPG_CLUSTERS) == 0)
		goto bad;
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl clusters of wired-down kernel memory and add them to
 * the pool indicated by how: MPG_CLUSTERS puts whole pages on the
 * cluster free list, MPG_MBUFS carves the pages into small mbufs and
 * frees each one.  Returns 0 if map or page allocation fails.
 */
caddr_t
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	int s;

COUNT(M_CLALLOC);
	npg = ncl * CLSIZE;
	s = splimp();		/* careful: rmalloc isn't reentrant */
	mbx = rmalloc(mbmap, npg);
	splx(s);
	if (mbx == 0)
		return (0);
	m = cltom(mbx / CLSIZE);
	if (memall(&Mbmap[mbx], ncl * CLSIZE, proc, CSYS) == 0)
		return (0);
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		s = splimp();
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			nmclfree++;
		}
		mbstat.m_clusters += ncl;
		splx(s);
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_free = 0;
			(void) m_free(m);
			m++;
		}
		mbstat.m_clusters += ncl;
		break;
	}
	return ((caddr_t)m);
}

/*
 * Release mbuf pages back to the system (not yet implemented;
 * see the note below about needing a way to release space).
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

COUNT(M_PGFREE);
}

/*
 * Add more small mbufs to the pool, up to the high-water mark.
 * Returns 1 on success, 0 if no space could be obtained.
 */
m_expand()
{

COUNT(M_EXPAND);
	if (mbstat.m_bufs >= mbstat.m_hiwat)
		return (0);
	if (m_clalloc(1, MPG_MBUFS) == 0)
		goto steal;
	return (1);
steal:
	/* should ask protocols to free space */
	return (0);
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space reservation routines
 */
/*
 * Reserve space for a protocol by raising the mbuf high-water mark
 * (the low-water mark tracks it at half the value).  The request is
 * refused if the resulting reservation could never be satisfied by
 * the mbuf page map.
 */
m_reserve(mbufs)
	int mbufs;
{

	if (mbstat.m_lowat + (mbufs>>1) > (NMBCLUSTERS-32) * CLBYTES)
		return (0);
	mbstat.m_hiwat += mbufs;
	mbstat.m_lowat = mbstat.m_hiwat >> 1;
	return (1);
}

/*
 * Give back a previous reservation.
 */
m_release(mbufs)
	int mbufs;
{

	mbstat.m_hiwat -= mbufs;
	mbstat.m_lowat = mbstat.m_hiwat >> 1;
}

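/*
 * Usage sketch (not part of the original file): a protocol that expects
 * to queue data might reserve mbuf space when it starts up and return
 * the reservation on shutdown.  proto_init and proto_shutdown are
 * hypothetical names; only m_reserve and m_release come from this file.
 */
proto_init()
{

	if (m_reserve(64) == 0)
		return (0);		/* reservation refused */
	return (1);
}

proto_shutdown()
{

	m_release(64);
}
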
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GET);
	MGET(m, canwait);
	return (m);
}

/*
 * Get an mbuf and clear its data area.
 */
struct mbuf *
m_getclr(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GETCLR);
	m = m_get(canwait);
	if (m == 0)
		return (0);
	m->m_off = MMINOFF;
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * Free a single mbuf, returning its successor in the chain.
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

COUNT(M_FREE);
	MFREE(m, n);
	return (n);
}

/*ARGSUSED*/
/*
 * Called by the MGET macro when the free list is empty: try to expand
 * the pool and retry the allocation.  m_more is temporarily redefined
 * to panic so that the retry cannot recurse into this routine.
 */
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
#undef m_more
	return (m);
}

/*
 * Free an entire mbuf chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

COUNT(M_FREEM);
	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

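/*
 * Usage sketch (not part of the original file): the common pattern is to
 * allocate an mbuf with m_get (or the MGET macro on critical paths), fill
 * its data area through mtod, and later release the whole chain with
 * m_freem.  build_one_mbuf is a hypothetical caller.
 */
struct mbuf *
build_one_mbuf(cp, len)
	caddr_t cp;
	int len;
{
	register struct mbuf *m;

	if (len > MLEN)
		return (0);
	m = m_get(1);			/* 1: may wait for space */
	if (m == 0)
		return (0);
	m->m_off = MMINOFF;
	m->m_len = len;
	bcopy(cp, mtod(m, caddr_t), (u_int)len);
	return (m);			/* caller disposes of it with m_freem */
}
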
/*
 * Mbuffer utility routines.
 */
/*
 * Copy len bytes of data (or all remaining data, if len is M_COPYALL)
 * starting at byte offset off into a new chain.  Data in clusters is
 * shared by reference; small mbuf data is copied.  Returns 0 if mbufs
 * run out.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster: point at the same page and bump its reference count */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else {
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

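/*
 * Usage sketch (not part of the original file): a sender that must keep
 * its data queued (e.g. for retransmission) hands the output routine a
 * copy of the chain rather than the original.  send_copy is hypothetical;
 * only m_copy and M_COPYALL come from this file.
 */
send_copy(m0)
	struct mbuf *m0;
{
	register struct mbuf *n;

	n = m_copy(m0, 0, M_COPYALL);	/* shares clusters, copies small mbufs */
	if (n == 0)
		return (0);
	/* ... pass n to the output routine, which will m_freem it ... */
	return (1);
}
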
/*
 * Concatenate chain n onto chain m, compacting data into m's last mbuf
 * where it fits and otherwise simply linking the chains together.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim len bytes of data from the chain: a positive len trims from
 * the head, a negative len trims from the tail.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			/* find the last mbuf holding data */
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}

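/*
 * Usage sketch (not part of the original file): trimming a leading
 * header and a trailing checksum or trailer from a packet chain.
 * strip_envelope is hypothetical; only m_adj comes from this file.
 */
strip_envelope(m, hdrlen, trailerlen)
	struct mbuf *m;
	int hdrlen, trailerlen;
{

	m_adj(m, hdrlen);		/* positive: trim from the front */
	m_adj(m, -trailerlen);		/* negative: trim from the back */
}
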
/*
 * Rearrange the chain headed by m0 so that its first len bytes are
 * contiguous in one small mbuf, which is returned with the remainder
 * of the chain linked behind it.  On failure the chain is freed and
 * 0 is returned.
 */
struct mbuf *
m_pullup(m0, len)
	struct mbuf *m0;
	int len;
{
	register struct mbuf *m, *n;
	int cnt;

	n = m0;
	if (len > MLEN)
		goto bad;
	MGET(m, 0);
	if (m == 0)
		goto bad;
	m->m_off = MMINOFF;
	m->m_len = 0;
	do {
		cnt = MIN(MLEN - m->m_len, len);
		if (cnt > n->m_len)
			cnt = n->m_len;
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len, cnt);
		len -= cnt;
		m->m_len += cnt;
		n->m_off += cnt;
		n->m_len -= cnt;
		if (n->m_len)
			break;
		n = m_free(n);
	} while (n);
	if (len) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
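
/*
 * Usage sketch (not part of the original file): before treating the
 * front of a chain as a fixed-size header via mtod, make sure the
 * first hlen bytes are contiguous.  pullup_header is hypothetical;
 * only m_pullup and mtod come from this file and mbuf.h.
 */
struct mbuf *
pullup_header(m, hlen)
	register struct mbuf *m;
	int hlen;
{

	if (m->m_len < hlen) {
		m = m_pullup(m, hlen);	/* frees the chain on failure */
		if (m == 0)
			return (0);
	}
	/* mtod(m, caddr_t) now points at hlen contiguous bytes */
	return (m);
}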
359