/*	uipc_mbuf.c	1.40	82/10/21	*/

#include "../h/param.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/pte.h"
#include "../h/cmap.h"
#include "../h/map.h"
#include "../h/mbuf.h"
#include "../h/vm.h"
#include "../h/kernel.h"

/*
 * Bootstrap the mbuf system: prime the free lists with a few pages of
 * small mbufs and a batch of page clusters.  Panics if the initial
 * allocation fails, since networking cannot run without buffer memory.
 */
mbinit()
{

	if (m_clalloc(4, MPG_MBUFS) == 0)
		goto bad;
	if (m_clalloc(32, MPG_CLUSTERS) == 0)
		goto bad;
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl clusters of wired-down kernel memory and carve the
 * space up according to "how": either into page clusters threaded onto
 * mclfree (MPG_CLUSTERS) or into small mbufs handed to m_free one by
 * one (MPG_MBUFS).  Returns 0 on failure, otherwise an address within
 * the allocated region.
 */
caddr_t
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	int s;

	npg = ncl * CLSIZE;
	s = splimp();		/* careful: rmalloc isn't reentrant */
	mbx = rmalloc(mbmap, (long)npg);
	splx(s);
	if (mbx == 0)
		return (0);
	m = cltom(mbx / CLSIZE);
	/*
	 * NOTE(review): if memall fails here, the mbmap space obtained
	 * from rmalloc above is apparently never given back — looks
	 * like a map-space leak; confirm against rmfree usage elsewhere.
	 */
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0)
		return (0);
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		/* thread each cluster onto the cluster free list */
		s = splimp();
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		splx(s);
		break;

	case MPG_MBUFS:
		/* carve the pages into small mbufs; m_free links each
		   onto the mbuf free list (MFREE raises spl itself) */
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_free = 0;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

/*
 * Release mbuf pages back to the system: not implemented.
 * The lint block only silences unused-argument warnings.
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Grow the pool of small mbufs by one cluster's worth.
 * Returns 1 on success, 0 if no memory could be obtained
 * (reclaiming space from protocols is not yet implemented).
 */
m_expand()
{

	if (m_clalloc(1, MPG_MBUFS) == 0)
		goto steal;
	return (1);
steal:
	/* should ask protocols to free code */
	return (0);
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
/*
 * Allocate one mbuf via the MGET macro; canwait is passed through to
 * MGET (presumably M_WAIT/M_DONTWAIT as used by m_pullup below).
 * Returns the mbuf, or 0 if none available.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

	MGET(m, canwait);
	return (m);
}

/*
 * Allocate an mbuf and zero its entire data area (MLEN bytes).
 * Returns 0 if no mbuf could be obtained.
 */
struct mbuf *
m_getclr(canwait)
	int canwait;
{
	register struct mbuf *m;

	m = m_get(canwait);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * Free a single mbuf via the MFREE macro and
 * return its successor in the chain (if any).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Called (by the MGET macro) when the mbuf free list is empty: try to
 * expand the pool and retry the allocation.  Records a drop and returns
 * NULL if expansion fails.  The temporary #define turns any recursive
 * re-entry from the MGET expansion below into a panic instead of an
 * infinite recursion.
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
#undef m_more
	return (m);
}

/*
 * Free an entire mbuf chain, at interrupt protection level.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */
/*
 * Make a copy of an mbuf chain starting "off" bytes from the head and
 * continuing for "len" bytes (or to the end of the chain if len is
 * M_COPYALL).  Data held in clusters is shared by reference — the new
 * mbuf points into the same cluster page and the page's reference
 * count is bumped — while small-mbuf data is bcopy'd.  Returns the new
 * chain, or 0 if len is 0 or an mbuf cannot be allocated.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* skip forward to the mbuf containing offset "off" */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* running off the end is only legal for M_COPYALL */
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster mbuf: share the page, don't copy */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Concatenate mbuf chain n onto the end of chain m.  Data from n is
 * compacted into m's last mbuf while it fits (freeing n's mbufs as
 * they are drained); once it no longer fits — or m's tail is a cluster
 * (m_off >= MMAXOFF) — the remainder of n is simply linked on.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim "len" bytes of data from the chain at mp: from the front of the
 * chain if len is positive, from the back if negative.  Trimmed mbufs
 * are emptied (m_len set to 0) but never unlinked or freed.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* trim from head: consume whole mbufs, then adjust
		   offset/length of the first partially-kept one */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* trim from tail */
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			/* walk to the last mbuf still holding data */
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				/* last mbuf fully trimmed; rescan from head */
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}

/*
 * Rearrange chain m0 so that its first "len" bytes of data are
 * contiguous in a single new mbuf, which becomes the head of the
 * returned chain.  Leading mbufs that are drained completely are
 * freed.  On failure (len > MLEN, no mbuf available, or fewer than
 * len bytes in the chain) the whole chain is freed and 0 is returned.
 */
struct mbuf *
m_pullup(m0, len)
	struct mbuf *m0;
	int len;
{
	register struct mbuf *m, *n;
	int count;

	n = m0;
	if (len > MLEN)
		goto bad;
	MGET(m, M_DONTWAIT);
	if (m == 0)
		goto bad;
	m->m_len = 0;
	do {
		/* copy as much as fits in m, is still wanted,
		   and is present in the current source mbuf */
		count = MIN(MLEN - m->m_len, len);
		if (count > n->m_len)
			count = n->m_len;
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_off += count;
		n->m_len -= count;
		if (n->m_len)
			break;
		n = m_free(n);
	} while (n);
	if (len) {
		/* chain exhausted (or m full) before len was satisfied */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}