/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)mbuf.h	8.5 (Berkeley) 2/19/95
 * $FreeBSD: src/sys/sys/mbuf.h,v 1.44.2.17 2003/04/15 06:15:02 silby Exp $
 */

#ifndef _SYS_MBUF_H_
#define _SYS_MBUF_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_PARAM_H_
#include <sys/param.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _NET_NETISR_H_
#include <net/netisr.h>
#endif
#ifndef _NET_ETHERNET_H_
#include <net/ethernet.h>
#endif

/*
 * Mbufs are of a single size MSIZE, which includes overhead.
 * An mbuf may add a single "mbuf cluster" of size MCLBYTES, which has
 * no additional overhead and is used instead of the internal data area;
 * this is done when at least MINCLSIZE of data must be stored.
 */
#define MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
#define MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */
#define MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
#define M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */

/*
 * Macros for type conversion:
 * mtod(m, t)		-- Convert mbuf pointer to data pointer of correct type.
 * mtodoff(m, t, off)	-- Convert mbuf pointer at the specified offset to data
 *			   pointer of correct type.
 */
#define mtod(m, t)		((t)((m)->m_data))
#define mtodoff(m, t, off)	((t)((m)->m_data + (off)))

/*
 * Header present at the beginning of every mbuf.
 */
struct m_hdr {
	struct	mbuf *mh_next;		/* next buffer in chain */
	union {
		struct	mbuf *mh_nextpkt; /* next chain in queue/record */
		STAILQ_ENTRY(mbuf) mh_stailqpkt;
	};
	caddr_t	mh_data;		/* location of data */
	int	mh_len;			/* amount of data in this mbuf */
	int	mh_flags;		/* flags; see below */
	short	mh_type;		/* type of data in this mbuf */
	short	mh_pad;			/* padding */
	/* XXX implicit 4 bytes padding on x86_64 */
#ifdef MBUF_DEBUG
	const char *mh_lastfunc;
#endif
	union {
		struct netmsg_packet mhm_pkt;	/* hardware->proto stack msg */
		struct netmsg_pru_send mhm_snd;	/* usrspace->proto stack msg */
		struct netmsg_inarp mhm_arp;	/* proto stack arpinput msg */
		struct netmsg_ctlinput mhm_ctl;	/* proto stack ctlinput msg */
		struct netmsg_genpkt mhm_gen;	/* generic pkt send/recv msg */
		struct netmsg_forward mhm_fwd;	/* forwarding msg */
	} mh_msgu;
};
#define mh_netmsg	mh_msgu.mhm_pkt
#define mh_sndmsg	mh_msgu.mhm_snd
#define mh_arpmsg	mh_msgu.mhm_arp
#define mh_ctlmsg	mh_msgu.mhm_ctl
#define mh_genmsg	mh_msgu.mhm_gen
#define mh_fwdmsg	mh_msgu.mhm_fwd

/* pf stuff */
struct pkthdr_pf {
	void	*hdr;		/* saved hdr pos in mbuf, for ECN */
	void	*statekey;	/* pf stackside statekey */
	u_int	rtableid;	/* alternate routing table id */
	uint32_t qid;		/* queue id */
	uint16_t tag;		/* tag id */
	uint8_t	flags;
	uint8_t	routed;
	uint32_t state_hash;	/* identifies 'connections' */
	uint8_t	ecn_af;		/* for altq_red */
	uint8_t	unused01;
	uint8_t	unused02;
	uint8_t	unused03;
	/* XXX implicit 4 bytes padding on x86_64 */
};

/* pkthdr_pf.flags */
#define PF_TAG_GENERATED		0x01
#define PF_TAG_FRAGCACHE		0x02
#define PF_TAG_TRANSLATE_LOCALHOST	0x04
#define PF_TAG_DIVERTED			0x08
#define PF_TAG_DIVERTED_PACKET		0x10
#define PF_TAG_REROUTE			0x20

/*
 * Packet tag structure (see below for details).
 */
struct m_tag {
	SLIST_ENTRY(m_tag) m_tag_link;	/* List of packet tags */
	uint16_t m_tag_id;		/* Tag ID */
	uint16_t m_tag_len;		/* Length of data */
	uint32_t m_tag_cookie;		/* ABI/Module ID */
};

SLIST_HEAD(packet_tags, m_tag);

/*
 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
 *
 * Be careful: The fields have been carefully ordered to avoid hidden padding.
 *	       Keep this in mind, when adding or removing fields!
 */
struct pkthdr {
	struct	ifnet *rcvif;		/* rcv interface */
	struct	packet_tags tags;	/* list of packet tags */

	/* variables for ip and tcp reassembly */
	void	*header;		/* pointer to packet header */
	int	len;			/* total packet length */

	/* variables for hardware checksum */
	int	csum_flags;		/* flags regarding checksum */
	int	csum_data;		/* data field used by csum routines */
	uint16_t csum_iphlen;		/* IP header length */
					/* valid if CSUM IP|UDP|TCP|TSO */
	uint8_t	csum_thlen;		/* TCP/UDP header length */
					/* valid if CSUM UDP|TCP|TSO */
	uint8_t	csum_lhlen;		/* link header length */

	uint16_t tso_segsz;		/* TSO segment size */
	uint16_t ether_vlantag;		/* ethernet 802.1p+q vlan tag */

	uint16_t hash;			/* packet hash */
	uint16_t unused1;		/* reserved for route table id */
	uint32_t unused2;		/* reserved for CoDel timestamp */

	uint16_t wlan_seqno;		/* IEEE 802.11 seq no. */
	/*
	 * Valid if BRIDGE_MBUF_TAGGED is set in fw_flags, records
	 * the original ether source address (if compatible).
	 */
	uint8_t ether_br_shost[ETHER_ADDR_LEN];

	/* firewall flags */
	uint32_t fw_flags;		/* flags for FW */

	/* variables for PF processing */
	struct pkthdr_pf pf;		/* structure for PF */
};

/*
 * Description of external storage mapped into mbuf; valid only if M_EXT is set.
 */
struct m_ext {
	caddr_t	ext_buf;		/* start of buffer */
	void	(*ext_free)(void *);	/* release external storage */
	u_int	ext_size;		/* size of buffer, for ext_free */
	void	(*ext_ref)(void *);	/* add a reference to the storage */
	void	*ext_arg;		/* opaque argument for ext_free/ext_ref */
};

/*
 * The core of the mbuf object along with some shortcut defines for
 * practical purposes.
 */
struct mbuf {
	struct	m_hdr m_hdr;
	union {
		struct {
			struct	pkthdr MH_pkthdr;	/* M_PKTHDR set */
			union {
				struct	m_ext MH_ext;	/* M_EXT set */
				char	MH_databuf[MHLEN];
			} MH_dat;
		} MH;
		char	M_databuf[MLEN];	/* !M_PKTHDR, !M_EXT */
	} M_dat;
};
#define m_next		m_hdr.mh_next
#define m_len		m_hdr.mh_len
#define m_data		m_hdr.mh_data
#define m_type		m_hdr.mh_type
#define m_flags		m_hdr.mh_flags
#define m_nextpkt	m_hdr.mh_nextpkt
#define m_stailqpkt	m_hdr.mh_stailqpkt
#define m_pkthdr	M_dat.MH.MH_pkthdr
#define m_ext		M_dat.MH.MH_dat.MH_ext
#define m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define m_dat		M_dat.M_databuf

/*
 * Code that uses m_act should be converted to use m_nextpkt
 * instead; m_act is historical and deprecated.
 */
#define m_act		m_nextpkt

/*
 * mbuf flags.
 */
#define M_EXT		0x0001	/* has associated external storage */
#define M_PKTHDR	0x0002	/* start of record */
#define M_EOR		0x0004	/* end of record */
#define M_PROTO1	0x0008	/* protocol-specific */
#define M_PROTO2	0x0010	/* protocol-specific */
#define M_PROTO3	0x0020	/* protocol-specific */
#define M_PROTO4	0x0040	/* protocol-specific */
#define M_PROTO5	0x0080	/* protocol-specific */

/*
 * mbuf pkthdr flags (also stored in m_flags).
 */
#define M_BCAST		0x0100	/* send/received as link-level broadcast */
#define M_MCAST		0x0200	/* send/received as link-level multicast */
#define M_FRAG		0x0400	/* packet is a fragment of a larger packet */
#define M_FIRSTFRAG	0x0800	/* packet is first fragment */
#define M_LASTFRAG	0x1000	/* packet is last fragment */
#define M_CLCACHE	0x2000	/* mbuf allocated from the cluster cache */
#define M_EXT_CLUSTER	0x4000	/* standard cluster else special */
#define M_PHCACHE	0x8000	/* mbuf allocated from the pkt header cache */
#define M_UNUSED16	0x10000	/* was: notification event (SCTP) */
#define M_VLANTAG	0x20000	/* ether_vlantag is valid */
#define M_MPLSLABELED	0x40000	/* packet is mpls labeled */
#define M_LENCHECKED	0x80000	/* packet proto lengths are checked */
#define M_HASH		0x100000/* hash field in pkthdr is valid */
#define M_PROTO6	0x200000/* protocol-specific */
#define M_PROTO7	0x400000/* protocol-specific */
#define M_PROTO8	0x800000/* protocol-specific */
#define M_CKHASH	0x1000000/* hash needs software verification */
#define M_PRIO		0x2000000/* high priority mbuf */
#define M_SOLOCKED	0x4000000/* locked by userland for read() */

/*
 * Flags to purge when crossing layers.
 */
#define M_PROTOFLAGS	(M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5 | \
			 M_PROTO6|M_PROTO7|M_PROTO8)

/*
 * Flags copied when copying m_pkthdr.
 */
#define M_COPYFLAGS	(M_PKTHDR|M_EOR|M_PROTOFLAGS | \
			 M_BCAST|M_MCAST|M_FRAG|M_FIRSTFRAG|M_LASTFRAG | \
			 M_VLANTAG|M_MPLSLABELED | \
			 M_LENCHECKED|M_HASH|M_CKHASH|M_PRIO)

/*
 * Flags indicating hw checksum support and sw checksum requirements.
 */
#define CSUM_IP			0x0001	/* will csum IP */
#define CSUM_TCP		0x0002	/* will csum TCP */
#define CSUM_UDP		0x0004	/* will csum UDP */
#define CSUM_IP_FRAGS		0x0008	/* will csum IP fragments */
#define CSUM_FRAGMENT		0x0010	/* will do IP fragmentation */

#define CSUM_IP_CHECKED		0x0100	/* did csum IP */
#define CSUM_IP_VALID		0x0200	/*   ... the csum is valid */
#define CSUM_DATA_VALID		0x0400	/* csum_data field is valid */
#define CSUM_PSEUDO_HDR		0x0800	/* csum_data has pseudo hdr */
#define CSUM_FRAG_NOT_CHECKED	0x1000	/* did _not_ csum fragment
					 * NB: This flag is only used
					 * by IP defragmenter.
					 */
#define CSUM_TSO		0x2000	/* will do TCP segmentation */

#define CSUM_DELAY_DATA		(CSUM_TCP | CSUM_UDP)
#define CSUM_DELAY_IP		(CSUM_IP)	/* XXX add ipv6 here too? */

/*
 * Flags indicating PF processing status
 */
#define FW_MBUF_GENERATED	0x00000001
#define PF_MBUF_STRUCTURE	0x00000002	/* m_pkthdr.pf valid */
#define PF_MBUF_ROUTED		0x00000004	/* pf_routed field is valid */
#define PF_MBUF_TAGGED		0x00000008
#define IPFW_MBUF_CONTINUE	0x00000010
#define XX_MBUF_UNUSED20	0x00000020
#define IPFORWARD_MBUF_TAGGED	0x00000040
#define DUMMYNET_MBUF_TAGGED	0x00000080
#define BRIDGE_MBUF_TAGGED	0x00000100
#define FW_MBUF_REDISPATCH	0x00000200
#define FW_MBUF_PRIVATE1	0x00000400
#define FW_MBUF_PRIVATE2	0x00000800
#define IPFW_MBUF_GENERATED	FW_MBUF_GENERATED

/*
 * mbuf types.
 */
#define MT_FREE		0	/* should be on free list */
#define MT_DATA		1	/* dynamic (data) allocation */
#define MT_HEADER	2	/* packet header */
#define MT_SONAME	3	/* socket name */
/* 4 was MT_TAG */
#define MT_CONTROL	5	/* extra-data protocol message */
#define MT_OOBDATA	6	/* expedited data */
#define MT_NTYPES	7	/* number of mbuf types for mbtypes[] */

/*
 * General mbuf allocator statistics structure.
 *
 * NOTE: Make sure this struct's size is multiple cache line size.
 */
struct mbstat {
	u_long	m_mbufs;	/* mbufs obtained from page pool */
	u_long	m_clusters;	/* clusters obtained from page pool */
	u_long	m_jclusters;	/* jclusters obtained from page pool */
	u_long	m_clfree;	/* free clusters */
	u_long	m_drops;	/* times failed to find space */
	u_long	m_wait;		/* times waited for space */
	u_long	m_drain;	/* times drained protocols for space */
	u_long	m_mcfail;	/* times m_copym failed */
	u_long	m_mpfail;	/* times m_pullup failed */
	u_long	m_msize;	/* length of an mbuf */
	u_long	m_mclbytes;	/* length of an mbuf cluster */
	u_long	m_mjumpagesize;	/* length of a jumbo mbuf cluster */
	u_long	m_minclsize;	/* min length of data to allocate a cluster */
	u_long	m_mlen;		/* length of data in an mbuf */
	u_long	m_mhlen;	/* length of data in a header mbuf */
	u_long	m_pad;		/* pad to cache line size (64B) */
};

/*
 * objcache(9) ocflags sanitizing
 */
#define MB_OCFLAG(how)	((how) & M_WAITOK ? M_WAITOK : M_NOWAIT)

/*
 * These are identifying numbers passed to the m_mballoc_wait function,
 * allowing us to determine whether the call came from an MGETHDR or
 * an MGET.
 */
#define MGETHDR_C	1
#define MGET_C		2

/*
 * mbuf allocation/deallocation macros (YYY deprecated, too big):
 *
 *	MGET(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain internal data.
 *
 *	MGETHDR(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain a packet header
 * and internal data.
 */
#define MGET(m, how, type) do {						\
	(m) = m_get((how), (type));					\
} while (0)

#define MGETHDR(m, how, type) do {					\
	(m) = m_gethdr((how), (type));					\
} while (0)

/*
 * MCLGET adds such clusters to a normal mbuf.  The flag M_EXT is set upon
 * success.
 * Deprecated.  Use m_getcl() or m_getl() instead.
 */
#define MCLGET(m, how) do {						\
	m_mclget((m), (how));						\
} while (0)

/*
 * NB: M_COPY_PKTHDR is deprecated; use either M_MOVE_PKTHDR
 *     or m_dup_pkthdr.
 */
/*
 * Move mbuf pkthdr from "from" to "to".
 * from should have M_PKTHDR set, and to must be empty.
 * from no longer has a pkthdr after this operation.
 */
#define M_MOVE_PKTHDR(_to, _from)	m_move_pkthdr((_to), (_from))

/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 */
#define M_ALIGN(m, len) do {						\
	(m)->m_data += rounddown2(MLEN - (len), sizeof(long));		\
} while (0)

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 */
#define MH_ALIGN(m, len) do {						\
	(m)->m_data += rounddown2(MHLEN - (len), sizeof(long));		\
} while (0)

/*
 * Check if we can write to an mbuf.
 */
#define M_EXT_WRITABLE(m)	(m_sharecount(m) == 1)
#define M_WRITABLE(m)		(!((m)->m_flags & M_EXT) || M_EXT_WRITABLE(m))

/*
 * Check if the supplied mbuf has a packet header, or else panic.
 *
 * NOTE: The argument is parenthesized in the expansion so that expression
 *	 arguments (e.g. conditionals) parse as intended.
 */
#define M_ASSERTPKTHDR(m)						\
	KASSERT((m) != NULL && ((m)->m_flags & M_PKTHDR),		\
		("%s: invalid mbuf or no mbuf packet header!", __func__))

/*
 * Compute the amount of space available before the current start of data.
 * The M_EXT_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_EXT_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0):	\
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available after the end of data in an mbuf.
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size	\
		- ((m)->m_data + (m)->m_len) : 0) :			\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))

/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If how is M_NOWAIT and allocation fails, the original mbuf chain
 * is freed and m is set to NULL.
 */
#define M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && (_mm->m_flags & M_PKTHDR))			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)

/* Length to m_copym() to copy all. */
#define M_COPYALL	1000000000

/* Compatibility with 4.3 */
#define m_copy(m, o, l)	m_copym((m), (o), (l), M_NOWAIT)

#ifdef _KERNEL

extern u_int	 m_clalloc_wid;	/* mbuf cluster wait count */
extern u_int	 m_mballoc_wid;	/* mbuf wait count */
extern int	 max_linkhdr;	/* largest link-level header */
extern int	 max_protohdr;	/* largest protocol header */
extern int	 max_hdr;	/* largest link+protocol header */
extern int	 max_datalen;	/* MHLEN - max_hdr */
extern int	 nmbclusters;
extern int	 nmbufs;

struct uio;

void		 mcl_inclimit(int);
void		 mjcl_inclimit(int);
void		 mb_inclimit(int);
void		 m_adj(struct mbuf *, int);
void		 m_align(struct mbuf *, int);
int		 m_apply(struct mbuf *, int, int,
		    int (*)(void *, void *, u_int), void *);
int		 m_append(struct mbuf *, int, const void *);
void		 m_cat(struct mbuf *, struct mbuf *);
u_int		 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *mbcnt);
void		 m_copyback(struct mbuf *, int, int, const void *);
int		 m_copyback2(struct mbuf *, int, int, const void *, int);
void		 m_copydata(const struct mbuf *, int, int, void *);
struct	mbuf	*m_copym(const struct mbuf *, int, int, int);
struct	mbuf	*m_copypacket(struct mbuf *, int);
struct	mbuf	*m_defrag(struct mbuf *, int);
struct	mbuf	*m_defrag_nofree(struct mbuf *, int);
struct	mbuf	*m_devget(void *, int, int, struct ifnet *);
struct	mbuf	*m_dup(struct mbuf *, int);
struct	mbuf	*m_dup_data(struct mbuf *, int);
int		 m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
void		 m_extadd(struct mbuf *, void *, u_int, void (*)(void *),
		    void (*)(void *), void *);
#ifdef MBUF_DEBUG
struct	mbuf	*_m_free(struct mbuf *, const char *name);
void		 _m_freem(struct mbuf *, const char *name);
#else
struct	mbuf	*m_free(struct mbuf *);
void		 m_freem(struct mbuf *);
#endif
struct	mbuf	*m_get(int, int);
struct	mbuf	*m_getc(int len, int how, int type);
struct	mbuf	*m_getcl(int how, short type, int flags);
struct	mbuf	*m_getjcl(int how, short type, int flags, size_t size);
struct	mbuf	*m_getclr(int, int);
struct	mbuf	*m_gethdr(int, int);
struct	mbuf	*m_getm(struct mbuf *, int, int, int);
struct	mbuf	*m_getptr(struct mbuf *, int, int *);
struct	mbuf	*m_last(struct mbuf *m);
u_int		 m_lengthm(struct mbuf *m, struct mbuf **lastm);
void		 m_move_pkthdr(struct mbuf *, struct mbuf *);
struct	mbuf	*m_prepend(struct mbuf *, int, int);
void		 m_print(const struct mbuf *m);
struct	mbuf	*m_pulldown(struct mbuf *, int, int, int *);
struct	mbuf	*m_pullup(struct mbuf *, int);
struct	mbuf	*m_split(struct mbuf *, int, int);
struct	mbuf	*m_uiomove(struct uio *);
struct	mbuf	*m_unshare(struct mbuf *, int);
void		 m_mclget(struct mbuf *m, int how);
int		 m_sharecount(struct mbuf *m);
void		 m_chtype(struct mbuf *m, int type);
int		 m_devpad(struct mbuf *m, int padto);

#ifdef MBUF_DEBUG

void		 mbuftrackid(struct mbuf *, int);

#define m_free(m)	_m_free(m, __func__)
#define m_freem(m)	_m_freem(m, __func__)

#else

#define mbuftrackid(m, id)	/* empty */

#endif

/*
 * Record the pkthdr hash and mark it valid (M_HASH).
 */
static __inline void
m_sethash(struct mbuf *m, uint16_t hash)
{
	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = hash;
}

/*
 * Allocate the right type of mbuf for the desired total length.
 * The mbuf returned does not necessarily cover the entire requested length.
 * This function follows mbuf chaining policy of allowing MINCLSIZE
 * amount of chained mbufs.
 */
static __inline struct mbuf *
m_getl(int len, int how, int type, int flags, int *psize)
{
	struct mbuf *m;
	int size;

	if (len >= MINCLSIZE) {
		m = m_getcl(how, type, flags);
		size = MCLBYTES;
	} else if (flags & M_PKTHDR) {
		m = m_gethdr(how, type);
		size = MHLEN;
	} else {
		m = m_get(how, type);
		size = MLEN;
	}
	if (psize != NULL)
		*psize = size;
	return (m);
}

/*
 * As m_getl(), but lengths beyond MCLBYTES are served from the jumbo
 * cluster pool (MJUMPAGESIZE).
 */
static __inline struct mbuf *
m_getlj(int len, int how, int type, int flags, int *psize)
{
	if (len > MCLBYTES) {
		struct mbuf *m;

		m = m_getjcl(how, type, flags, MJUMPAGESIZE);
		if (psize != NULL)
			*psize = MJUMPAGESIZE;
		return m;
	}
	return m_getl(len, how, type, flags, psize);
}

/*
 * Get a single mbuf that covers the requested number of bytes.
 * This function does not create mbuf chains.  It explicitly marks
 * places in the code that abuse mbufs for contiguous data buffers.
 */
static __inline struct mbuf *
m_getb(int len, int how, int type, int flags)
{
	struct mbuf *m;
	int mbufsize = (flags & M_PKTHDR) ? MHLEN : MLEN;

	if (len > mbufsize)
		m = m_getcl(how, type, flags);
	else if (flags & M_PKTHDR)
		m = m_gethdr(how, type);
	else
		m = m_get(how, type);
	return (m);
}

/*
 * Mbuf tags
 *
 * Packets may have annotations attached by affixing a list
 * of "packet tags" to the pkthdr structure.  Packet tags are
 * dynamically allocated semi-opaque data structures that have
 * a fixed header (struct m_tag) that specifies the size of the
 * memory block and a <cookie,type> pair that identifies it.
 * The cookie is a 32-bit unique unsigned value used to identify
 * a module or ABI.  By convention this value is chose as the
 * date+time that the module is created, expressed as the number of
 * seconds since the epoch (e.g. using date -u +'%s').  The type value
 * is an ABI/module-specific value that identifies a particular annotation
 * and is private to the module.  For compatibility with systems
 * like openbsd that define packet tags w/o an ABI/module cookie,
 * the value PACKET_ABI_COMPAT is used to implement m_tag_get and
 * m_tag_find compatibility shim functions and several tag types are
 * defined below.  Users that do not require compatibility should use
 * a private cookie value so that packet tag-related definitions
 * can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default
 * memory alignment implemented by kmalloc.  To reference private data
 * one can use a construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = m_tag_data(mtag);
 *
 * if the alignment of struct m_tag is sufficient for referencing members
 * of struct foo.  Otherwise it is necessary to embed struct m_tag within
 * the private data structure to insure proper alignment; e.g.
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *)m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */

#define PACKET_TAG_NONE				0  /* Nadda */
#define PACKET_TAG_ENCAP			6  /* Encap. processing */
						   /* struct ifnet *, the GIF interface */
#define PACKET_TAG_IPV6_INPUT			8  /* IPV6 input processing */
						   /* struct ip6aux */
#define PACKET_TAG_IPFW_DIVERT			9  /* divert info */
						   /* struct divert_info */
#define PACKET_TAG_DUMMYNET			15 /* dummynet info */
						   /* struct dn_pkt */
#define PACKET_TAG_IPFORWARD			18 /* ipforward info */
						   /* struct sockaddr_in */
#define PACKET_TAG_IPSRCRT			27 /* IP srcrt opts */
						   /* struct ip_srcrt_opt */
#define PACKET_TAG_CARP				28 /* CARP info */
						   /* struct ifnet */
#define PACKET_TAG_PF				29 /* PF info */
						   /* struct pf_mtag */
#define PACKET_TAG_PF_DIVERT			0x0200 /* pf(4) diverted packet */

/* Packet tag routines */
struct	m_tag	*m_tag_alloc(uint32_t, int, int, int);
void		 m_tag_free(struct m_tag *);
void		 m_tag_prepend(struct mbuf *, struct m_tag *);
void		 m_tag_unlink(struct mbuf *, struct m_tag *);
void		 m_tag_delete(struct mbuf *, struct m_tag *);
void		 m_tag_delete_chain(struct mbuf *);
struct	m_tag	*m_tag_locate(struct mbuf *, uint32_t, int, struct m_tag *);
struct	m_tag	*m_tag_copy(struct m_tag *, int);
int		 m_tag_copy_chain(struct mbuf *, const struct mbuf *, int);
void		 m_tag_init(struct mbuf *);
struct	m_tag	*m_tag_first(struct mbuf *);
struct	m_tag	*m_tag_next(struct mbuf *, struct m_tag *);

/* these are for openbsd compatibility */
#define MTAG_ABI_COMPAT		0	/* compatibility ABI */

/*
 * Return a pointer to the private data immediately following the tag header.
 */
static __inline void *
m_tag_data(struct m_tag *tag)
{
	return ((void *)(tag + 1));
}

/*
 * openbsd-compatible tag allocation under the compatibility ABI cookie.
 */
static __inline struct m_tag *
m_tag_get(int type, int length, int mflags)
{
	return m_tag_alloc(MTAG_ABI_COMPAT, type, length, mflags);
}

/*
 * openbsd-compatible tag lookup under the compatibility ABI cookie.
 */
static __inline struct m_tag *
m_tag_find(struct mbuf *m, int type, struct m_tag *start)
{
	return m_tag_locate(m, MTAG_ABI_COMPAT, type, start);
}

/*
 * Mbuf queue routines
 */

struct mbufq {
	STAILQ_HEAD(, mbuf) mq_head;
	int	mq_len;		/* current number of queued mbufs */
	int	mq_maxlen;	/* limit enforced by mbufq_enqueue() */
};

/*
 * Initialize an empty queue with the given length limit.
 */
static inline void
mbufq_init(struct mbufq *mq, int maxlen)
{
	STAILQ_INIT(&mq->mq_head);
	mq->mq_maxlen = maxlen;
	mq->mq_len = 0;
}

/*
 * Detach and return the whole chain of queued mbufs, leaving the
 * queue empty.  The caller takes ownership of the chain.
 */
static inline struct mbuf *
mbufq_flush(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	STAILQ_INIT(&mq->mq_head);
	mq->mq_len = 0;
	return (m);
}

/*
 * Free all mbufs queued on mq.
 */
static inline void
mbufq_drain(struct mbufq *mq)
{
	struct mbuf *m, *n;

	n = mbufq_flush(mq);
	while ((m = n) != NULL) {
		n = STAILQ_NEXT(m, m_stailqpkt);
		m_freem(m);
	}
}

static inline struct mbuf *
mbufq_first(const struct mbufq *mq)
{
	return (STAILQ_FIRST(&mq->mq_head));
}

static inline struct mbuf *
mbufq_last(const struct mbufq *mq)
{
	return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
}

static inline int
mbufq_full(const struct mbufq *mq)
{
	return (mq->mq_len >= mq->mq_maxlen);
}

static inline int
mbufq_len(const struct mbufq *mq)
{
	return (mq->mq_len);
}

/*
 * Append m to the queue; fails with ENOBUFS when the queue is at
 * its mq_maxlen limit.
 */
static inline int
mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
{
	if (mbufq_full(mq))
		return (ENOBUFS);
	STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
	return (0);
}

/*
 * Remove and return the mbuf at the head of the queue, or NULL if
 * the queue is empty.  The returned mbuf's queue linkage is cleared.
 */
static inline struct mbuf *
mbufq_dequeue(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	if (m) {
		STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
		m->m_nextpkt = NULL;	/* clear stale queue linkage */
		mq->mq_len--;
	}
	return (m);
}

/*
 * Insert m at the head of the queue.
 * NB: unlike mbufq_enqueue(), this does not honor mq_maxlen.
 */
static inline void
mbufq_prepend(struct mbufq *mq, struct mbuf *m)
{
	STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
}

#endif	/* _KERNEL */

#endif	/* _KERNEL || _KERNEL_STRUCTURES */
#endif	/* !_SYS_MBUF_H_ */