FreeBSD/Linux Kernel Cross Reference
sys/sys/mbuf.h


    1 /*-
    2  * Copyright (c) 1982, 1986, 1988, 1993
    3  *      The Regents of the University of California.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. Neither the name of the University nor the names of its contributors
   15  *    may be used to endorse or promote products derived from this software
   16  *    without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  *      @(#)mbuf.h      8.5 (Berkeley) 2/19/95
   31  * $FreeBSD: releng/11.2/sys/sys/mbuf.h 331722 2018-03-29 02:50:57Z eadler $
   32  */
   33 
   34 #ifndef _SYS_MBUF_H_
   35 #define _SYS_MBUF_H_
   36 
   37 /* XXX: These includes suck. Sorry! */
   38 #include <sys/queue.h>
   39 #ifdef _KERNEL
   40 #include <sys/systm.h>
   41 #include <vm/uma.h>
   42 #ifdef WITNESS
   43 #include <sys/lock.h>
   44 #endif
   45 #endif
   46 
   47 #ifdef _KERNEL
   48 #include <sys/sdt.h>
   49 
   50 #define MBUF_PROBE1(probe, arg0)                                        \
   51         SDT_PROBE1(sdt, , , probe, arg0)
   52 #define MBUF_PROBE2(probe, arg0, arg1)                                  \
   53         SDT_PROBE2(sdt, , , probe, arg0, arg1)
   54 #define MBUF_PROBE3(probe, arg0, arg1, arg2)                            \
   55         SDT_PROBE3(sdt, , , probe, arg0, arg1, arg2)
   56 #define MBUF_PROBE4(probe, arg0, arg1, arg2, arg3)                      \
   57         SDT_PROBE4(sdt, , , probe, arg0, arg1, arg2, arg3)
   58 #define MBUF_PROBE5(probe, arg0, arg1, arg2, arg3, arg4)                \
   59         SDT_PROBE5(sdt, , , probe, arg0, arg1, arg2, arg3, arg4)
   60 
   61 SDT_PROBE_DECLARE(sdt, , , m__init);
   62 SDT_PROBE_DECLARE(sdt, , , m__gethdr);
   63 SDT_PROBE_DECLARE(sdt, , , m__get);
   64 SDT_PROBE_DECLARE(sdt, , , m__getcl);
   65 SDT_PROBE_DECLARE(sdt, , , m__clget);
   66 SDT_PROBE_DECLARE(sdt, , , m__cljget);
   67 SDT_PROBE_DECLARE(sdt, , , m__cljset);
   68 SDT_PROBE_DECLARE(sdt, , , m__free);
   69 SDT_PROBE_DECLARE(sdt, , , m__freem);
   70 
   71 #endif /* _KERNEL */
   72 
   73 /*
   74  * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead.
   75  * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in
   76  * sys/param.h), which has no additional overhead and is used instead of the
   77  * internal data area; this is done when at least MINCLSIZE of data must be
   78  * stored.  Additionally, it is possible to allocate a separate buffer
   79  * externally and attach it to the mbuf in a way similar to that of mbuf
   80  * clusters.
   81  *
    82  * NB: These calculations do not take actual compiler-induced alignment and
   83  * padding inside the complete struct mbuf into account.  Appropriate
   84  * attention is required when changing members of struct mbuf.
   85  *
    86  * MLEN is the data length in a normal mbuf.
    87  * MHLEN is the data length in an mbuf with a packet header.
    88  * MINCLSIZE is the smallest amount of data that should be put into a cluster.
   89  *
   90  * Compile-time assertions in uipc_mbuf.c test these values to ensure that
   91  * they are sensible.
   92  */
   93 struct mbuf;
   94 #define MHSIZE          offsetof(struct mbuf, m_dat)
   95 #define MPKTHSIZE       offsetof(struct mbuf, m_pktdat)
   96 #define MLEN            ((int)(MSIZE - MHSIZE))
   97 #define MHLEN           ((int)(MSIZE - MPKTHSIZE))
   98 #define MINCLSIZE       (MHLEN + 1)
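
The compile-time assertions mentioned above live in uipc_mbuf.c.  A minimal
sketch of the kind of invariants they are meant to enforce (illustrative only,
not the actual kernel assertions) could be written with CTASSERT(9):

        /* Illustrative sanity checks only; the real ones are in uipc_mbuf.c. */
        CTASSERT(MLEN > 0 && MHLEN > 0);
        CTASSERT(MHLEN <= MLEN);           /* the pkthdr eats into the data area */
        CTASSERT(MINCLSIZE <= MCLBYTES);   /* data that large must fit a cluster */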
   99 
  100 #ifdef _KERNEL
  101 /*-
  102  * Macro for type conversion: convert mbuf pointer to data pointer of correct
  103  * type:
  104  *
  105  * mtod(m, t)   -- Convert mbuf pointer to data pointer of correct type.
  106  * mtodo(m, o) -- Same as above but with offset 'o' into data.
  107  */
  108 #define mtod(m, t)      ((t)((m)->m_data))
  109 #define mtodo(m, o)     ((void *)(((m)->m_data) + (o)))
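
For example, a protocol input routine typically uses these macros along the
following lines; struct ip and struct tcphdr are shown purely for illustration
and come from netinet/ip.h and netinet/tcp.h:

        /* Example only: treat the start of the mbuf data as an IP header,
         * then point at the transport header at offset 'ip_hl << 2'. */
        struct ip *ip = mtod(m, struct ip *);
        struct tcphdr *th = mtodo(m, ip->ip_hl << 2);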
  110 
  111 /*
  112  * Argument structure passed to UMA routines during mbuf and packet
  113  * allocations.
  114  */
  115 struct mb_args {
  116         int     flags;  /* Flags for mbuf being allocated */
  117         short   type;   /* Type of mbuf being allocated */
  118 };
  119 #endif /* _KERNEL */
  120 
  121 /*
  122  * Packet tag structure (see below for details).
  123  */
  124 struct m_tag {
  125         SLIST_ENTRY(m_tag)      m_tag_link;     /* List of packet tags */
  126         u_int16_t               m_tag_id;       /* Tag ID */
  127         u_int16_t               m_tag_len;      /* Length of data */
  128         u_int32_t               m_tag_cookie;   /* ABI/Module ID */
  129         void                    (*m_tag_free)(struct m_tag *);
  130 };
  131 
  132 /*
  133  * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
  134  * Size ILP32: 48
  135  *       LP64: 56
  136  * Compile-time assertions in uipc_mbuf.c test these values to ensure that
  137  * they are correct.
  138  */
  139 struct pkthdr {
  140         struct ifnet    *rcvif;         /* rcv interface */
  141         SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
  142         int32_t          len;           /* total packet length */
  143 
  144         /* Layer crossing persistent information. */
   145         uint32_t         flowid;        /* packet's flow id (e.g., RSS hash) */
  146         uint64_t         csum_flags;    /* checksum and offload features */
  147         uint16_t         fibnum;        /* this packet should use this fib */
  148         uint8_t          cosqos;        /* class/quality of service */
  149         uint8_t          rsstype;       /* hash type */
  150         uint8_t          l2hlen;        /* layer 2 header length */
  151         uint8_t          l3hlen;        /* layer 3 header length */
  152         uint8_t          l4hlen;        /* layer 4 header length */
  153         uint8_t          l5hlen;        /* layer 5 header length */
  154         union {
  155                 uint8_t  eight[8];
  156                 uint16_t sixteen[4];
  157                 uint32_t thirtytwo[2];
  158                 uint64_t sixtyfour[1];
  159                 uintptr_t unintptr[1];
  160                 void    *ptr;
  161         } PH_per;
  162 
  163         /* Layer specific non-persistent local storage for reassembly, etc. */
  164         union {
  165                 uint8_t  eight[8];
  166                 uint16_t sixteen[4];
  167                 uint32_t thirtytwo[2];
  168                 uint64_t sixtyfour[1];
  169                 uintptr_t unintptr[1];
  170                 void    *ptr;
  171         } PH_loc;
  172 };
  173 #define ether_vtag      PH_per.sixteen[0]
  174 #define PH_vt           PH_per
  175 #define vt_nrecs        sixteen[0]
  176 #define tso_segsz       PH_per.sixteen[1]
  177 #define csum_phsum      PH_per.sixteen[2]
  178 #define csum_data       PH_per.thirtytwo[1]
  179 
  180 /*
  181  * Description of external storage mapped into mbuf; valid only if M_EXT is
  182  * set.
  183  * Size ILP32: 28
  184  *       LP64: 48
  185  * Compile-time assertions in uipc_mbuf.c test these values to ensure that
  186  * they are correct.
  187  */
  188 struct m_ext {
  189         union {
  190                 volatile u_int   ext_count;     /* value of ref count info */
  191                 volatile u_int  *ext_cnt;       /* pointer to ref count info */
  192         };
  193         caddr_t          ext_buf;       /* start of buffer */
  194         uint32_t         ext_size;      /* size of buffer, for ext_free */
  195         uint32_t         ext_type:8,    /* type of external storage */
  196                          ext_flags:24;  /* external storage mbuf flags */
  197         void            (*ext_free)     /* free routine if not the usual */
  198                             (struct mbuf *, void *, void *);
  199         void            *ext_arg1;      /* optional argument pointer */
  200         void            *ext_arg2;      /* optional argument pointer */
  201 };
  202 
  203 /*
  204  * The core of the mbuf object along with some shortcut defines for practical
  205  * purposes.
  206  */
  207 struct mbuf {
  208         /*
  209          * Header present at the beginning of every mbuf.
  210          * Size ILP32: 24
  211          *      LP64: 32
  212          * Compile-time assertions in uipc_mbuf.c test these values to ensure
  213          * that they are correct.
  214          */
  215         union { /* next buffer in chain */
  216                 struct mbuf             *m_next;
  217                 SLIST_ENTRY(mbuf)       m_slist;
  218                 STAILQ_ENTRY(mbuf)      m_stailq;
  219         };
  220         union { /* next chain in queue/record */
  221                 struct mbuf             *m_nextpkt;
  222                 SLIST_ENTRY(mbuf)       m_slistpkt;
  223                 STAILQ_ENTRY(mbuf)      m_stailqpkt;
  224         };
  225         caddr_t          m_data;        /* location of data */
  226         int32_t          m_len;         /* amount of data in this mbuf */
  227         uint32_t         m_type:8,      /* type of data in this mbuf */
  228                          m_flags:24;    /* flags; see below */
  229 #if !defined(__LP64__)
  230         uint32_t         m_pad;         /* pad for 64bit alignment */
  231 #endif
  232 
  233         /*
  234          * A set of optional headers (packet header, external storage header)
  235          * and internal data storage.  Historically, these arrays were sized
  236          * to MHLEN (space left after a packet header) and MLEN (space left
  237          * after only a regular mbuf header); they are now variable size in
  238          * order to support future work on variable-size mbufs.
  239          */
  240         union {
  241                 struct {
  242                         struct pkthdr   m_pkthdr;       /* M_PKTHDR set */
  243                         union {
  244                                 struct m_ext    m_ext;  /* M_EXT set */
  245                                 char            m_pktdat[0];
  246                         };
  247                 };
  248                 char    m_dat[0];                       /* !M_PKTHDR, !M_EXT */
  249         };
  250 };
  251 
  252 /*
  253  * mbuf flags of global significance and layer crossing.
  254  * Those of only protocol/layer specific significance are to be mapped
  255  * to M_PROTO[1-12] and cleared at layer handoff boundaries.
  256  * NB: Limited to the lower 24 bits.
  257  */
  258 #define M_EXT           0x00000001 /* has associated external storage */
  259 #define M_PKTHDR        0x00000002 /* start of record */
  260 #define M_EOR           0x00000004 /* end of record */
  261 #define M_RDONLY        0x00000008 /* associated data is marked read-only */
  262 #define M_BCAST         0x00000010 /* send/received as link-level broadcast */
  263 #define M_MCAST         0x00000020 /* send/received as link-level multicast */
  264 #define M_PROMISC       0x00000040 /* packet was not for us */
  265 #define M_VLANTAG       0x00000080 /* ether_vtag is valid */
  266 #define M_UNUSED_8      0x00000100 /* --available-- */
  267 #define M_NOFREE        0x00000200 /* do not free mbuf, embedded in cluster */
  268 
  269 #define M_PROTO1        0x00001000 /* protocol-specific */
  270 #define M_PROTO2        0x00002000 /* protocol-specific */
  271 #define M_PROTO3        0x00004000 /* protocol-specific */
  272 #define M_PROTO4        0x00008000 /* protocol-specific */
  273 #define M_PROTO5        0x00010000 /* protocol-specific */
  274 #define M_PROTO6        0x00020000 /* protocol-specific */
  275 #define M_PROTO7        0x00040000 /* protocol-specific */
  276 #define M_PROTO8        0x00080000 /* protocol-specific */
  277 #define M_PROTO9        0x00100000 /* protocol-specific */
  278 #define M_PROTO10       0x00200000 /* protocol-specific */
  279 #define M_PROTO11       0x00400000 /* protocol-specific */
  280 #define M_PROTO12       0x00800000 /* protocol-specific */
  281 
  282 #define MB_DTOR_SKIP    0x1     /* don't pollute the cache by touching a freed mbuf */
  283 
  284 /*
  285  * Flags to purge when crossing layers.
  286  */
  287 #define M_PROTOFLAGS \
  288     (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8|\
  289      M_PROTO9|M_PROTO10|M_PROTO11|M_PROTO12)
  290 
  291 /*
  292  * Flags preserved when copying m_pkthdr.
  293  */
  294 #define M_COPYFLAGS \
  295     (M_PKTHDR|M_EOR|M_RDONLY|M_BCAST|M_MCAST|M_PROMISC|M_VLANTAG| \
  296      M_PROTOFLAGS)
  297 
  298 /*
  299  * Mbuf flag description for use with printf(9) %b identifier.
  300  */
  301 #define M_FLAG_BITS \
  302     "\2\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY\5M_BCAST\6M_MCAST" \
  303     "\7M_PROMISC\10M_VLANTAG"
  304 #define M_FLAG_PROTOBITS \
  305     "\15M_PROTO1\16M_PROTO2\17M_PROTO3\20M_PROTO4\21M_PROTO5" \
  306     "\22M_PROTO6\23M_PROTO7\24M_PROTO8\25M_PROTO9\26M_PROTO10" \
  307     "\27M_PROTO11\30M_PROTO12"
  308 #define M_FLAG_PRINTF (M_FLAG_BITS M_FLAG_PROTOBITS)
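
These strings feed the kernel printf(9) %b conversion, which consumes the
integer value followed by the bit-description string; a hedged example:

        /* Example: dump the flags of mbuf 'm' in symbolic form. */
        printf("mbuf %p flags %b\n", m, m->m_flags, M_FLAG_PRINTF);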
  309 
  310 /*
  311  * Network interface cards are able to hash protocol fields (such as IPv4
   312  * addresses and TCP port numbers) to classify packets into flows.  These flows
  313  * can then be used to maintain ordering while delivering packets to the OS
  314  * via parallel input queues, as well as to provide a stateless affinity
  315  * model.  NIC drivers can pass up the hash via m->m_pkthdr.flowid, and set
  316  * m_flag fields to indicate how the hash should be interpreted by the
  317  * network stack.
  318  *
  319  * Most NICs support RSS, which provides ordering and explicit affinity, and
  320  * use the hash m_flag bits to indicate what header fields were covered by
  321  * the hash.  M_HASHTYPE_OPAQUE and M_HASHTYPE_OPAQUE_HASH can be set by non-
  322  * RSS cards or configurations that provide an opaque flow identifier, allowing
  323  * for ordering and distribution without explicit affinity.  Additionally,
  324  * M_HASHTYPE_OPAQUE_HASH indicates that the flow identifier has hash
  325  * properties.
  326  */
  327 #define M_HASHTYPE_HASHPROP             0x80    /* has hash properties */
  328 #define M_HASHTYPE_HASH(t)              (M_HASHTYPE_HASHPROP | (t))
  329 /* Microsoft RSS standard hash types */
  330 #define M_HASHTYPE_NONE                 0
  331 #define M_HASHTYPE_RSS_IPV4             M_HASHTYPE_HASH(1) /* IPv4 2-tuple */
  332 #define M_HASHTYPE_RSS_TCP_IPV4         M_HASHTYPE_HASH(2) /* TCPv4 4-tuple */
  333 #define M_HASHTYPE_RSS_IPV6             M_HASHTYPE_HASH(3) /* IPv6 2-tuple */
  334 #define M_HASHTYPE_RSS_TCP_IPV6         M_HASHTYPE_HASH(4) /* TCPv6 4-tuple */
  335 #define M_HASHTYPE_RSS_IPV6_EX          M_HASHTYPE_HASH(5) /* IPv6 2-tuple +
  336                                                             * ext hdrs */
  337 #define M_HASHTYPE_RSS_TCP_IPV6_EX      M_HASHTYPE_HASH(6) /* TCPv6 4-tuple +
  338                                                             * ext hdrs */
  339 /* Non-standard RSS hash types */
  340 #define M_HASHTYPE_RSS_UDP_IPV4         M_HASHTYPE_HASH(7) /* IPv4 UDP 4-tuple*/
  341 #define M_HASHTYPE_RSS_UDP_IPV4_EX      M_HASHTYPE_HASH(8) /* IPv4 UDP 4-tuple +
  342                                                             * ext hdrs */
  343 #define M_HASHTYPE_RSS_UDP_IPV6         M_HASHTYPE_HASH(9) /* IPv6 UDP 4-tuple*/
  344 #define M_HASHTYPE_RSS_UDP_IPV6_EX      M_HASHTYPE_HASH(10)/* IPv6 UDP 4-tuple +
  345                                                             * ext hdrs */
  346 
  347 #define M_HASHTYPE_OPAQUE               63      /* ordering, not affinity */
  348 #define M_HASHTYPE_OPAQUE_HASH          M_HASHTYPE_HASH(M_HASHTYPE_OPAQUE)
  349                                                 /* ordering+hash, not affinity*/
  350 
  351 #define M_HASHTYPE_CLEAR(m)     ((m)->m_pkthdr.rsstype = 0)
  352 #define M_HASHTYPE_GET(m)       ((m)->m_pkthdr.rsstype)
  353 #define M_HASHTYPE_SET(m, v)    ((m)->m_pkthdr.rsstype = (v))
  354 #define M_HASHTYPE_TEST(m, v)   (M_HASHTYPE_GET(m) == (v))
  355 #define M_HASHTYPE_ISHASH(m)    (M_HASHTYPE_GET(m) & M_HASHTYPE_HASHPROP)
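
A driver receive path that obtains an RSS hash from the NIC would typically
record it roughly as follows; rxd_hash, nbuckets, and bucket are hypothetical
names used only for illustration:

        /* Driver side (hypothetical): record the NIC-computed hash and which
         * header fields it covered. */
        m->m_pkthdr.flowid = rxd_hash;
        M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);

        /* Stack side: rely on the flowid for distribution only if it really
         * has hash properties. */
        if (M_HASHTYPE_ISHASH(m))
                bucket = m->m_pkthdr.flowid % nbuckets;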
  356 
  357 /*
  358  * COS/QOS class and quality of service tags.
  359  * It uses DSCP code points as base.
  360  */
  361 #define QOS_DSCP_CS0            0x00
  362 #define QOS_DSCP_DEF            QOS_DSCP_CS0
  363 #define QOS_DSCP_CS1            0x20
  364 #define QOS_DSCP_AF11           0x28
  365 #define QOS_DSCP_AF12           0x30
  366 #define QOS_DSCP_AF13           0x38
  367 #define QOS_DSCP_CS2            0x40
  368 #define QOS_DSCP_AF21           0x48
  369 #define QOS_DSCP_AF22           0x50
  370 #define QOS_DSCP_AF23           0x58
  371 #define QOS_DSCP_CS3            0x60
  372 #define QOS_DSCP_AF31           0x68
  373 #define QOS_DSCP_AF32           0x70
  374 #define QOS_DSCP_AF33           0x78
  375 #define QOS_DSCP_CS4            0x80
  376 #define QOS_DSCP_AF41           0x88
  377 #define QOS_DSCP_AF42           0x90
  378 #define QOS_DSCP_AF43           0x98
  379 #define QOS_DSCP_CS5            0xa0
  380 #define QOS_DSCP_EF             0xb8
  381 #define QOS_DSCP_CS6            0xc0
  382 #define QOS_DSCP_CS7            0xe0
  383 
  384 /*
  385  * External mbuf storage buffer types.
  386  */
  387 #define EXT_CLUSTER     1       /* mbuf cluster */
  388 #define EXT_SFBUF       2       /* sendfile(2)'s sf_buf */
  389 #define EXT_JUMBOP      3       /* jumbo cluster page sized */
  390 #define EXT_JUMBO9      4       /* jumbo cluster 9216 bytes */
   391 #define EXT_JUMBO16     5       /* jumbo cluster 16384 bytes */
  392 #define EXT_PACKET      6       /* mbuf+cluster from packet zone */
  393 #define EXT_MBUF        7       /* external mbuf reference (M_IOVEC) */
  394 #define EXT_SFBUF_NOCACHE 8     /* sendfile(2)'s sf_buf not to be cached */
  395 
  396 #define EXT_VENDOR1     224     /* for vendor-internal use */
  397 #define EXT_VENDOR2     225     /* for vendor-internal use */
  398 #define EXT_VENDOR3     226     /* for vendor-internal use */
  399 #define EXT_VENDOR4     227     /* for vendor-internal use */
  400 
  401 #define EXT_EXP1        244     /* for experimental use */
  402 #define EXT_EXP2        245     /* for experimental use */
  403 #define EXT_EXP3        246     /* for experimental use */
  404 #define EXT_EXP4        247     /* for experimental use */
  405 
  406 #define EXT_NET_DRV     252     /* custom ext_buf provided by net driver(s) */
  407 #define EXT_MOD_TYPE    253     /* custom module's ext_buf type */
  408 #define EXT_DISPOSABLE  254     /* can throw this buffer away w/page flipping */
  409 #define EXT_EXTREF      255     /* has externally maintained ext_cnt ptr */
  410 
  411 /*
  412  * Flags for external mbuf buffer types.
  413  * NB: limited to the lower 24 bits.
  414  */
  415 #define EXT_FLAG_EMBREF         0x000001        /* embedded ext_count */
  416 #define EXT_FLAG_EXTREF         0x000002        /* external ext_cnt, notyet */
  417 
  418 #define EXT_FLAG_NOFREE         0x000010        /* don't free mbuf to pool, notyet */
  419 
  420 #define EXT_FLAG_VENDOR1        0x010000        /* for vendor-internal use */
  421 #define EXT_FLAG_VENDOR2        0x020000        /* for vendor-internal use */
  422 #define EXT_FLAG_VENDOR3        0x040000        /* for vendor-internal use */
  423 #define EXT_FLAG_VENDOR4        0x080000        /* for vendor-internal use */
  424 
  425 #define EXT_FLAG_EXP1           0x100000        /* for experimental use */
  426 #define EXT_FLAG_EXP2           0x200000        /* for experimental use */
  427 #define EXT_FLAG_EXP3           0x400000        /* for experimental use */
  428 #define EXT_FLAG_EXP4           0x800000        /* for experimental use */
  429 
  430 /*
  431  * EXT flag description for use with printf(9) %b identifier.
  432  */
  433 #define EXT_FLAG_BITS \
  434     "\2\1EXT_FLAG_EMBREF\2EXT_FLAG_EXTREF\5EXT_FLAG_NOFREE" \
  435     "\21EXT_FLAG_VENDOR1\22EXT_FLAG_VENDOR2\23EXT_FLAG_VENDOR3" \
  436     "\24EXT_FLAG_VENDOR4\25EXT_FLAG_EXP1\26EXT_FLAG_EXP2\27EXT_FLAG_EXP3" \
  437     "\30EXT_FLAG_EXP4"
  438 
  439 /*
  440  * External reference/free functions.
  441  */
  442 void sf_ext_free(void *, void *);
  443 void sf_ext_free_nocache(void *, void *);
  444 
  445 /*
  446  * Flags indicating checksum, segmentation and other offload work to be
  447  * done, or already done, by hardware or lower layers.  It is split into
  448  * separate inbound and outbound flags.
  449  *
  450  * Outbound flags that are set by upper protocol layers requesting lower
  451  * layers, or ideally the hardware, to perform these offloading tasks.
  452  * For outbound packets this field and its flags can be directly tested
  453  * against ifnet if_hwassist.
  454  */
  455 #define CSUM_IP                 0x00000001      /* IP header checksum offload */
  456 #define CSUM_IP_UDP             0x00000002      /* UDP checksum offload */
  457 #define CSUM_IP_TCP             0x00000004      /* TCP checksum offload */
  458 #define CSUM_IP_SCTP            0x00000008      /* SCTP checksum offload */
  459 #define CSUM_IP_TSO             0x00000010      /* TCP segmentation offload */
  460 #define CSUM_IP_ISCSI           0x00000020      /* iSCSI checksum offload */
  461 
  462 #define CSUM_IP6_UDP            0x00000200      /* UDP checksum offload */
  463 #define CSUM_IP6_TCP            0x00000400      /* TCP checksum offload */
  464 #define CSUM_IP6_SCTP           0x00000800      /* SCTP checksum offload */
  465 #define CSUM_IP6_TSO            0x00001000      /* TCP segmentation offload */
  466 #define CSUM_IP6_ISCSI          0x00002000      /* iSCSI checksum offload */
  467 
  468 /* Inbound checksum support where the checksum was verified by hardware. */
  469 #define CSUM_L3_CALC            0x01000000      /* calculated layer 3 csum */
  470 #define CSUM_L3_VALID           0x02000000      /* checksum is correct */
  471 #define CSUM_L4_CALC            0x04000000      /* calculated layer 4 csum */
  472 #define CSUM_L4_VALID           0x08000000      /* checksum is correct */
  473 #define CSUM_L5_CALC            0x10000000      /* calculated layer 5 csum */
  474 #define CSUM_L5_VALID           0x20000000      /* checksum is correct */
  475 #define CSUM_COALESCED          0x40000000      /* contains merged segments */
  476 
  477 /*
  478  * CSUM flag description for use with printf(9) %b identifier.
  479  */
  480 #define CSUM_BITS \
  481     "\2\1CSUM_IP\2CSUM_IP_UDP\3CSUM_IP_TCP\4CSUM_IP_SCTP\5CSUM_IP_TSO" \
  482     "\6CSUM_IP_ISCSI" \
  483     "\12CSUM_IP6_UDP\13CSUM_IP6_TCP\14CSUM_IP6_SCTP\15CSUM_IP6_TSO" \
  484     "\16CSUM_IP6_ISCSI" \
  485     "\31CSUM_L3_CALC\32CSUM_L3_VALID\33CSUM_L4_CALC\34CSUM_L4_VALID" \
  486     "\35CSUM_L5_CALC\36CSUM_L5_VALID\37CSUM_COALESCED"
  487 
  488 /* CSUM flags compatibility mappings. */
  489 #define CSUM_IP_CHECKED         CSUM_L3_CALC
  490 #define CSUM_IP_VALID           CSUM_L3_VALID
  491 #define CSUM_DATA_VALID         CSUM_L4_VALID
  492 #define CSUM_PSEUDO_HDR         CSUM_L4_CALC
  493 #define CSUM_SCTP_VALID         CSUM_L4_VALID
  494 #define CSUM_DELAY_DATA         (CSUM_TCP|CSUM_UDP)
  495 #define CSUM_DELAY_IP           CSUM_IP         /* Only v4, no v6 IP hdr csum */
  496 #define CSUM_DELAY_DATA_IPV6    (CSUM_TCP_IPV6|CSUM_UDP_IPV6)
  497 #define CSUM_DATA_VALID_IPV6    CSUM_DATA_VALID
  498 #define CSUM_TCP                CSUM_IP_TCP
  499 #define CSUM_UDP                CSUM_IP_UDP
  500 #define CSUM_SCTP               CSUM_IP_SCTP
  501 #define CSUM_TSO                (CSUM_IP_TSO|CSUM_IP6_TSO)
  502 #define CSUM_UDP_IPV6           CSUM_IP6_UDP
  503 #define CSUM_TCP_IPV6           CSUM_IP6_TCP
  504 #define CSUM_SCTP_IPV6          CSUM_IP6_SCTP
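
As a hedged illustration of how these flags are used: on output an upper layer
requests offload only when the interface advertises it in if_hwassist, and on
input the stack trusts a hardware checksum only when both the CALC and VALID
bits are present.  The ifp and m variables below are assumed context:

        /* Outbound: ask for TCP checksum offload if the NIC supports it. */
        if (ifp->if_hwassist & CSUM_TCP) {
                m->m_pkthdr.csum_flags |= CSUM_TCP;
                m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
        }

        /* Inbound: skip software verification only if hardware both computed
         * and verified the layer 4 checksum. */
        if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC | CSUM_L4_VALID)) ==
            (CSUM_L4_CALC | CSUM_L4_VALID))
                ;       /* checksum already known good */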
  505 
  506 /*
  507  * mbuf types describing the content of the mbuf (including external storage).
  508  */
  509 #define MT_NOTMBUF      0       /* USED INTERNALLY ONLY! Object is not mbuf */
  510 #define MT_DATA         1       /* dynamic (data) allocation */
  511 #define MT_HEADER       MT_DATA /* packet header, use M_PKTHDR instead */
  512 
  513 #define MT_VENDOR1      4       /* for vendor-internal use */
  514 #define MT_VENDOR2      5       /* for vendor-internal use */
  515 #define MT_VENDOR3      6       /* for vendor-internal use */
  516 #define MT_VENDOR4      7       /* for vendor-internal use */
  517 
  518 #define MT_SONAME       8       /* socket name */
  519 
  520 #define MT_EXP1         9       /* for experimental use */
  521 #define MT_EXP2         10      /* for experimental use */
  522 #define MT_EXP3         11      /* for experimental use */
  523 #define MT_EXP4         12      /* for experimental use */
  524 
  525 #define MT_CONTROL      14      /* extra-data protocol message */
  526 #define MT_OOBDATA      15      /* expedited data  */
  527 #define MT_NTYPES       16      /* number of mbuf types for mbtypes[] */
  528 
  529 #define MT_NOINIT       255     /* Not a type but a flag to allocate
  530                                    a non-initialized mbuf */
  531 
  532 /*
  533  * String names of mbuf-related UMA(9) and malloc(9) types.  Exposed to
  534  * !_KERNEL so that monitoring tools can look up the zones with
  535  * libmemstat(3).
  536  */
  537 #define MBUF_MEM_NAME           "mbuf"
  538 #define MBUF_CLUSTER_MEM_NAME   "mbuf_cluster"
  539 #define MBUF_PACKET_MEM_NAME    "mbuf_packet"
  540 #define MBUF_JUMBOP_MEM_NAME    "mbuf_jumbo_page"
  541 #define MBUF_JUMBO9_MEM_NAME    "mbuf_jumbo_9k"
  542 #define MBUF_JUMBO16_MEM_NAME   "mbuf_jumbo_16k"
  543 #define MBUF_TAG_MEM_NAME       "mbuf_tag"
  544 #define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt"
  545 
  546 #ifdef _KERNEL
  547 
  548 #ifdef WITNESS
  549 #define MBUF_CHECKSLEEP(how) do {                                       \
  550         if (how == M_WAITOK)                                            \
  551                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,         \
  552                     "Sleeping in \"%s\"", __func__);                    \
  553 } while (0)
  554 #else
  555 #define MBUF_CHECKSLEEP(how)
  556 #endif
  557 
  558 /*
  559  * Network buffer allocation API
  560  *
  561  * The rest of it is defined in kern/kern_mbuf.c
  562  */
  563 extern uma_zone_t       zone_mbuf;
  564 extern uma_zone_t       zone_clust;
  565 extern uma_zone_t       zone_pack;
  566 extern uma_zone_t       zone_jumbop;
  567 extern uma_zone_t       zone_jumbo9;
  568 extern uma_zone_t       zone_jumbo16;
  569 
  570 void             mb_dupcl(struct mbuf *, struct mbuf *);
  571 void             mb_free_ext(struct mbuf *);
  572 void             m_adj(struct mbuf *, int);
  573 int              m_apply(struct mbuf *, int, int,
  574                     int (*)(void *, void *, u_int), void *);
  575 int              m_append(struct mbuf *, int, c_caddr_t);
  576 void             m_cat(struct mbuf *, struct mbuf *);
  577 void             m_catpkt(struct mbuf *, struct mbuf *);
  578 int              m_clget(struct mbuf *m, int how);
  579 void            *m_cljget(struct mbuf *m, int how, int size);
  580 struct mbuf     *m_collapse(struct mbuf *, int, int);
  581 void             m_copyback(struct mbuf *, int, int, c_caddr_t);
  582 void             m_copydata(const struct mbuf *, int, int, caddr_t);
  583 struct mbuf     *m_copym(struct mbuf *, int, int, int);
  584 struct mbuf     *m_copypacket(struct mbuf *, int);
  585 void             m_copy_pkthdr(struct mbuf *, struct mbuf *);
  586 struct mbuf     *m_copyup(struct mbuf *, int, int);
  587 struct mbuf     *m_defrag(struct mbuf *, int);
  588 void             m_demote_pkthdr(struct mbuf *);
  589 void             m_demote(struct mbuf *, int, int);
  590 struct mbuf     *m_devget(char *, int, int, struct ifnet *,
  591                     void (*)(char *, caddr_t, u_int));
  592 struct mbuf     *m_dup(const struct mbuf *, int);
  593 int              m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
  594 void             m_extadd(struct mbuf *, caddr_t, u_int,
  595                     void (*)(struct mbuf *, void *, void *), void *, void *,
  596                     int, int);
  597 u_int            m_fixhdr(struct mbuf *);
  598 struct mbuf     *m_fragment(struct mbuf *, int, int);
  599 void             m_freem(struct mbuf *);
  600 struct mbuf     *m_get2(int, int, short, int);
  601 struct mbuf     *m_getjcl(int, short, int, int);
  602 struct mbuf     *m_getm2(struct mbuf *, int, int, short, int);
  603 struct mbuf     *m_getptr(struct mbuf *, int, int *);
  604 u_int            m_length(struct mbuf *, struct mbuf **);
  605 int              m_mbuftouio(struct uio *, struct mbuf *, int);
  606 void             m_move_pkthdr(struct mbuf *, struct mbuf *);
  607 int              m_pkthdr_init(struct mbuf *, int);
  608 struct mbuf     *m_prepend(struct mbuf *, int, int);
  609 void             m_print(const struct mbuf *, int);
  610 struct mbuf     *m_pulldown(struct mbuf *, int, int, int *);
  611 struct mbuf     *m_pullup(struct mbuf *, int);
  612 int              m_sanity(struct mbuf *, int);
  613 struct mbuf     *m_split(struct mbuf *, int, int);
  614 struct mbuf     *m_uiotombuf(struct uio *, int, int, int, int);
  615 struct mbuf     *m_unshare(struct mbuf *, int);
  616 
  617 static __inline int
  618 m_gettype(int size)
  619 {
  620         int type;
  621 
  622         switch (size) {
  623         case MSIZE:
  624                 type = EXT_MBUF;
  625                 break;
  626         case MCLBYTES:
  627                 type = EXT_CLUSTER;
  628                 break;
  629 #if MJUMPAGESIZE != MCLBYTES
  630         case MJUMPAGESIZE:
  631                 type = EXT_JUMBOP;
  632                 break;
  633 #endif
  634         case MJUM9BYTES:
  635                 type = EXT_JUMBO9;
  636                 break;
  637         case MJUM16BYTES:
  638                 type = EXT_JUMBO16;
  639                 break;
  640         default:
  641                 panic("%s: invalid cluster size %d", __func__, size);
  642         }
  643 
  644         return (type);
  645 }
  646 
  647 /*
   648  * Associate an external reference counted buffer with an mbuf.
  649  */
  650 static __inline void
  651 m_extaddref(struct mbuf *m, caddr_t buf, u_int size, u_int *ref_cnt,
  652     void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2)
  653 {
  654 
  655         KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));
  656 
  657         atomic_add_int(ref_cnt, 1);
  658         m->m_flags |= M_EXT;
  659         m->m_ext.ext_buf = buf;
  660         m->m_ext.ext_cnt = ref_cnt;
  661         m->m_data = m->m_ext.ext_buf;
  662         m->m_ext.ext_size = size;
  663         m->m_ext.ext_free = freef;
  664         m->m_ext.ext_arg1 = arg1;
  665         m->m_ext.ext_arg2 = arg2;
  666         m->m_ext.ext_type = EXT_EXTREF;
  667         m->m_ext.ext_flags = 0;
  668 }
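
A driver that hands a long-lived, externally reference-counted buffer to the
stack might attach it roughly as sketched below; struct mydrv_buf,
mydrv_buf_release(), and the field names are hypothetical:

        /* Hypothetical free routine matching the ext_free signature. */
        static void
        mydrv_ext_free(struct mbuf *m, void *arg1, void *arg2)
        {
                struct mydrv_buf *b = arg1;

                mydrv_buf_release(b);           /* drop the driver's reference */
        }

        /* Attach the buffer; m_extaddref() bumps the external ext_cnt. */
        m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m != NULL)
                m_extaddref(m, b->data, b->size, &b->refcnt, mydrv_ext_free,
                    b, NULL);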
  669 
  670 static __inline uma_zone_t
  671 m_getzone(int size)
  672 {
  673         uma_zone_t zone;
  674 
  675         switch (size) {
  676         case MCLBYTES:
  677                 zone = zone_clust;
  678                 break;
  679 #if MJUMPAGESIZE != MCLBYTES
  680         case MJUMPAGESIZE:
  681                 zone = zone_jumbop;
  682                 break;
  683 #endif
  684         case MJUM9BYTES:
  685                 zone = zone_jumbo9;
  686                 break;
  687         case MJUM16BYTES:
  688                 zone = zone_jumbo16;
  689                 break;
  690         default:
  691                 panic("%s: invalid cluster size %d", __func__, size);
  692         }
  693 
  694         return (zone);
  695 }
  696 
  697 /*
  698  * Initialize an mbuf with linear storage.
  699  *
   700  * Inline because the consumer text overhead will be roughly the same whether
   701  * we initialize here or call a function with this many parameters, and the
   702  * M_PKTHDR test should go away with constant propagation for !MGETHDR.
  703  */
  704 static __inline int
  705 m_init(struct mbuf *m, int how, short type, int flags)
  706 {
  707         int error;
  708 
  709         m->m_next = NULL;
  710         m->m_nextpkt = NULL;
  711         m->m_data = m->m_dat;
  712         m->m_len = 0;
  713         m->m_flags = flags;
  714         m->m_type = type;
  715         if (flags & M_PKTHDR)
  716                 error = m_pkthdr_init(m, how);
  717         else
  718                 error = 0;
  719 
  720         MBUF_PROBE5(m__init, m, how, type, flags, error);
  721         return (error);
  722 }
  723 
  724 static __inline struct mbuf *
  725 m_get(int how, short type)
  726 {
  727         struct mbuf *m;
  728         struct mb_args args;
  729 
  730         args.flags = 0;
  731         args.type = type;
  732         m = uma_zalloc_arg(zone_mbuf, &args, how);
  733         MBUF_PROBE3(m__get, how, type, m);
  734         return (m);
  735 }
  736 
  737 static __inline struct mbuf *
  738 m_gethdr(int how, short type)
  739 {
  740         struct mbuf *m;
  741         struct mb_args args;
  742 
  743         args.flags = M_PKTHDR;
  744         args.type = type;
  745         m = uma_zalloc_arg(zone_mbuf, &args, how);
  746         MBUF_PROBE3(m__gethdr, how, type, m);
  747         return (m);
  748 }
  749 
  750 static __inline struct mbuf *
  751 m_getcl(int how, short type, int flags)
  752 {
  753         struct mbuf *m;
  754         struct mb_args args;
  755 
  756         args.flags = flags;
  757         args.type = type;
  758         m = uma_zalloc_arg(zone_pack, &args, how);
  759         MBUF_PROBE4(m__getcl, how, type, flags, m);
  760         return (m);
  761 }
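
A common allocation pattern is a packet-header mbuf backed by a 2K cluster;
payload and paylen below are hypothetical:

        /* Example: allocate mbuf+cluster and copy a payload into it. */
        struct mbuf *m;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        memcpy(mtod(m, void *), payload, paylen);
        m->m_len = m->m_pkthdr.len = paylen;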
  762 
  763 /*
  764  * XXX: m_cljset() is a dangerous API.  One must attach only a new,
  765  * unreferenced cluster to an mbuf(9).  It is not possible to assert
  766  * that, so care can be taken only by users of the API.
  767  */
  768 static __inline void
  769 m_cljset(struct mbuf *m, void *cl, int type)
  770 {
  771         int size;
  772 
  773         switch (type) {
  774         case EXT_CLUSTER:
  775                 size = MCLBYTES;
  776                 break;
  777 #if MJUMPAGESIZE != MCLBYTES
  778         case EXT_JUMBOP:
  779                 size = MJUMPAGESIZE;
  780                 break;
  781 #endif
  782         case EXT_JUMBO9:
  783                 size = MJUM9BYTES;
  784                 break;
  785         case EXT_JUMBO16:
  786                 size = MJUM16BYTES;
  787                 break;
  788         default:
  789                 panic("%s: unknown cluster type %d", __func__, type);
  790                 break;
  791         }
  792 
  793         m->m_data = m->m_ext.ext_buf = cl;
  794         m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
  795         m->m_ext.ext_size = size;
  796         m->m_ext.ext_type = type;
  797         m->m_ext.ext_flags = EXT_FLAG_EMBREF;
  798         m->m_ext.ext_count = 1;
  799         m->m_flags |= M_EXT;
  800         MBUF_PROBE3(m__cljset, m, cl, type);
  801 }
  802 
  803 static __inline void
  804 m_chtype(struct mbuf *m, short new_type)
  805 {
  806 
  807         m->m_type = new_type;
  808 }
  809 
  810 static __inline void
  811 m_clrprotoflags(struct mbuf *m)
  812 {
  813 
  814         while (m) {
  815                 m->m_flags &= ~M_PROTOFLAGS;
  816                 m = m->m_next;
  817         }
  818 }
  819 
  820 static __inline struct mbuf *
  821 m_last(struct mbuf *m)
  822 {
  823 
  824         while (m->m_next)
  825                 m = m->m_next;
  826         return (m);
  827 }
  828 
  829 static inline u_int
  830 m_extrefcnt(struct mbuf *m)
  831 {
  832 
  833         KASSERT(m->m_flags & M_EXT, ("%s: M_EXT missing", __func__));
  834 
  835         return ((m->m_ext.ext_flags & EXT_FLAG_EMBREF) ? m->m_ext.ext_count :
  836             *m->m_ext.ext_cnt);
  837 }
  838 
  839 /*
  840  * mbuf, cluster, and external object allocation macros (for compatibility
  841  * purposes).
  842  */
  843 #define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from))
  844 #define MGET(m, how, type)      ((m) = m_get((how), (type)))
  845 #define MGETHDR(m, how, type)   ((m) = m_gethdr((how), (type)))
  846 #define MCLGET(m, how)          m_clget((m), (how))
  847 #define MEXTADD(m, buf, size, free, arg1, arg2, flags, type)            \
  848     m_extadd((m), (caddr_t)(buf), (size), (free), (arg1), (arg2),       \
  849     (flags), (type))
  850 #define m_getm(m, len, how, type)                                       \
  851     m_getm2((m), (len), (how), (type), M_PKTHDR)
  852 
  853 /*
   854  * Evaluates to TRUE if it is safe to write to the mbuf m's data region (this
   855  * can be either the local data payload or an external buffer area, depending
   856  * on whether M_EXT is set).
  857  */
  858 #define M_WRITABLE(m)   (!((m)->m_flags & M_RDONLY) &&                  \
  859                          (!(((m)->m_flags & M_EXT)) ||                  \
  860                          (m_extrefcnt(m) == 1)))
  861 
  862 /* Check if the supplied mbuf has a packet header, or else panic. */
  863 #define M_ASSERTPKTHDR(m)                                               \
  864         KASSERT((m) != NULL && (m)->m_flags & M_PKTHDR,                 \
  865             ("%s: no mbuf packet header!", __func__))
  866 
  867 /*
  868  * Ensure that the supplied mbuf is a valid, non-free mbuf.
  869  *
  870  * XXX: Broken at the moment.  Need some UMA magic to make it work again.
  871  */
  872 #define M_ASSERTVALID(m)                                                \
  873         KASSERT((((struct mbuf *)m)->m_flags & 0) == 0,                 \
  874             ("%s: attempted use of a free mbuf!", __func__))
  875 
  876 /*
  877  * Return the address of the start of the buffer associated with an mbuf,
  878  * handling external storage, packet-header mbufs, and regular data mbufs.
  879  */
  880 #define M_START(m)                                                      \
  881         (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf :                  \
  882          ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] :                \
  883          &(m)->m_dat[0])
  884 
  885 /*
  886  * Return the size of the buffer associated with an mbuf, handling external
  887  * storage, packet-header mbufs, and regular data mbufs.
  888  */
  889 #define M_SIZE(m)                                                       \
  890         (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size :                 \
  891          ((m)->m_flags & M_PKTHDR) ? MHLEN :                            \
  892          MLEN)
  893 
  894 /*
  895  * Set the m_data pointer of a newly allocated mbuf to place an object of the
  896  * specified size at the end of the mbuf, longword aligned.
  897  *
  898  * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as
  899  * separate macros, each asserting that it was called at the proper moment.
  900  * This required callers to themselves test the storage type and call the
  901  * right one.  Rather than require callers to be aware of those layout
  902  * decisions, we centralize here.
  903  */
  904 static __inline void
  905 m_align(struct mbuf *m, int len)
  906 {
  907 #ifdef INVARIANTS
  908         const char *msg = "%s: not a virgin mbuf";
  909 #endif
  910         int adjust;
  911 
  912         KASSERT(m->m_data == M_START(m), (msg, __func__));
  913 
  914         adjust = M_SIZE(m) - len;
  915         m->m_data += adjust &~ (sizeof(long)-1);
  916 }
  917 
  918 #define M_ALIGN(m, len)         m_align(m, len)
  919 #define MH_ALIGN(m, len)        m_align(m, len)
  920 #define MEXT_ALIGN(m, len)      m_align(m, len)
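
A typical use is to push the data pointer toward the end of a freshly
allocated mbuf so that headers can later be prepended without copying; len is
a hypothetical payload size:

        /* Example: leave maximal leading space in a new packet-header mbuf. */
        m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m != NULL) {
                M_ALIGN(m, len);        /* only valid on a "virgin" mbuf */
                m->m_len = len;
        }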
  921 
  922 /*
  923  * Compute the amount of space available before the current start of data in
  924  * an mbuf.
  925  *
  926  * The M_WRITABLE() is a temporary, conservative safety measure: the burden
  927  * of checking writability of the mbuf data area rests solely with the caller.
  928  *
  929  * NB: In previous versions, M_LEADINGSPACE() would only check M_WRITABLE()
  930  * for mbufs with external storage.  We now allow mbuf-embedded data to be
  931  * read-only as well.
  932  */
  933 #define M_LEADINGSPACE(m)                                               \
  934         (M_WRITABLE(m) ? ((m)->m_data - M_START(m)) : 0)
  935 
  936 /*
  937  * Compute the amount of space available after the end of data in an mbuf.
  938  *
  939  * The M_WRITABLE() is a temporary, conservative safety measure: the burden
  940  * of checking writability of the mbuf data area rests solely with the caller.
  941  *
  942  * NB: In previous versions, M_TRAILINGSPACE() would only check M_WRITABLE()
  943  * for mbufs with external storage.  We now allow mbuf-embedded data to be
  944  * read-only as well.
  945  */
  946 #define M_TRAILINGSPACE(m)                                              \
  947         (M_WRITABLE(m) ?                                                \
  948             ((M_START(m) + M_SIZE(m)) - ((m)->m_data + (m)->m_len)) : 0)
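
For example, data can be appended in place only when the mbuf has room after
the existing contents; src and n are hypothetical, and note that
M_TRAILINGSPACE() already evaluates to 0 for read-only mbufs:

        /* Example: append n bytes in place, updating the packet length too. */
        if (M_TRAILINGSPACE(m) >= n) {
                memcpy(mtod(m, char *) + m->m_len, src, n);
                m->m_len += n;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len += n;
        }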
  949 
  950 /*
  951  * Arrange to prepend space of size plen to mbuf m.  If a new mbuf must be
  952  * allocated, how specifies whether to wait.  If the allocation fails, the
  953  * original mbuf chain is freed and m is set to NULL.
  954  */
  955 #define M_PREPEND(m, plen, how) do {                                    \
  956         struct mbuf **_mmp = &(m);                                      \
  957         struct mbuf *_mm = *_mmp;                                       \
  958         int _mplen = (plen);                                            \
  959         int __mhow = (how);                                             \
  960                                                                         \
  961         MBUF_CHECKSLEEP(how);                                           \
  962         if (M_LEADINGSPACE(_mm) >= _mplen) {                            \
  963                 _mm->m_data -= _mplen;                                  \
  964                 _mm->m_len += _mplen;                                   \
  965         } else                                                          \
  966                 _mm = m_prepend(_mm, _mplen, __mhow);                   \
  967         if (_mm != NULL && _mm->m_flags & M_PKTHDR)                     \
  968                 _mm->m_pkthdr.len += _mplen;                            \
  969         *_mmp = _mm;                                                    \
  970 } while (0)
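
A typical output-path use prepends room for a link-layer header; struct
ether_header comes from net/ethernet.h and is used here only as an example:

        /* Example: make room for an Ethernet header in front of the packet. */
        struct ether_header *eh;

        M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT);
        if (m == NULL)
                return (ENOBUFS);       /* chain was freed by M_PREPEND */
        eh = mtod(m, struct ether_header *);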
  971 
  972 /*
  973  * Change mbuf to new type.  This is a relatively expensive operation and
  974  * should be avoided.
  975  */
  976 #define MCHTYPE(m, t)   m_chtype((m), (t))
  977 
  978 /* Length to m_copy to copy all. */
  979 #define M_COPYALL       1000000000
  980 
  981 /* Compatibility with 4.3. */
  982 #define m_copy(m, o, l) m_copym((m), (o), (l), M_NOWAIT)
  983 
  984 extern int              max_datalen;    /* MHLEN - max_hdr */
  985 extern int              max_hdr;        /* Largest link + protocol header */
  986 extern int              max_linkhdr;    /* Largest link-level header */
  987 extern int              max_protohdr;   /* Largest protocol header */
  988 extern int              nmbclusters;    /* Maximum number of clusters */
  989 
  990 /*-
  991  * Network packets may have annotations attached by affixing a list of
  992  * "packet tags" to the pkthdr structure.  Packet tags are dynamically
  993  * allocated semi-opaque data structures that have a fixed header
  994  * (struct m_tag) that specifies the size of the memory block and a
  995  * <cookie,type> pair that identifies it.  The cookie is a 32-bit unique
  996  * unsigned value used to identify a module or ABI.  By convention this value
  997  * is chosen as the date+time that the module is created, expressed as the
  998  * number of seconds since the epoch (e.g., using date -u +'%s').  The type
  999  * value is an ABI/module-specific value that identifies a particular
 1000  * annotation and is private to the module.  For compatibility with systems
 1001  * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
 1002  * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
 1003  * compatibility shim functions and several tag types are defined below.
 1004  * Users that do not require compatibility should use a private cookie value
 1005  * so that packet tag-related definitions can be maintained privately.
 1006  *
 1007  * Note that the packet tag returned by m_tag_alloc has the default memory
 1008  * alignment implemented by malloc.  To reference private data one can use a
 1009  * construct like:
 1010  *
 1011  *      struct m_tag *mtag = m_tag_alloc(...);
 1012  *      struct foo *p = (struct foo *)(mtag+1);
 1013  *
 1014  * if the alignment of struct m_tag is sufficient for referencing members of
 1015  * struct foo.  Otherwise it is necessary to embed struct m_tag within the
  1016  * private data structure to ensure proper alignment; e.g.,
 1017  *
 1018  *      struct foo {
 1019  *              struct m_tag    tag;
 1020  *              ...
 1021  *      };
 1022  *      struct foo *p = (struct foo *) m_tag_alloc(...);
 1023  *      struct m_tag *mtag = &p->tag;
 1024  */
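
A hedged sketch of the convention described above, using a made-up module
cookie and tag type (MYMOD_COOKIE, MYMOD_TAG_MARK, and the payload value are
hypothetical):

        #define MYMOD_COOKIE    1234567890      /* e.g. output of date -u +'%s' */
        #define MYMOD_TAG_MARK  0

        /* Attach a tag carrying one uint32_t of private data. */
        struct m_tag *mtag;

        mtag = m_tag_alloc(MYMOD_COOKIE, MYMOD_TAG_MARK, sizeof(uint32_t),
            M_NOWAIT);
        if (mtag != NULL) {
                *(uint32_t *)(mtag + 1) = 42;
                m_tag_prepend(m, mtag);
        }

        /* Later, wherever the packet is seen again: */
        mtag = m_tag_locate(m, MYMOD_COOKIE, MYMOD_TAG_MARK, NULL);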
 1025 
 1026 /*
 1027  * Persistent tags stay with an mbuf until the mbuf is reclaimed.  Otherwise
 1028  * tags are expected to ``vanish'' when they pass through a network
 1029  * interface.  For most interfaces this happens normally as the tags are
  1030  * reclaimed when the mbuf is freed.  However, in some special cases
 1031  * reclaiming must be done manually.  An example is packets that pass through
 1032  * the loopback interface.  Also, one must be careful to do this when
 1033  * ``turning around'' packets (e.g., icmp_reflect).
 1034  *
  1035  * To mark a tag persistent, bit-or this flag in when defining the tag id.
 1036  * The tag will then be treated as described above.
 1037  */
 1038 #define MTAG_PERSISTENT                         0x800
 1039 
 1040 #define PACKET_TAG_NONE                         0  /* Nadda */
 1041 
 1042 /* Packet tags for use with PACKET_ABI_COMPAT. */
 1043 #define PACKET_TAG_IPSEC_IN_DONE                1  /* IPsec applied, in */
 1044 #define PACKET_TAG_IPSEC_OUT_DONE               2  /* IPsec applied, out */
 1045 #define PACKET_TAG_IPSEC_IN_CRYPTO_DONE         3  /* NIC IPsec crypto done */
 1046 #define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED      4  /* NIC IPsec crypto req'ed */
 1047 #define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO     5  /* NIC notifies IPsec */
 1048 #define PACKET_TAG_IPSEC_PENDING_TDB            6  /* Reminder to do IPsec */
 1049 #define PACKET_TAG_BRIDGE                       7  /* Bridge processing done */
 1050 #define PACKET_TAG_GIF                          8  /* GIF processing done */
 1051 #define PACKET_TAG_GRE                          9  /* GRE processing done */
 1052 #define PACKET_TAG_IN_PACKET_CHECKSUM           10 /* NIC checksumming done */
 1053 #define PACKET_TAG_ENCAP                        11 /* Encap.  processing */
 1054 #define PACKET_TAG_IPSEC_SOCKET                 12 /* IPSEC socket ref */
 1055 #define PACKET_TAG_IPSEC_HISTORY                13 /* IPSEC history */
 1056 #define PACKET_TAG_IPV6_INPUT                   14 /* IPV6 input processing */
 1057 #define PACKET_TAG_DUMMYNET                     15 /* dummynet info */
 1058 #define PACKET_TAG_DIVERT                       17 /* divert info */
 1059 #define PACKET_TAG_IPFORWARD                    18 /* ipforward info */
 1060 #define PACKET_TAG_MACLABEL     (19 | MTAG_PERSISTENT) /* MAC label */
 1061 #define PACKET_TAG_PF           (21 | MTAG_PERSISTENT) /* PF/ALTQ information */
 1062 #define PACKET_TAG_RTSOCKFAM                    25 /* rtsock sa family */
 1063 #define PACKET_TAG_IPOPTIONS                    27 /* Saved IP options */
 1064 #define PACKET_TAG_CARP                         28 /* CARP info */
 1065 #define PACKET_TAG_IPSEC_NAT_T_PORTS            29 /* two uint16_t */
 1066 #define PACKET_TAG_ND_OUTGOING                  30 /* ND outgoing */
 1067 
 1068 /* Specific cookies and tags. */
 1069 
 1070 /* Packet tag routines. */
 1071 struct m_tag    *m_tag_alloc(u_int32_t, int, int, int);
 1072 void             m_tag_delete(struct mbuf *, struct m_tag *);
 1073 void             m_tag_delete_chain(struct mbuf *, struct m_tag *);
 1074 void             m_tag_free_default(struct m_tag *);
 1075 struct m_tag    *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
 1076 struct m_tag    *m_tag_copy(struct m_tag *, int);
 1077 int              m_tag_copy_chain(struct mbuf *, const struct mbuf *, int);
 1078 void             m_tag_delete_nonpersistent(struct mbuf *);
 1079 
 1080 /*
 1081  * Initialize the list of tags associated with an mbuf.
 1082  */
 1083 static __inline void
 1084 m_tag_init(struct mbuf *m)
 1085 {
 1086 
 1087         SLIST_INIT(&m->m_pkthdr.tags);
 1088 }
 1089 
 1090 /*
 1091  * Set up the contents of a tag.  Note that this does not fill in the free
 1092  * method; the caller is expected to do that.
 1093  *
 1094  * XXX probably should be called m_tag_init, but that was already taken.
 1095  */
 1096 static __inline void
 1097 m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
 1098 {
 1099 
 1100         t->m_tag_id = type;
 1101         t->m_tag_len = len;
 1102         t->m_tag_cookie = cookie;
 1103 }
 1104 
 1105 /*
 1106  * Reclaim resources associated with a tag.
 1107  */
 1108 static __inline void
 1109 m_tag_free(struct m_tag *t)
 1110 {
 1111 
 1112         (*t->m_tag_free)(t);
 1113 }
 1114 
 1115 /*
 1116  * Return the first tag associated with an mbuf.
 1117  */
 1118 static __inline struct m_tag *
 1119 m_tag_first(struct mbuf *m)
 1120 {
 1121 
 1122         return (SLIST_FIRST(&m->m_pkthdr.tags));
 1123 }
 1124 
 1125 /*
 1126  * Return the next tag in the list of tags associated with an mbuf.
 1127  */
 1128 static __inline struct m_tag *
 1129 m_tag_next(struct mbuf *m __unused, struct m_tag *t)
 1130 {
 1131 
 1132         return (SLIST_NEXT(t, m_tag_link));
 1133 }
 1134 
 1135 /*
 1136  * Prepend a tag to the list of tags associated with an mbuf.
 1137  */
 1138 static __inline void
 1139 m_tag_prepend(struct mbuf *m, struct m_tag *t)
 1140 {
 1141 
 1142         SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
 1143 }
 1144 
 1145 /*
 1146  * Unlink a tag from the list of tags associated with an mbuf.
 1147  */
 1148 static __inline void
 1149 m_tag_unlink(struct mbuf *m, struct m_tag *t)
 1150 {
 1151 
 1152         SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
 1153 }
 1154 
 1155 /* These are for OpenBSD compatibility. */
 1156 #define MTAG_ABI_COMPAT         0               /* compatibility ABI */
 1157 
 1158 static __inline struct m_tag *
 1159 m_tag_get(int type, int length, int wait)
 1160 {
 1161         return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait));
 1162 }
 1163 
 1164 static __inline struct m_tag *
 1165 m_tag_find(struct mbuf *m, int type, struct m_tag *start)
 1166 {
 1167         return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
 1168             m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
 1169 }
 1170 
 1171 static __inline struct mbuf *
 1172 m_free(struct mbuf *m)
 1173 {
 1174         struct mbuf *n = m->m_next;
 1175 
 1176         MBUF_PROBE1(m__free, m);
 1177         if ((m->m_flags & (M_PKTHDR|M_NOFREE)) == (M_PKTHDR|M_NOFREE))
 1178                 m_tag_delete_chain(m, NULL);
 1179         if (m->m_flags & M_EXT)
 1180                 mb_free_ext(m);
 1181         else if ((m->m_flags & M_NOFREE) == 0)
 1182                 uma_zfree(zone_mbuf, m);
 1183         return (n);
 1184 }
 1185 
 1186 static __inline int
 1187 rt_m_getfib(struct mbuf *m)
 1188 {
  1189         KASSERT(m->m_flags & M_PKTHDR, ("Attempt to get FIB from non header mbuf."));
 1190         return (m->m_pkthdr.fibnum);
 1191 }
 1192 
 1193 #define M_GETFIB(_m)   rt_m_getfib(_m)
 1194 
 1195 #define M_SETFIB(_m, _fib) do {                                         \
 1196         KASSERT((_m)->m_flags & M_PKTHDR, ("Attempt to set FIB on non header mbuf."));  \
 1197         ((_m)->m_pkthdr.fibnum) = (_fib);                               \
 1198 } while (0)
 1199 
 1200 /* flags passed as first argument for "m_ether_tcpip_hash()" */
 1201 #define MBUF_HASHFLAG_L2        (1 << 2)
 1202 #define MBUF_HASHFLAG_L3        (1 << 3)
 1203 #define MBUF_HASHFLAG_L4        (1 << 4)
 1204 
 1205 /* mbuf hashing helper routines */
 1206 uint32_t        m_ether_tcpip_hash_init(void);
 1207 uint32_t        m_ether_tcpip_hash(const uint32_t, const struct mbuf *, const uint32_t);
 1208 
 1209 #ifdef MBUF_PROFILING
 1210  void m_profile(struct mbuf *m);
 1211  #define M_PROFILE(m) m_profile(m)
 1212 #else
 1213  #define M_PROFILE(m)
 1214 #endif
 1215 
 1216 struct mbufq {
 1217         STAILQ_HEAD(, mbuf)     mq_head;
 1218         int                     mq_len;
 1219         int                     mq_maxlen;
 1220 };
 1221 
 1222 static inline void
 1223 mbufq_init(struct mbufq *mq, int maxlen)
 1224 {
 1225 
 1226         STAILQ_INIT(&mq->mq_head);
 1227         mq->mq_maxlen = maxlen;
 1228         mq->mq_len = 0;
 1229 }
 1230 
 1231 static inline struct mbuf *
 1232 mbufq_flush(struct mbufq *mq)
 1233 {
 1234         struct mbuf *m;
 1235 
 1236         m = STAILQ_FIRST(&mq->mq_head);
 1237         STAILQ_INIT(&mq->mq_head);
 1238         mq->mq_len = 0;
 1239         return (m);
 1240 }
 1241 
 1242 static inline void
 1243 mbufq_drain(struct mbufq *mq)
 1244 {
 1245         struct mbuf *m, *n;
 1246 
 1247         n = mbufq_flush(mq);
 1248         while ((m = n) != NULL) {
 1249                 n = STAILQ_NEXT(m, m_stailqpkt);
 1250                 m_freem(m);
 1251         }
 1252 }
 1253 
 1254 static inline struct mbuf *
 1255 mbufq_first(const struct mbufq *mq)
 1256 {
 1257 
 1258         return (STAILQ_FIRST(&mq->mq_head));
 1259 }
 1260 
 1261 static inline struct mbuf *
 1262 mbufq_last(const struct mbufq *mq)
 1263 {
 1264 
 1265         return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
 1266 }
 1267 
 1268 static inline int
 1269 mbufq_full(const struct mbufq *mq)
 1270 {
 1271 
 1272         return (mq->mq_len >= mq->mq_maxlen);
 1273 }
 1274 
 1275 static inline int
 1276 mbufq_len(const struct mbufq *mq)
 1277 {
 1278 
 1279         return (mq->mq_len);
 1280 }
 1281 
 1282 static inline int
 1283 mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
 1284 {
 1285 
 1286         if (mbufq_full(mq))
 1287                 return (ENOBUFS);
 1288         STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
 1289         mq->mq_len++;
 1290         return (0);
 1291 }
 1292 
 1293 static inline struct mbuf *
 1294 mbufq_dequeue(struct mbufq *mq)
 1295 {
 1296         struct mbuf *m;
 1297 
 1298         m = STAILQ_FIRST(&mq->mq_head);
 1299         if (m) {
 1300                 STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
 1301                 m->m_nextpkt = NULL;
 1302                 mq->mq_len--;
 1303         }
 1304         return (m);
 1305 }
 1306 
 1307 static inline void
 1308 mbufq_prepend(struct mbufq *mq, struct mbuf *m)
 1309 {
 1310 
 1311         STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
 1312         mq->mq_len++;
 1313 }
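
A hedged example of using the mbufq API as a bounded holding queue;
mydrv_transmit() is a hypothetical consumer:

        struct mbufq q;
        struct mbuf *m;

        mbufq_init(&q, 128);                    /* hold at most 128 packets */

        if (mbufq_enqueue(&q, m) != 0)
                m_freem(m);                     /* queue full, drop */

        while ((m = mbufq_dequeue(&q)) != NULL)
                mydrv_transmit(m);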
 1314 #endif /* _KERNEL */
 1315 #endif /* !_SYS_MBUF_H_ */
