FreeBSD/Linux Kernel Cross Reference
sys/sys/mbuf.h

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1982, 1986, 1988, 1993
    5  *      The Regents of the University of California.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)mbuf.h      8.5 (Berkeley) 2/19/95
   33  * $FreeBSD$
   34  */
   35 
   36 #ifndef _SYS_MBUF_H_
   37 #define _SYS_MBUF_H_
   38 
   39 /* XXX: These includes suck. Sorry! */
   40 #include <sys/queue.h>
   41 #ifdef _KERNEL
   42 #include <sys/systm.h>
   43 #include <vm/uma.h>
   44 #ifdef WITNESS
   45 #include <sys/lock.h>
   46 #endif
   47 #endif
   48 
   49 #ifdef _KERNEL
   50 #include <sys/sdt.h>
   51 
   52 #define MBUF_PROBE1(probe, arg0)                                        \
   53         SDT_PROBE1(sdt, , , probe, arg0)
   54 #define MBUF_PROBE2(probe, arg0, arg1)                                  \
   55         SDT_PROBE2(sdt, , , probe, arg0, arg1)
   56 #define MBUF_PROBE3(probe, arg0, arg1, arg2)                            \
   57         SDT_PROBE3(sdt, , , probe, arg0, arg1, arg2)
   58 #define MBUF_PROBE4(probe, arg0, arg1, arg2, arg3)                      \
   59         SDT_PROBE4(sdt, , , probe, arg0, arg1, arg2, arg3)
   60 #define MBUF_PROBE5(probe, arg0, arg1, arg2, arg3, arg4)                \
   61         SDT_PROBE5(sdt, , , probe, arg0, arg1, arg2, arg3, arg4)
   62 
   63 SDT_PROBE_DECLARE(sdt, , , m__init);
   64 SDT_PROBE_DECLARE(sdt, , , m__gethdr);
   65 SDT_PROBE_DECLARE(sdt, , , m__get);
   66 SDT_PROBE_DECLARE(sdt, , , m__getcl);
   67 SDT_PROBE_DECLARE(sdt, , , m__getjcl);
   68 SDT_PROBE_DECLARE(sdt, , , m__clget);
   69 SDT_PROBE_DECLARE(sdt, , , m__cljget);
   70 SDT_PROBE_DECLARE(sdt, , , m__cljset);
   71 SDT_PROBE_DECLARE(sdt, , , m__free);
   72 SDT_PROBE_DECLARE(sdt, , , m__freem);
   73 
   74 #endif /* _KERNEL */
   75 
   76 /*
   77  * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead.
   78  * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in
   79  * sys/param.h), which has no additional overhead and is used instead of the
   80  * internal data area; this is done when at least MINCLSIZE of data must be
   81  * stored.  Additionally, it is possible to allocate a separate buffer
   82  * externally and attach it to the mbuf in a way similar to that of mbuf
   83  * clusters.
   84  *
   85  * NB: These calculations do not take actual compiler-induced alignment and
   86  * padding inside the complete struct mbuf into account.  Appropriate
   87  * attention is required when changing members of struct mbuf.
   88  *
   89  * MLEN is the data length in a normal mbuf.
   90  * MHLEN is the data length in an mbuf with a packet header.
   91  * MINCLSIZE is the smallest amount of data that should be put into a cluster.
   92  *
   93  * Compile-time assertions in uipc_mbuf.c test these values to ensure that
   94  * they are sensible.
   95  */
   96 struct mbuf;
   97 #define MHSIZE          offsetof(struct mbuf, m_dat)
   98 #define MPKTHSIZE       offsetof(struct mbuf, m_pktdat)
   99 #define MLEN            ((int)(MSIZE - MHSIZE))
  100 #define MHLEN           ((int)(MSIZE - MPKTHSIZE))
  101 #define MINCLSIZE       (MHLEN + 1)
  102 
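/*
 * Usage sketch: callers commonly compare the amount of data they need to
 * store against MHLEN to choose between embedded storage and an attached
 * cluster.  The variables 'len', 'how' and 'm' below are hypothetical.
 */
        if (len > MHLEN)
                m = m_getcl(how, MT_DATA, M_PKTHDR);    /* header + cluster */
        else
                m = m_gethdr(how, MT_DATA);     /* data fits in m_pktdat[] */
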
  103 #ifdef _KERNEL
  104 /*-
  105  * Macros for type conversion: convert an mbuf pointer to a data pointer of
  106  * the correct type:
  107  *
  108  * mtod(m, t)   -- Convert mbuf pointer to data pointer of correct type.
  109  * mtodo(m, o) -- Same as above but with offset 'o' into data.
  110  */
  111 #define mtod(m, t)      ((t)((m)->m_data))
  112 #define mtodo(m, o)     ((void *)(((m)->m_data) + (o)))
  113 
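/*
 * Usage sketch: after ensuring the first mbuf holds a contiguous IPv4
 * header, mtod() casts m_data to the desired pointer type and mtodo()
 * does the same at a byte offset.  Assumes <netinet/ip.h> for struct ip;
 * 'payload' is a hypothetical local.
 */
        struct ip *ip;
        void *payload;

        if (m->m_len < sizeof(struct ip) &&
            (m = m_pullup(m, sizeof(struct ip))) == NULL)
                return;                         /* m_pullup() freed the chain */
        ip = mtod(m, struct ip *);
        payload = mtodo(m, ip->ip_hl << 2);     /* start of transport header */
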
  114 /*
  115  * Argument structure passed to UMA routines during mbuf and packet
  116  * allocations.
  117  */
  118 struct mb_args {
  119         int     flags;  /* Flags for mbuf being allocated */
  120         short   type;   /* Type of mbuf being allocated */
  121 };
  122 #endif /* _KERNEL */
  123 
  124 /*
  125  * Packet tag structure (see below for details).
  126  */
  127 struct m_tag {
  128         SLIST_ENTRY(m_tag)      m_tag_link;     /* List of packet tags */
  129         u_int16_t               m_tag_id;       /* Tag ID */
  130         u_int16_t               m_tag_len;      /* Length of data */
  131         u_int32_t               m_tag_cookie;   /* ABI/Module ID */
  132         void                    (*m_tag_free)(struct m_tag *);
  133 };
  134 
  135 /*
  136  * Static network interface owned tag.
  137  * Allocated through ifp->if_snd_tag_alloc().
  138  */
  139 struct m_snd_tag {
  140         struct ifnet *ifp;              /* network interface tag belongs to */
  141 };
  142 
  143 /*
  144  * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
  145  * Size ILP32: 48
  146  *       LP64: 56
  147  * Compile-time assertions in uipc_mbuf.c test these values to ensure that
  148  * they are correct.
  149  */
  150 struct pkthdr {
  151         union {
  152                 struct m_snd_tag *snd_tag;      /* send tag, if any */
  153                 struct ifnet    *rcvif;         /* rcv interface */
  154         };
  155         SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
  156         int32_t          len;           /* total packet length */
  157 
  158         /* Layer crossing persistent information. */
  159         uint32_t         flowid;        /* packet's 4-tuple system */
  160         uint32_t         csum_flags;    /* checksum and offload features */
  161         uint16_t         fibnum;        /* this packet should use this fib */
  162         uint8_t          cosqos;        /* class/quality of service */
  163         uint8_t          rsstype;       /* hash type */
  164         union {
  165                 uint64_t        rcv_tstmp;      /* timestamp in ns */
  166                 struct {
  167                         uint8_t          l2hlen;        /* layer 2 hdr len */
  168                         uint8_t          l3hlen;        /* layer 3 hdr len */
  169                         uint8_t          l4hlen;        /* layer 4 hdr len */
  170                         uint8_t          l5hlen;        /* layer 5 hdr len */
  171                         uint8_t          inner_l2hlen;
  172                         uint8_t          inner_l3hlen;
  173                         uint8_t          inner_l4hlen;
  174                         uint8_t          inner_l5hlen;
  175                 };
  176         };
  177         union {
  178                 uint8_t  eight[8];
  179                 uint16_t sixteen[4];
  180                 uint32_t thirtytwo[2];
  181                 uint64_t sixtyfour[1];
  182                 uintptr_t unintptr[1];
  183                 void    *ptr;
  184         } PH_per;
  185 
  186         /* Layer specific non-persistent local storage for reassembly, etc. */
  187         union {
  188                 uint8_t  eight[8];
  189                 uint16_t sixteen[4];
  190                 uint32_t thirtytwo[2];
  191                 uint64_t sixtyfour[1];
  192                 uintptr_t unintptr[1];
  193                 void    *ptr;
  194         } PH_loc;
  195 };
  196 #define ether_vtag      PH_per.sixteen[0]
  197 #define PH_vt           PH_per
  198 #define vt_nrecs        sixteen[0]
  199 #define tso_segsz       PH_per.sixteen[1]
  200 #define lro_nsegs       tso_segsz
  201 #define csum_phsum      PH_per.sixteen[2]
  202 #define csum_data       PH_per.thirtytwo[1]
  203 #define pace_thoff      PH_loc.sixteen[0]
  204 #define pace_tlen       PH_loc.sixteen[1]
  205 #define pace_drphdrlen  PH_loc.sixteen[2]
  206 #define pace_tos        PH_loc.eight[6]
  207 #define pace_lock       PH_loc.eight[7]
  208 
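/*
 * Usage sketch: the accessor macros above overlay per-packet metadata on
 * the PH_per/PH_loc unions.  A transmit path might fill them in roughly
 * like this; 'vtag' and 'mss' are hypothetical values.
 */
        m->m_pkthdr.ether_vtag = vtag;          /* paired with M_VLANTAG */
        m->m_flags |= M_VLANTAG;
        m->m_pkthdr.tso_segsz = mss;            /* paired with CSUM_IP_TSO */
        m->m_pkthdr.csum_flags |= CSUM_IP_TSO;
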
  209 /*
  210  * Description of external storage mapped into mbuf; valid only if M_EXT is
  211  * set.
  212  * Size ILP32: 28
  213  *       LP64: 48
  214  * Compile-time assertions in uipc_mbuf.c test these values to ensure that
  215  * they are correct.
  216  */
  217 typedef void m_ext_free_t(struct mbuf *);
  218 struct m_ext {
  219         union {
  220                 /*
  221                  * If EXT_FLAG_EMBREF is set, then we use refcount in the
  222                  * mbuf, the 'ext_count' member.  Otherwise, we have a
  223                  * shadow copy and we use pointer 'ext_cnt'.  The original
  224                  * mbuf is responsible for carrying the pointer to the free
  225                  * routine and its arguments.  They aren't copied into shadows
  226                  * in mb_dupcl() to avoid dereferencing additional cache lines.
  227                  */
  228                 volatile u_int   ext_count;
  229                 volatile u_int  *ext_cnt;
  230         };
  231         char            *ext_buf;       /* start of buffer */
  232         uint32_t         ext_size;      /* size of buffer, for ext_free */
  233         uint32_t         ext_type:8,    /* type of external storage */
  234                          ext_flags:24;  /* external storage mbuf flags */
  235         /*
  236          * Fields below store the free context for the external storage.
  237          * They are valid only in the refcount carrying mbuf, the one with
  238          * EXT_FLAG_EMBREF flag, with the exception of the EXT_EXTREF type,
  239          * where the free context is copied into all mbufs that use the same
  240          * external storage.
  241          */
  242 #define m_ext_copylen   offsetof(struct m_ext, ext_free)
  243         m_ext_free_t    *ext_free;      /* free routine if not the usual */
  244         void            *ext_arg1;      /* optional argument pointer */
  245         void            *ext_arg2;      /* optional argument pointer */
  246 };
  247 
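/*
 * Usage sketch: a driver may attach its own buffer as external storage
 * with m_extadd() (declared below), supplying a free routine of type
 * m_ext_free_t.  The names my_ext_free(), my_buf_release(), 'buf', 'size'
 * and 'ctx' are hypothetical.
 */
static void
my_ext_free(struct mbuf *m)
{

        my_buf_release(m->m_ext.ext_arg1);      /* release the driver buffer */
}

/* Later, while building the mbuf: */
        m_extadd(m, buf, size, my_ext_free, ctx, NULL, 0, EXT_NET_DRV);
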
  248 /*
  249  * The core of the mbuf object along with some shortcut defines for practical
  250  * purposes.
  251  */
  252 struct mbuf {
  253         /*
  254          * Header present at the beginning of every mbuf.
  255          * Size ILP32: 24
  256          *      LP64: 32
  257          * Compile-time assertions in uipc_mbuf.c test these values to ensure
  258          * that they are correct.
  259          */
  260         union { /* next buffer in chain */
  261                 struct mbuf             *m_next;
  262                 SLIST_ENTRY(mbuf)       m_slist;
  263                 STAILQ_ENTRY(mbuf)      m_stailq;
  264         };
  265         union { /* next chain in queue/record */
  266                 struct mbuf             *m_nextpkt;
  267                 SLIST_ENTRY(mbuf)       m_slistpkt;
  268                 STAILQ_ENTRY(mbuf)      m_stailqpkt;
  269         };
  270         caddr_t          m_data;        /* location of data */
  271         int32_t          m_len;         /* amount of data in this mbuf */
  272         uint32_t         m_type:8,      /* type of data in this mbuf */
  273                          m_flags:24;    /* flags; see below */
  274 #if !defined(__LP64__)
  275         uint32_t         m_pad;         /* pad for 64bit alignment */
  276 #endif
  277 
  278         /*
  279          * A set of optional headers (packet header, external storage header)
  280          * and internal data storage.  Historically, these arrays were sized
  281          * to MHLEN (space left after a packet header) and MLEN (space left
  282          * after only a regular mbuf header); they are now variable size in
  283          * order to support future work on variable-size mbufs.
  284          */
  285         union {
  286                 struct {
  287                         struct pkthdr   m_pkthdr;       /* M_PKTHDR set */
  288                         union {
  289                                 struct m_ext    m_ext;  /* M_EXT set */
  290                                 char            m_pktdat[0];
  291                         };
  292                 };
  293                 char    m_dat[0];                       /* !M_PKTHDR, !M_EXT */
  294         };
  295 };
  296 
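/*
 * Usage sketch: the data of one packet may be spread over a chain linked
 * through m_next; the packet header of the first mbuf carries the total
 * length, which must equal the sum of m_len over the chain.
 */
        struct mbuf *n;
        int total;

        total = 0;
        for (n = m; n != NULL; n = n->m_next)
                total += n->m_len;
        /* For a packet header mbuf, 'total' now equals m->m_pkthdr.len. */
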
  297 /*
  298  * mbuf flags of global significance and layer crossing.
  299  * Those of only protocol/layer specific significance are to be mapped
  300  * to M_PROTO[1-12] and cleared at layer handoff boundaries.
  301  * NB: Limited to the lower 24 bits.
  302  */
  303 #define M_EXT           0x00000001 /* has associated external storage */
  304 #define M_PKTHDR        0x00000002 /* start of record */
  305 #define M_EOR           0x00000004 /* end of record */
  306 #define M_RDONLY        0x00000008 /* associated data is marked read-only */
  307 #define M_BCAST         0x00000010 /* send/received as link-level broadcast */
  308 #define M_MCAST         0x00000020 /* send/received as link-level multicast */
  309 #define M_PROMISC       0x00000040 /* packet was not for us */
  310 #define M_VLANTAG       0x00000080 /* ether_vtag is valid */
  311 #define M_NOMAP         0x00000100 /* mbuf data is unmapped (soon from Drew) */
  312 #define M_NOFREE        0x00000200 /* do not free mbuf, embedded in cluster */
  313 #define M_TSTMP         0x00000400 /* rcv_tstmp field is valid */
  314 #define M_TSTMP_HPREC   0x00000800 /* rcv_tstmp is high-prec, typically
  315                                       hw-stamped on port (useful for IEEE 1588
  316                                       and 802.1AS) */
  317 #define M_TSTMP_LRO     0x00001000 /* Time LRO pushed in pkt is valid in (PH_loc) */
  318 
  319 #define M_PROTO1        0x00001000 /* protocol-specific */
  320 #define M_PROTO2        0x00002000 /* protocol-specific */
  321 #define M_PROTO3        0x00004000 /* protocol-specific */
  322 #define M_PROTO4        0x00008000 /* protocol-specific */
  323 #define M_PROTO5        0x00010000 /* protocol-specific */
  324 #define M_PROTO6        0x00020000 /* protocol-specific */
  325 #define M_PROTO7        0x00040000 /* protocol-specific */
  326 #define M_PROTO8        0x00080000 /* protocol-specific */
  327 #define M_PROTO9        0x00100000 /* protocol-specific */
  328 #define M_PROTO10       0x00200000 /* protocol-specific */
  329 #define M_PROTO11       0x00400000 /* protocol-specific */
  330 #define M_PROTO12       0x00800000 /* protocol-specific */
  331 
  332 #define MB_DTOR_SKIP    0x1     /* don't pollute the cache by touching a freed mbuf */
  333 
  334 /*
  335  * Flags to purge when crossing layers.
  336  */
  337 #define M_PROTOFLAGS \
  338     (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8|\
  339      M_PROTO9|M_PROTO10|M_PROTO11|M_PROTO12)
  340 
  341 /*
  342  * Flags preserved when copying m_pkthdr.
  343  */
  344 #define M_COPYFLAGS \
  345     (M_PKTHDR|M_EOR|M_RDONLY|M_BCAST|M_MCAST|M_PROMISC|M_VLANTAG|M_TSTMP| \
  346      M_TSTMP_HPREC|M_PROTOFLAGS)
  347 
  348 /*
  349  * Mbuf flag description for use with printf(9) %b identifier.
  350  */
  351 #define M_FLAG_BITS \
  352     "\2\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY\5M_BCAST\6M_MCAST" \
  353     "\7M_PROMISC\10M_VLANTAG\13M_TSTMP\14M_TSTMP_HPREC"
  354 #define M_FLAG_PROTOBITS \
  355     "\15M_PROTO1\16M_PROTO2\17M_PROTO3\20M_PROTO4\21M_PROTO5" \
  356     "\22M_PROTO6\23M_PROTO7\24M_PROTO8\25M_PROTO9\26M_PROTO10" \
  357     "\27M_PROTO11\30M_PROTO12"
  358 #define M_FLAG_PRINTF (M_FLAG_BITS M_FLAG_PROTOBITS)
  359 
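/*
 * Usage sketch: the %b conversion of the kernel printf(9) consumes the
 * value followed by the description string, so the flags of an mbuf can
 * be decoded with:
 */
        printf("%p: flags %b\n", m, m->m_flags, M_FLAG_PRINTF);
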
  360 /*
  361  * Network interface cards are able to hash protocol fields (such as IPv4
  362  * addresses and TCP port numbers) to classify packets into flows.  These flows
  363  * can then be used to maintain ordering while delivering packets to the OS
  364  * via parallel input queues, as well as to provide a stateless affinity
  365  * model.  NIC drivers can pass up the hash via m->m_pkthdr.flowid, and set
  366  * m_flag fields to indicate how the hash should be interpreted by the
  367  * network stack.
  368  *
  369  * Most NICs support RSS, which provides ordering and explicit affinity, and
  370  * use the hash m_flag bits to indicate what header fields were covered by
  371  * the hash.  M_HASHTYPE_OPAQUE and M_HASHTYPE_OPAQUE_HASH can be set by non-
  372  * RSS cards or configurations that provide an opaque flow identifier, allowing
  373  * for ordering and distribution without explicit affinity.  Additionally,
  374  * M_HASHTYPE_OPAQUE_HASH indicates that the flow identifier has hash
  375  * properties.
  376  *
  377  * The meaning of the IPV6_EX suffix:
  378  * "o  Home address from the home address option in the IPv6 destination
  379  *     options header.  If the extension header is not present, use the Source
  380  *     IPv6 Address.
  381  *  o  IPv6 address that is contained in the Routing-Header-Type-2 from the
  382  *     associated extension header.  If the extension header is not present,
  383  *     use the Destination IPv6 Address."
  384  * Quoted from:
  385  * https://docs.microsoft.com/en-us/windows-hardware/drivers/network/rss-hashing-types#ndishashipv6ex
  386  */
  387 #define M_HASHTYPE_HASHPROP             0x80    /* has hash properties */
  388 #define M_HASHTYPE_INNER                0x40    /* calculated from inner headers */
  389 #define M_HASHTYPE_HASH(t)              (M_HASHTYPE_HASHPROP | (t))
  390 /* Microsoft RSS standard hash types */
  391 #define M_HASHTYPE_NONE                 0
  392 #define M_HASHTYPE_RSS_IPV4             M_HASHTYPE_HASH(1) /* IPv4 2-tuple */
  393 #define M_HASHTYPE_RSS_TCP_IPV4         M_HASHTYPE_HASH(2) /* TCPv4 4-tuple */
  394 #define M_HASHTYPE_RSS_IPV6             M_HASHTYPE_HASH(3) /* IPv6 2-tuple */
  395 #define M_HASHTYPE_RSS_TCP_IPV6         M_HASHTYPE_HASH(4) /* TCPv6 4-tuple */
  396 #define M_HASHTYPE_RSS_IPV6_EX          M_HASHTYPE_HASH(5) /* IPv6 2-tuple +
  397                                                             * ext hdrs */
  398 #define M_HASHTYPE_RSS_TCP_IPV6_EX      M_HASHTYPE_HASH(6) /* TCPv6 4-tuple +
  399                                                             * ext hdrs */
  400 #define M_HASHTYPE_RSS_UDP_IPV4         M_HASHTYPE_HASH(7) /* IPv4 UDP 4-tuple*/
  401 #define M_HASHTYPE_RSS_UDP_IPV6         M_HASHTYPE_HASH(9) /* IPv6 UDP 4-tuple*/
  402 #define M_HASHTYPE_RSS_UDP_IPV6_EX      M_HASHTYPE_HASH(10)/* IPv6 UDP 4-tuple +
  403                                                             * ext hdrs */
  404 
  405 #define M_HASHTYPE_OPAQUE               0x3f    /* ordering, not affinity */
  406 #define M_HASHTYPE_OPAQUE_HASH          M_HASHTYPE_HASH(M_HASHTYPE_OPAQUE)
  407                                                 /* ordering+hash, not affinity*/
  408 
  409 #define M_HASHTYPE_CLEAR(m)     ((m)->m_pkthdr.rsstype = 0)
  410 #define M_HASHTYPE_GET(m)       ((m)->m_pkthdr.rsstype & ~M_HASHTYPE_INNER)
  411 #define M_HASHTYPE_SET(m, v)    ((m)->m_pkthdr.rsstype = (v))
  412 #define M_HASHTYPE_TEST(m, v)   (M_HASHTYPE_GET(m) == (v))
  413 #define M_HASHTYPE_ISHASH(m)    \
  414     (((m)->m_pkthdr.rsstype & M_HASHTYPE_HASHPROP) != 0)
  415 #define M_HASHTYPE_SETINNER(m)  do {                    \
  416         (m)->m_pkthdr.rsstype |= M_HASHTYPE_INNER;      \
  417     } while (0)
  418 
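/*
 * Usage sketch: a driver records the hardware hash and its type on
 * receive, and the stack later uses them for queue selection.  'rss_hash',
 * 'nqueues' and 'qid' are hypothetical.
 */
        /* Driver receive path: */
        m->m_pkthdr.flowid = rss_hash;
        M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);

        /* Stack side, e.g. when picking a queue: */
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
                qid = m->m_pkthdr.flowid % nqueues;
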
  419 /*
  420  * COS/QOS class and quality of service tags.
  421  * It uses DSCP code points as base.
  422  */
  423 #define QOS_DSCP_CS0            0x00
  424 #define QOS_DSCP_DEF            QOS_DSCP_CS0
  425 #define QOS_DSCP_CS1            0x20
  426 #define QOS_DSCP_AF11           0x28
  427 #define QOS_DSCP_AF12           0x30
  428 #define QOS_DSCP_AF13           0x38
  429 #define QOS_DSCP_CS2            0x40
  430 #define QOS_DSCP_AF21           0x48
  431 #define QOS_DSCP_AF22           0x50
  432 #define QOS_DSCP_AF23           0x58
  433 #define QOS_DSCP_CS3            0x60
  434 #define QOS_DSCP_AF31           0x68
  435 #define QOS_DSCP_AF32           0x70
  436 #define QOS_DSCP_AF33           0x78
  437 #define QOS_DSCP_CS4            0x80
  438 #define QOS_DSCP_AF41           0x88
  439 #define QOS_DSCP_AF42           0x90
  440 #define QOS_DSCP_AF43           0x98
  441 #define QOS_DSCP_CS5            0xa0
  442 #define QOS_DSCP_EF             0xb8
  443 #define QOS_DSCP_CS6            0xc0
  444 #define QOS_DSCP_CS7            0xe0
  445 
  446 /*
  447  * External mbuf storage buffer types.
  448  */
  449 #define EXT_CLUSTER     1       /* mbuf cluster */
  450 #define EXT_SFBUF       2       /* sendfile(2)'s sf_buf */
  451 #define EXT_JUMBOP      3       /* jumbo cluster page sized */
  452 #define EXT_JUMBO9      4       /* jumbo cluster 9216 bytes */
   453 #define EXT_JUMBO16     5       /* jumbo cluster 16384 bytes */
  454 #define EXT_PACKET      6       /* mbuf+cluster from packet zone */
  455 #define EXT_MBUF        7       /* external mbuf reference */
  456 
  457 #define EXT_VENDOR1     224     /* for vendor-internal use */
  458 #define EXT_VENDOR2     225     /* for vendor-internal use */
  459 #define EXT_VENDOR3     226     /* for vendor-internal use */
  460 #define EXT_VENDOR4     227     /* for vendor-internal use */
  461 
  462 #define EXT_EXP1        244     /* for experimental use */
  463 #define EXT_EXP2        245     /* for experimental use */
  464 #define EXT_EXP3        246     /* for experimental use */
  465 #define EXT_EXP4        247     /* for experimental use */
  466 
  467 #define EXT_NET_DRV     252     /* custom ext_buf provided by net driver(s) */
  468 #define EXT_MOD_TYPE    253     /* custom module's ext_buf type */
  469 #define EXT_DISPOSABLE  254     /* can throw this buffer away w/page flipping */
  470 #define EXT_EXTREF      255     /* has externally maintained ext_cnt ptr */
  471 
  472 /*
  473  * Flags for external mbuf buffer types.
  474  * NB: limited to the lower 24 bits.
  475  */
  476 #define EXT_FLAG_EMBREF         0x000001        /* embedded ext_count */
  477 #define EXT_FLAG_EXTREF         0x000002        /* external ext_cnt, notyet */
  478 
  479 #define EXT_FLAG_NOFREE         0x000010        /* don't free mbuf to pool, notyet */
  480 
  481 #define EXT_FLAG_VENDOR1        0x010000        /* These flags are vendor */
  482 #define EXT_FLAG_VENDOR2        0x020000        /* or submodule specific, */
  483 #define EXT_FLAG_VENDOR3        0x040000        /* not used by mbuf code. */
  484 #define EXT_FLAG_VENDOR4        0x080000        /* Set/read by submodule. */
  485 
  486 #define EXT_FLAG_EXP1           0x100000        /* for experimental use */
  487 #define EXT_FLAG_EXP2           0x200000        /* for experimental use */
  488 #define EXT_FLAG_EXP3           0x400000        /* for experimental use */
  489 #define EXT_FLAG_EXP4           0x800000        /* for experimental use */
  490 
  491 /*
  492  * EXT flag description for use with printf(9) %b identifier.
  493  */
  494 #define EXT_FLAG_BITS \
  495     "\2\1EXT_FLAG_EMBREF\2EXT_FLAG_EXTREF\5EXT_FLAG_NOFREE" \
  496     "\21EXT_FLAG_VENDOR1\22EXT_FLAG_VENDOR2\23EXT_FLAG_VENDOR3" \
  497     "\24EXT_FLAG_VENDOR4\25EXT_FLAG_EXP1\26EXT_FLAG_EXP2\27EXT_FLAG_EXP3" \
  498     "\30EXT_FLAG_EXP4"
  499 
  500 /*
  501  * Flags indicating checksum, segmentation and other offload work to be
  502  * done, or already done, by hardware or lower layers.  It is split into
  503  * separate inbound and outbound flags.
  504  *
  505  * Outbound flags are set by upper protocol layers to request that lower
  506  * layers, or ideally the hardware, perform these offloading tasks.
  507  * For outbound packets this field and its flags can be directly tested
  508  * against ifnet if_hwassist.  Note that the outbound and the inbound flags do
  509  * not collide right now but they could be allowed to (as long as the flags are
  510  * scrubbed appropriately when the direction of an mbuf changes).  CSUM_BITS
  511  * would also have to be split into CSUM_BITS_TX and CSUM_BITS_RX.
  512  *
  513  * CSUM_INNER_<x> is the same as CSUM_<x> but it applies to the inner frame.
  514  * The CSUM_ENCAP_<x> bits identify the outer encapsulation.
  515  */
  516 #define CSUM_IP                 0x00000001      /* IP header checksum offload */
  517 #define CSUM_IP_UDP             0x00000002      /* UDP checksum offload */
  518 #define CSUM_IP_TCP             0x00000004      /* TCP checksum offload */
  519 #define CSUM_IP_SCTP            0x00000008      /* SCTP checksum offload */
  520 #define CSUM_IP_TSO             0x00000010      /* TCP segmentation offload */
  521 #define CSUM_IP_ISCSI           0x00000020      /* iSCSI checksum offload */
  522 
  523 #define CSUM_INNER_IP6_UDP      0x00000040
  524 #define CSUM_INNER_IP6_TCP      0x00000080
  525 #define CSUM_INNER_IP6_TSO      0x00000100
  526 #define CSUM_IP6_UDP            0x00000200      /* UDP checksum offload */
  527 #define CSUM_IP6_TCP            0x00000400      /* TCP checksum offload */
  528 #define CSUM_IP6_SCTP           0x00000800      /* SCTP checksum offload */
  529 #define CSUM_IP6_TSO            0x00001000      /* TCP segmentation offload */
  530 #define CSUM_IP6_ISCSI          0x00002000      /* iSCSI checksum offload */
  531 
  532 #define CSUM_INNER_IP           0x00004000
  533 #define CSUM_INNER_IP_UDP       0x00008000
  534 #define CSUM_INNER_IP_TCP       0x00010000
  535 #define CSUM_INNER_IP_TSO       0x00020000
  536 
  537 #define CSUM_ENCAP_VXLAN        0x00040000      /* VXLAN outer encapsulation */
  538 #define CSUM_ENCAP_RSVD1        0x00080000
  539 
  540 /* Inbound checksum support where the checksum was verified by hardware. */
  541 #define CSUM_INNER_L3_CALC      0x00100000
  542 #define CSUM_INNER_L3_VALID     0x00200000
  543 #define CSUM_INNER_L4_CALC      0x00400000
  544 #define CSUM_INNER_L4_VALID     0x00800000
  545 #define CSUM_L3_CALC            0x01000000      /* calculated layer 3 csum */
  546 #define CSUM_L3_VALID           0x02000000      /* checksum is correct */
  547 #define CSUM_L4_CALC            0x04000000      /* calculated layer 4 csum */
  548 #define CSUM_L4_VALID           0x08000000      /* checksum is correct */
  549 #define CSUM_L5_CALC            0x10000000      /* calculated layer 5 csum */
  550 #define CSUM_L5_VALID           0x20000000      /* checksum is correct */
  551 #define CSUM_COALESCED          0x40000000      /* contains merged segments */
  552 
  553 #define CSUM_SND_TAG            0x80000000      /* Packet header has send tag */
  554 
  555 #define CSUM_FLAGS_TX (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_SCTP | \
  556     CSUM_IP_TSO | CSUM_IP_ISCSI | CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | \
  557     CSUM_INNER_IP6_TSO | CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_SCTP | \
  558     CSUM_IP6_TSO | CSUM_IP6_ISCSI | CSUM_INNER_IP | CSUM_INNER_IP_UDP | \
  559     CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN | \
  560     CSUM_ENCAP_RSVD1 | CSUM_SND_TAG)
  561 
  562 #define CSUM_FLAGS_RX (CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID | \
  563     CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID | CSUM_L3_CALC | CSUM_L3_VALID | \
  564     CSUM_L4_CALC | CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
  565     CSUM_COALESCED)
  566 
  567 /*
  568  * CSUM flag description for use with printf(9) %b identifier.
  569  */
  570 #define CSUM_BITS \
  571     "\2\1CSUM_IP\2CSUM_IP_UDP\3CSUM_IP_TCP\4CSUM_IP_SCTP\5CSUM_IP_TSO" \
  572     "\6CSUM_IP_ISCSI\7CSUM_INNER_IP6_UDP\10CSUM_INNER_IP6_TCP" \
  573     "\11CSUM_INNER_IP6_TSO\12CSUM_IP6_UDP\13CSUM_IP6_TCP\14CSUM_IP6_SCTP" \
  574     "\15CSUM_IP6_TSO\16CSUM_IP6_ISCSI\17CSUM_INNER_IP\20CSUM_INNER_IP_UDP" \
  575     "\21CSUM_INNER_IP_TCP\22CSUM_INNER_IP_TSO\23CSUM_ENCAP_VXLAN" \
  576     "\24CSUM_ENCAP_RSVD1\25CSUM_INNER_L3_CALC\26CSUM_INNER_L3_VALID" \
  577     "\27CSUM_INNER_L4_CALC\30CSUM_INNER_L4_VALID\31CSUM_L3_CALC" \
  578     "\32CSUM_L3_VALID\33CSUM_L4_CALC\34CSUM_L4_VALID\35CSUM_L5_CALC" \
  579     "\36CSUM_L5_VALID\37CSUM_COALESCED\40CSUM_SND_TAG"
  580 
  581 /* CSUM flags compatibility mappings. */
  582 #define CSUM_IP_CHECKED         CSUM_L3_CALC
  583 #define CSUM_IP_VALID           CSUM_L3_VALID
  584 #define CSUM_DATA_VALID         CSUM_L4_VALID
  585 #define CSUM_PSEUDO_HDR         CSUM_L4_CALC
  586 #define CSUM_SCTP_VALID         CSUM_L4_VALID
  587 #define CSUM_DELAY_DATA         (CSUM_TCP|CSUM_UDP)
  588 #define CSUM_DELAY_IP           CSUM_IP         /* Only v4, no v6 IP hdr csum */
  589 #define CSUM_DELAY_DATA_IPV6    (CSUM_TCP_IPV6|CSUM_UDP_IPV6)
  590 #define CSUM_DATA_VALID_IPV6    CSUM_DATA_VALID
  591 #define CSUM_TCP                CSUM_IP_TCP
  592 #define CSUM_UDP                CSUM_IP_UDP
  593 #define CSUM_SCTP               CSUM_IP_SCTP
  594 #define CSUM_TSO                (CSUM_IP_TSO|CSUM_IP6_TSO)
  595 #define CSUM_INNER_TSO          (CSUM_INNER_IP_TSO|CSUM_INNER_IP6_TSO)
  596 #define CSUM_UDP_IPV6           CSUM_IP6_UDP
  597 #define CSUM_TCP_IPV6           CSUM_IP6_TCP
  598 #define CSUM_SCTP_IPV6          CSUM_IP6_SCTP
  599 
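/*
 * Usage sketch: how csum_flags and csum_data are typically paired in the
 * two directions.  Assumes <netinet/tcp.h> for struct tcphdr.
 */
        /* Transmit: request IPv4 header and TCP checksum offload. */
        m->m_pkthdr.csum_flags |= CSUM_IP | CSUM_TCP;
        m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);

        /* Receive: driver reports a hardware-verified layer 4 checksum. */
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        m->m_pkthdr.csum_data = 0xffff;
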
  600 /*
  601  * mbuf types describing the content of the mbuf (including external storage).
  602  */
  603 #define MT_NOTMBUF      0       /* USED INTERNALLY ONLY! Object is not mbuf */
  604 #define MT_DATA         1       /* dynamic (data) allocation */
  605 #define MT_HEADER       MT_DATA /* packet header, use M_PKTHDR instead */
  606 
  607 #define MT_VENDOR1      4       /* for vendor-internal use */
  608 #define MT_VENDOR2      5       /* for vendor-internal use */
  609 #define MT_VENDOR3      6       /* for vendor-internal use */
  610 #define MT_VENDOR4      7       /* for vendor-internal use */
  611 
  612 #define MT_SONAME       8       /* socket name */
  613 
  614 #define MT_EXP1         9       /* for experimental use */
  615 #define MT_EXP2         10      /* for experimental use */
  616 #define MT_EXP3         11      /* for experimental use */
  617 #define MT_EXP4         12      /* for experimental use */
  618 
  619 #define MT_CONTROL      14      /* extra-data protocol message */
  620 #define MT_EXTCONTROL   15      /* control message with externalized contents */
  621 #define MT_OOBDATA      16      /* expedited data  */
  622 
  623 #define MT_NOINIT       255     /* Not a type but a flag to allocate
  624                                    a non-initialized mbuf */
  625 
  626 /*
  627  * String names of mbuf-related UMA(9) and malloc(9) types.  Exposed to
  628  * !_KERNEL so that monitoring tools can look up the zones with
  629  * libmemstat(3).
  630  */
  631 #define MBUF_MEM_NAME           "mbuf"
  632 #define MBUF_CLUSTER_MEM_NAME   "mbuf_cluster"
  633 #define MBUF_PACKET_MEM_NAME    "mbuf_packet"
  634 #define MBUF_JUMBOP_MEM_NAME    "mbuf_jumbo_page"
  635 #define MBUF_JUMBO9_MEM_NAME    "mbuf_jumbo_9k"
  636 #define MBUF_JUMBO16_MEM_NAME   "mbuf_jumbo_16k"
  637 #define MBUF_TAG_MEM_NAME       "mbuf_tag"
  638 #define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt"
  639 
  640 #ifdef _KERNEL
  641 
  642 #ifdef WITNESS
  643 #define MBUF_CHECKSLEEP(how) do {                                       \
  644         if (how == M_WAITOK)                                            \
  645                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,         \
  646                     "Sleeping in \"%s\"", __func__);                    \
  647 } while (0)
  648 #else
  649 #define MBUF_CHECKSLEEP(how)
  650 #endif
  651 
  652 /*
  653  * Network buffer allocation API
  654  *
  655  * The rest of it is defined in kern/kern_mbuf.c
  656  */
  657 extern uma_zone_t       zone_mbuf;
  658 extern uma_zone_t       zone_clust;
  659 extern uma_zone_t       zone_pack;
  660 extern uma_zone_t       zone_jumbop;
  661 extern uma_zone_t       zone_jumbo9;
  662 extern uma_zone_t       zone_jumbo16;
  663 
  664 void             mb_dupcl(struct mbuf *, struct mbuf *);
  665 void             mb_free_ext(struct mbuf *);
  666 void             m_adj(struct mbuf *, int);
  667 void             m_adj_decap(struct mbuf *, int);
  668 int              m_apply(struct mbuf *, int, int,
  669                     int (*)(void *, void *, u_int), void *);
  670 int              m_append(struct mbuf *, int, c_caddr_t);
  671 void             m_cat(struct mbuf *, struct mbuf *);
  672 void             m_catpkt(struct mbuf *, struct mbuf *);
  673 int              m_clget(struct mbuf *m, int how);
  674 void            *m_cljget(struct mbuf *m, int how, int size);
  675 struct mbuf     *m_collapse(struct mbuf *, int, int);
  676 void             m_copyback(struct mbuf *, int, int, c_caddr_t);
  677 void             m_copydata(const struct mbuf *, int, int, caddr_t);
  678 struct mbuf     *m_copym(struct mbuf *, int, int, int);
  679 struct mbuf     *m_copypacket(struct mbuf *, int);
  680 void             m_copy_pkthdr(struct mbuf *, struct mbuf *);
  681 struct mbuf     *m_copyup(struct mbuf *, int, int);
  682 struct mbuf     *m_defrag(struct mbuf *, int);
  683 void             m_demote_pkthdr(struct mbuf *);
  684 void             m_demote(struct mbuf *, int, int);
  685 struct mbuf     *m_devget(char *, int, int, struct ifnet *,
  686                     void (*)(char *, caddr_t, u_int));
  687 void             m_dispose_extcontrolm(struct mbuf *m);
  688 struct mbuf     *m_dup(const struct mbuf *, int);
  689 int              m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
  690 void             m_extadd(struct mbuf *, char *, u_int, m_ext_free_t,
  691                     void *, void *, int, int);
  692 u_int            m_fixhdr(struct mbuf *);
  693 struct mbuf     *m_fragment(struct mbuf *, int, int);
  694 void             m_freem(struct mbuf *);
  695 struct mbuf     *m_get2(int, int, short, int);
  696 struct mbuf     *m_getjcl(int, short, int, int);
  697 struct mbuf     *m_getm2(struct mbuf *, int, int, short, int);
  698 struct mbuf     *m_getptr(struct mbuf *, int, int *);
  699 u_int            m_length(struct mbuf *, struct mbuf **);
  700 int              m_mbuftouio(struct uio *, const struct mbuf *, int);
  701 void             m_move_pkthdr(struct mbuf *, struct mbuf *);
  702 int              m_pkthdr_init(struct mbuf *, int);
  703 struct mbuf     *m_prepend(struct mbuf *, int, int);
  704 void             m_print(const struct mbuf *, int);
  705 struct mbuf     *m_pulldown(struct mbuf *, int, int, int *);
  706 struct mbuf     *m_pullup(struct mbuf *, int);
  707 int              m_sanity(struct mbuf *, int);
  708 struct mbuf     *m_split(struct mbuf *, int, int);
  709 struct mbuf     *m_uiotombuf(struct uio *, int, int, int, int);
  710 struct mbuf     *m_unshare(struct mbuf *, int);
  711 
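/*
 * Usage sketch: m_copydata() gathers bytes that may span several mbufs
 * into a flat buffer without reshaping the chain, which is often cheaper
 * than m_pullup().  'off' is a hypothetical offset and <netinet/udp.h> is
 * assumed for struct udphdr.
 */
        struct udphdr uh;

        m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
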
  712 static __inline int
  713 m_gettype(int size)
  714 {
  715         int type;
  716 
  717         switch (size) {
  718         case MSIZE:
  719                 type = EXT_MBUF;
  720                 break;
  721         case MCLBYTES:
  722                 type = EXT_CLUSTER;
  723                 break;
  724 #if MJUMPAGESIZE != MCLBYTES
  725         case MJUMPAGESIZE:
  726                 type = EXT_JUMBOP;
  727                 break;
  728 #endif
  729         case MJUM9BYTES:
  730                 type = EXT_JUMBO9;
  731                 break;
  732         case MJUM16BYTES:
  733                 type = EXT_JUMBO16;
  734                 break;
  735         default:
  736                 panic("%s: invalid cluster size %d", __func__, size);
  737         }
  738 
  739         return (type);
  740 }
  741 
  742 /*
  743  * Associate an external reference-counted buffer with an mbuf.
  744  */
  745 static __inline void
  746 m_extaddref(struct mbuf *m, char *buf, u_int size, u_int *ref_cnt,
  747     m_ext_free_t freef, void *arg1, void *arg2)
  748 {
  749 
  750         KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));
  751 
  752         atomic_add_int(ref_cnt, 1);
  753         m->m_flags |= M_EXT;
  754         m->m_ext.ext_buf = buf;
  755         m->m_ext.ext_cnt = ref_cnt;
  756         m->m_data = m->m_ext.ext_buf;
  757         m->m_ext.ext_size = size;
  758         m->m_ext.ext_free = freef;
  759         m->m_ext.ext_arg1 = arg1;
  760         m->m_ext.ext_arg2 = arg2;
  761         m->m_ext.ext_type = EXT_EXTREF;
  762         m->m_ext.ext_flags = 0;
  763 }
  764 
  765 static __inline uma_zone_t
  766 m_getzone(int size)
  767 {
  768         uma_zone_t zone;
  769 
  770         switch (size) {
  771         case MCLBYTES:
  772                 zone = zone_clust;
  773                 break;
  774 #if MJUMPAGESIZE != MCLBYTES
  775         case MJUMPAGESIZE:
  776                 zone = zone_jumbop;
  777                 break;
  778 #endif
  779         case MJUM9BYTES:
  780                 zone = zone_jumbo9;
  781                 break;
  782         case MJUM16BYTES:
  783                 zone = zone_jumbo16;
  784                 break;
  785         default:
  786                 panic("%s: invalid cluster size %d", __func__, size);
  787         }
  788 
  789         return (zone);
  790 }
  791 
  792 /*
  793  * Initialize an mbuf with linear storage.
  794  *
  795  * Inline because the consumer text overhead will be roughly the same whether
  796  * we initialize here or call a function with this many parameters, and
  797  * M_PKTHDR should go away with constant propagation for !MGETHDR.
  798  */
  799 static __inline int
  800 m_init(struct mbuf *m, int how, short type, int flags)
  801 {
  802         int error;
  803 
  804         m->m_next = NULL;
  805         m->m_nextpkt = NULL;
  806         m->m_data = m->m_dat;
  807         m->m_len = 0;
  808         m->m_flags = flags;
  809         m->m_type = type;
  810         if (flags & M_PKTHDR)
  811                 error = m_pkthdr_init(m, how);
  812         else
  813                 error = 0;
  814 
  815         MBUF_PROBE5(m__init, m, how, type, flags, error);
  816         return (error);
  817 }
  818 
  819 static __inline struct mbuf *
  820 m_get(int how, short type)
  821 {
  822         struct mbuf *m;
  823         struct mb_args args;
  824 
  825         args.flags = 0;
  826         args.type = type;
  827         m = uma_zalloc_arg(zone_mbuf, &args, how);
  828         MBUF_PROBE3(m__get, how, type, m);
  829         return (m);
  830 }
  831 
  832 static __inline struct mbuf *
  833 m_gethdr(int how, short type)
  834 {
  835         struct mbuf *m;
  836         struct mb_args args;
  837 
  838         args.flags = M_PKTHDR;
  839         args.type = type;
  840         m = uma_zalloc_arg(zone_mbuf, &args, how);
  841         MBUF_PROBE3(m__gethdr, how, type, m);
  842         return (m);
  843 }
  844 
  845 static __inline struct mbuf *
  846 m_getcl(int how, short type, int flags)
  847 {
  848         struct mbuf *m;
  849         struct mb_args args;
  850 
  851         args.flags = flags;
  852         args.type = type;
  853         m = uma_zalloc_arg(zone_pack, &args, how);
  854         MBUF_PROBE4(m__getcl, how, type, flags, m);
  855         return (m);
  856 }
  857 
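/*
 * Usage sketch: the usual way to obtain a packet header mbuf with an
 * attached standard cluster in one call; M_NOWAIT comes from malloc(9).
 */
        struct mbuf *m;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);               /* allocation may fail */
        m->m_len = m->m_pkthdr.len = 0;
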
  858 /*
  859  * XXX: m_cljset() is a dangerous API.  One must attach only a new,
  860  * unreferenced cluster to an mbuf(9).  It is not possible to assert
  861  * that, so care can be taken only by users of the API.
  862  */
  863 static __inline void
  864 m_cljset(struct mbuf *m, void *cl, int type)
  865 {
  866         int size;
  867 
  868         switch (type) {
  869         case EXT_CLUSTER:
  870                 size = MCLBYTES;
  871                 break;
  872 #if MJUMPAGESIZE != MCLBYTES
  873         case EXT_JUMBOP:
  874                 size = MJUMPAGESIZE;
  875                 break;
  876 #endif
  877         case EXT_JUMBO9:
  878                 size = MJUM9BYTES;
  879                 break;
  880         case EXT_JUMBO16:
  881                 size = MJUM16BYTES;
  882                 break;
  883         default:
  884                 panic("%s: unknown cluster type %d", __func__, type);
  885                 break;
  886         }
  887 
  888         m->m_data = m->m_ext.ext_buf = cl;
  889         m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
  890         m->m_ext.ext_size = size;
  891         m->m_ext.ext_type = type;
  892         m->m_ext.ext_flags = EXT_FLAG_EMBREF;
  893         m->m_ext.ext_count = 1;
  894         m->m_flags |= M_EXT;
  895         MBUF_PROBE3(m__cljset, m, cl, type);
  896 }
  897 
  898 static __inline void
  899 m_chtype(struct mbuf *m, short new_type)
  900 {
  901 
  902         m->m_type = new_type;
  903 }
  904 
  905 static __inline void
  906 m_clrprotoflags(struct mbuf *m)
  907 {
  908 
  909         while (m) {
  910                 m->m_flags &= ~M_PROTOFLAGS;
  911                 m = m->m_next;
  912         }
  913 }
  914 
  915 static __inline struct mbuf *
  916 m_last(struct mbuf *m)
  917 {
  918 
  919         while (m->m_next)
  920                 m = m->m_next;
  921         return (m);
  922 }
  923 
  924 static inline u_int
  925 m_extrefcnt(struct mbuf *m)
  926 {
  927 
  928         KASSERT(m->m_flags & M_EXT, ("%s: M_EXT missing", __func__));
  929 
  930         return ((m->m_ext.ext_flags & EXT_FLAG_EMBREF) ? m->m_ext.ext_count :
  931             *m->m_ext.ext_cnt);
  932 }
  933 
  934 /*
  935  * mbuf, cluster, and external object allocation macros (for compatibility
  936  * purposes).
  937  */
  938 #define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from))
  939 #define MGET(m, how, type)      ((m) = m_get((how), (type)))
  940 #define MGETHDR(m, how, type)   ((m) = m_gethdr((how), (type)))
  941 #define MCLGET(m, how)          m_clget((m), (how))
  942 #define MEXTADD(m, buf, size, free, arg1, arg2, flags, type)            \
  943     m_extadd((m), (char *)(buf), (size), (free), (arg1), (arg2),        \
  944     (flags), (type))
  945 #define m_getm(m, len, how, type)                                       \
  946     m_getm2((m), (len), (how), (type), M_PKTHDR)
  947 
  948 /*
  949  * Evaluates to TRUE if it is safe to write to the mbuf m's data region (this
  950  * can be either the local data payload or an external buffer area, depending
  951  * on whether M_EXT is set).
  952  */
  953 #define M_WRITABLE(m)   (!((m)->m_flags & M_RDONLY) &&                  \
  954                          (!(((m)->m_flags & M_EXT)) ||                  \
  955                          (m_extrefcnt(m) == 1)))
  956 
  957 /* Check if the supplied mbuf has a packet header, or else panic. */
  958 #define M_ASSERTPKTHDR(m)                                               \
  959         KASSERT((m) != NULL && (m)->m_flags & M_PKTHDR,                 \
  960             ("%s: no mbuf packet header!", __func__))
  961 
  962 /*
  963  * Ensure that the supplied mbuf is a valid, non-free mbuf.
  964  *
  965  * XXX: Broken at the moment.  Need some UMA magic to make it work again.
  966  */
  967 #define M_ASSERTVALID(m)                                                \
  968         KASSERT((((struct mbuf *)m)->m_flags & 0) == 0,                 \
  969             ("%s: attempted use of a free mbuf!", __func__))
  970 
  971 /*
  972  * Return the address of the start of the buffer associated with an mbuf,
  973  * handling external storage, packet-header mbufs, and regular data mbufs.
  974  */
  975 #define M_START(m)                                                      \
  976         (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf :                  \
  977          ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] :                \
  978          &(m)->m_dat[0])
  979 
  980 /*
  981  * Return the size of the buffer associated with an mbuf, handling external
  982  * storage, packet-header mbufs, and regular data mbufs.
  983  */
  984 #define M_SIZE(m)                                                       \
  985         (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size :                 \
  986          ((m)->m_flags & M_PKTHDR) ? MHLEN :                            \
  987          MLEN)
  988 
  989 /*
  990  * Set the m_data pointer of a newly allocated mbuf to place an object of the
  991  * specified size at the end of the mbuf, longword aligned.
  992  *
  993  * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as
  994  * separate macros, each asserting that it was called at the proper moment.
  995  * This required callers to test the storage type themselves and call the
  996  * right one.  Rather than require callers to be aware of those layout
  997  * decisions, we centralize here.
  998  */
  999 static __inline void
 1000 m_align(struct mbuf *m, int len)
 1001 {
 1002 #ifdef INVARIANTS
 1003         const char *msg = "%s: not a virgin mbuf";
 1004 #endif
 1005         int adjust;
 1006 
 1007         KASSERT(m->m_data == M_START(m), (msg, __func__));
 1008 
 1009         adjust = M_SIZE(m) - len;
 1010         m->m_data += adjust &~ (sizeof(long)-1);
 1011 }
 1012 
 1013 #define M_ALIGN(m, len)         m_align(m, len)
 1014 #define MH_ALIGN(m, len)        m_align(m, len)
 1015 #define MEXT_ALIGN(m, len)      m_align(m, len)
 1016 
 1017 /*
 1018  * Compute the amount of space available before the current start of data in
 1019  * an mbuf.
 1020  *
 1021  * The M_WRITABLE() check is a temporary, conservative safety measure: the burden
 1022  * of checking writability of the mbuf data area rests solely with the caller.
 1023  *
 1024  * NB: In previous versions, M_LEADINGSPACE() would only check M_WRITABLE()
 1025  * for mbufs with external storage.  We now allow mbuf-embedded data to be
 1026  * read-only as well.
 1027  */
 1028 #define M_LEADINGSPACE(m)                                               \
 1029         (M_WRITABLE(m) ? ((m)->m_data - M_START(m)) : 0)
 1030 
 1031 /*
 1032  * Compute the amount of space available after the end of data in an mbuf.
 1033  *
 1034  * The M_WRITABLE() check is a temporary, conservative safety measure: the burden
 1035  * of checking writability of the mbuf data area rests solely with the caller.
 1036  *
 1037  * NB: In previous versions, M_TRAILINGSPACE() would only check M_WRITABLE()
 1038  * for mbufs with external storage.  We now allow mbuf-embedded data to be
 1039  * read-only as well.
 1040  */
 1041 #define M_TRAILINGSPACE(m)                                              \
 1042         (M_WRITABLE(m) ?                                                \
 1043             ((M_START(m) + M_SIZE(m)) - ((m)->m_data + (m)->m_len)) : 0)
 1044 
 1045 /*
 1046  * Arrange to prepend space of size plen to mbuf m.  If a new mbuf must be
 1047  * allocated, how specifies whether to wait.  If the allocation fails, the
 1048  * original mbuf chain is freed and m is set to NULL.
 1049  */
 1050 #define M_PREPEND(m, plen, how) do {                                    \
 1051         struct mbuf **_mmp = &(m);                                      \
 1052         struct mbuf *_mm = *_mmp;                                       \
 1053         int _mplen = (plen);                                            \
 1054         int __mhow = (how);                                             \
 1055                                                                         \
 1056         MBUF_CHECKSLEEP(how);                                           \
 1057         if (M_LEADINGSPACE(_mm) >= _mplen) {                            \
 1058                 _mm->m_data -= _mplen;                                  \
 1059                 _mm->m_len += _mplen;                                   \
 1060         } else                                                          \
 1061                 _mm = m_prepend(_mm, _mplen, __mhow);                   \
 1062         if (_mm != NULL && _mm->m_flags & M_PKTHDR)                     \
 1063                 _mm->m_pkthdr.len += _mplen;                            \
 1064         *_mmp = _mm;                                                    \
 1065 } while (0)
 1066 
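/*
 * Usage sketch: prepending room for an Ethernet header.  On failure
 * M_PREPEND() frees the chain and sets m to NULL.  Assumes
 * <net/ethernet.h> for ETHER_HDR_LEN and struct ether_header.
 */
        struct ether_header *eh;

        M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
        if (m == NULL)
                return (ENOBUFS);
        eh = mtod(m, struct ether_header *);
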
 1067 /*
 1068  * Change mbuf to new type.  This is a relatively expensive operation and
 1069  * should be avoided.
 1070  */
 1071 #define MCHTYPE(m, t)   m_chtype((m), (t))
 1072 
 1073 /* Length to m_copy to copy all. */
 1074 #define M_COPYALL       1000000000
 1075 
 1076 extern int              max_datalen;    /* MHLEN - max_hdr */
 1077 extern int              max_hdr;        /* Largest link + protocol header */
 1078 extern int              max_linkhdr;    /* Largest link-level header */
 1079 extern int              max_protohdr;   /* Largest protocol header */
 1080 extern int              nmbclusters;    /* Maximum number of clusters */
 1081 
 1082 /*-
 1083  * Network packets may have annotations attached by affixing a list of
 1084  * "packet tags" to the pkthdr structure.  Packet tags are dynamically
 1085  * allocated semi-opaque data structures that have a fixed header
 1086  * (struct m_tag) that specifies the size of the memory block and a
 1087  * <cookie,type> pair that identifies it.  The cookie is a 32-bit unique
 1088  * unsigned value used to identify a module or ABI.  By convention this value
 1089  * is chosen as the date+time that the module is created, expressed as the
 1090  * number of seconds since the epoch (e.g., using date -u +'%s').  The type
 1091  * value is an ABI/module-specific value that identifies a particular
 1092  * annotation and is private to the module.  For compatibility with systems
 1093  * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
 1094  * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
 1095  * compatibility shim functions and several tag types are defined below.
 1096  * Users that do not require compatibility should use a private cookie value
 1097  * so that packet tag-related definitions can be maintained privately.
 1098  *
 1099  * Note that the packet tag returned by m_tag_alloc has the default memory
 1100  * alignment implemented by malloc.  To reference private data one can use a
 1101  * construct like:
 1102  *
 1103  *      struct m_tag *mtag = m_tag_alloc(...);
 1104  *      struct foo *p = (struct foo *)(mtag+1);
 1105  *
 1106  * if the alignment of struct m_tag is sufficient for referencing members of
 1107  * struct foo.  Otherwise it is necessary to embed struct m_tag within the
 1108  * private data structure to ensure proper alignment; e.g.,
 1109  *
 1110  *      struct foo {
 1111  *              struct m_tag    tag;
 1112  *              ...
 1113  *      };
 1114  *      struct foo *p = (struct foo *) m_tag_alloc(...);
 1115  *      struct m_tag *mtag = &p->tag;
 1116  */
 1117 
 1118 /*
 1119  * Persistent tags stay with an mbuf until the mbuf is reclaimed.  Otherwise
 1120  * tags are expected to ``vanish'' when they pass through a network
 1121  * interface.  For most interfaces this happens normally as the tags are
 1122  * reclaimed when the mbuf is freed.  However, in some special cases
 1123  * reclaiming must be done manually.  An example is packets that pass through
 1124  * the loopback interface.  Also, one must be careful to do this when
 1125  * ``turning around'' packets (e.g., icmp_reflect).
 1126  *
 1127  * To mark a tag persistent, bit-or this flag into the tag id when defining it.
 1128  * The tag will then be treated as described above.
 1129  */
 1130 #define MTAG_PERSISTENT                         0x800
 1131 
 1132 #define PACKET_TAG_NONE                         0  /* Nadda */
 1133 
 1134 /* Packet tags for use with PACKET_ABI_COMPAT. */
 1135 #define PACKET_TAG_IPSEC_IN_DONE                1  /* IPsec applied, in */
 1136 #define PACKET_TAG_IPSEC_OUT_DONE               2  /* IPsec applied, out */
 1137 #define PACKET_TAG_IPSEC_IN_CRYPTO_DONE         3  /* NIC IPsec crypto done */
 1138 #define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED      4  /* NIC IPsec crypto req'ed */
 1139 #define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO     5  /* NIC notifies IPsec */
 1140 #define PACKET_TAG_IPSEC_PENDING_TDB            6  /* Reminder to do IPsec */
 1141 #define PACKET_TAG_BRIDGE                       7  /* Bridge processing done */
 1142 #define PACKET_TAG_GIF                          8  /* GIF processing done */
 1143 #define PACKET_TAG_GRE                          9  /* GRE processing done */
 1144 #define PACKET_TAG_IN_PACKET_CHECKSUM           10 /* NIC checksumming done */
 1145 #define PACKET_TAG_ENCAP                        11 /* Encap.  processing */
 1146 #define PACKET_TAG_IPSEC_SOCKET                 12 /* IPSEC socket ref */
 1147 #define PACKET_TAG_IPSEC_HISTORY                13 /* IPSEC history */
 1148 #define PACKET_TAG_IPV6_INPUT                   14 /* IPV6 input processing */
 1149 #define PACKET_TAG_DUMMYNET                     15 /* dummynet info */
 1150 #define PACKET_TAG_DIVERT                       17 /* divert info */
 1151 #define PACKET_TAG_IPFORWARD                    18 /* ipforward info */
 1152 #define PACKET_TAG_MACLABEL     (19 | MTAG_PERSISTENT) /* MAC label */
 1153 #define PACKET_TAG_PF                           21 /* PF/ALTQ information */
 1154 #define PACKET_TAG_RTSOCKFAM                    25 /* rtsock sa family */
 1155 #define PACKET_TAG_IPOPTIONS                    27 /* Saved IP options */
 1156 #define PACKET_TAG_CARP                         28 /* CARP info */
 1157 #define PACKET_TAG_IPSEC_NAT_T_PORTS            29 /* two uint16_t */
 1158 #define PACKET_TAG_ND_OUTGOING                  30 /* ND outgoing */
 1159 
 1160 /* Specific cookies and tags. */
 1161 
 1162 /* Packet tag routines. */
 1163 struct m_tag    *m_tag_alloc(u_int32_t, int, int, int);
 1164 void             m_tag_delete(struct mbuf *, struct m_tag *);
 1165 void             m_tag_delete_chain(struct mbuf *, struct m_tag *);
 1166 void             m_tag_free_default(struct m_tag *);
 1167 struct m_tag    *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
 1168 struct m_tag    *m_tag_copy(struct m_tag *, int);
 1169 int              m_tag_copy_chain(struct mbuf *, const struct mbuf *, int);
 1170 void             m_tag_delete_nonpersistent(struct mbuf *);
 1171 
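/*
 * Usage sketch: a module with a private cookie attaches a small
 * annotation to a packet and retrieves it later.  The cookie and type
 * values below are hypothetical.
 */
#define MTAG_EXAMPLE            1609459200      /* e.g. date -u +'%s' */
#define MTAG_EXAMPLE_MARK       1

        struct m_tag *mtag;

        mtag = m_tag_alloc(MTAG_EXAMPLE, MTAG_EXAMPLE_MARK,
            sizeof(uint32_t), M_NOWAIT);
        if (mtag != NULL) {
                *(uint32_t *)(mtag + 1) = 42;   /* data follows the header */
                m_tag_prepend(m, mtag);
        }

        /* Later, possibly in another layer: */
        mtag = m_tag_locate(m, MTAG_EXAMPLE, MTAG_EXAMPLE_MARK, NULL);
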
 1172 /*
 1173  * Initialize the list of tags associated with an mbuf.
 1174  */
 1175 static __inline void
 1176 m_tag_init(struct mbuf *m)
 1177 {
 1178 
 1179         SLIST_INIT(&m->m_pkthdr.tags);
 1180 }
 1181 
 1182 /*
 1183  * Set up the contents of a tag.  Note that this does not fill in the free
 1184  * method; the caller is expected to do that.
 1185  *
 1186  * XXX probably should be called m_tag_init, but that was already taken.
 1187  */
 1188 static __inline void
 1189 m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
 1190 {
 1191 
 1192         t->m_tag_id = type;
 1193         t->m_tag_len = len;
 1194         t->m_tag_cookie = cookie;
 1195 }
 1196 
 1197 /*
 1198  * Reclaim resources associated with a tag.
 1199  */
 1200 static __inline void
 1201 m_tag_free(struct m_tag *t)
 1202 {
 1203 
 1204         (*t->m_tag_free)(t);
 1205 }
 1206 
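/*
 * Example (editorial sketch, not part of mbuf.h): pairing m_tag_setup()
 * with a caller-supplied destructor.  Tags obtained from m_tag_alloc()
 * have m_tag_free_default() installed for them; a tag embedded in a
 * larger, separately allocated structure installs its own free routine.
 * struct example_big_tag is hypothetical, and free()/M_TEMP assume
 * <sys/malloc.h>.
 */
#if 0   /* illustrative only */
struct example_big_tag {
        struct m_tag    ebt_tag;        /* must be first */
        void            *ebt_extra;     /* hypothetical private state */
};

static void
example_big_tag_free(struct m_tag *t)
{
        struct example_big_tag *ebt = (struct example_big_tag *)t;

        free(ebt->ebt_extra, M_TEMP);
        free(ebt, M_TEMP);
}

static void
example_big_tag_init(struct example_big_tag *ebt, u_int32_t cookie, int type)
{
        m_tag_setup(&ebt->ebt_tag, cookie, type,
            sizeof(*ebt) - sizeof(struct m_tag));
        ebt->ebt_tag.m_tag_free = example_big_tag_free;
}
#endif
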
 1207 /*
 1208  * Return the first tag associated with an mbuf.
 1209  */
 1210 static __inline struct m_tag *
 1211 m_tag_first(struct mbuf *m)
 1212 {
 1213 
 1214         return (SLIST_FIRST(&m->m_pkthdr.tags));
 1215 }
 1216 
 1217 /*
 1218  * Return the next tag in the list of tags associated with an mbuf.
 1219  */
 1220 static __inline struct m_tag *
 1221 m_tag_next(struct mbuf *m __unused, struct m_tag *t)
 1222 {
 1223 
 1224         return (SLIST_NEXT(t, m_tag_link));
 1225 }
 1226 
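/*
 * Example (editorial sketch, not part of mbuf.h): walking every tag on
 * a packet header mbuf with m_tag_first()/m_tag_next().  The next
 * pointer is saved before m_tag_delete(), which unlinks and frees the
 * current element.  The cookie parameter is whatever ABI cookie the
 * caller registered.
 */
#if 0   /* illustrative only */
static void
example_purge_tags(struct mbuf *m, u_int32_t cookie)
{
        struct m_tag *t, *next;

        for (t = m_tag_first(m); t != NULL; t = next) {
                next = m_tag_next(m, t);
                if (t->m_tag_cookie == cookie)
                        m_tag_delete(m, t);
        }
}
#endif
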
 1227 /*
 1228  * Prepend a tag to the list of tags associated with an mbuf.
 1229  */
 1230 static __inline void
 1231 m_tag_prepend(struct mbuf *m, struct m_tag *t)
 1232 {
 1233 
 1234         SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
 1235 }
 1236 
 1237 /*
 1238  * Unlink a tag from the list of tags associated with an mbuf.
 1239  */
 1240 static __inline void
 1241 m_tag_unlink(struct mbuf *m, struct m_tag *t)
 1242 {
 1243 
 1244         SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
 1245 }
 1246 
 1247 /* These are for OpenBSD compatibility. */
 1248 #define MTAG_ABI_COMPAT         0               /* compatibility ABI */
 1249 
 1250 static __inline struct m_tag *
 1251 m_tag_get(int type, int length, int wait)
 1252 {
 1253         return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait));
 1254 }
 1255 
 1256 static __inline struct m_tag *
 1257 m_tag_find(struct mbuf *m, int type, struct m_tag *start)
 1258 {
 1259         return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
 1260             m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
 1261 }
 1262 
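/*
 * Example (editorial sketch, not part of mbuf.h): the OpenBSD-style
 * wrappers above imply the MTAG_ABI_COMPAT cookie, so callers pick only
 * a tag type.  The type value EXAMPLE_COMPAT_TYPE is hypothetical.
 */
#if 0   /* illustrative only */
#define EXAMPLE_COMPAT_TYPE     32767           /* hypothetical tag type */

static struct m_tag *
example_compat_tag(struct mbuf *m)
{
        struct m_tag *t;

        t = m_tag_find(m, EXAMPLE_COMPAT_TYPE, NULL);   /* already tagged? */
        if (t == NULL) {
                t = m_tag_get(EXAMPLE_COMPAT_TYPE, sizeof(uint32_t), M_NOWAIT);
                if (t != NULL)
                        m_tag_prepend(m, t);
        }
        return (t);
}
#endif
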
 1263 static __inline struct mbuf *
 1264 m_free(struct mbuf *m)
 1265 {
 1266         struct mbuf *n = m->m_next;
 1267 
 1268         MBUF_PROBE1(m__free, m);
 1269         if ((m->m_flags & (M_PKTHDR|M_NOFREE)) == (M_PKTHDR|M_NOFREE))
 1270                 m_tag_delete_chain(m, NULL);
 1271         if (m->m_flags & M_EXT)
 1272                 mb_free_ext(m);
 1273         else if ((m->m_flags & M_NOFREE) == 0)
 1274                 uma_zfree(zone_mbuf, m);
 1275         return (n);
 1276 }
 1277 
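/*
 * Example (editorial sketch, not part of mbuf.h): m_free() releases one
 * mbuf and returns its m_next, so a whole chain can be released link by
 * link.  m_freem(), declared elsewhere in this header, does the same
 * loop for you.
 */
#if 0   /* illustrative only */
static void
example_free_chain(struct mbuf *m)
{
        while (m != NULL)
                m = m_free(m);
}
#endif
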
 1278 static __inline int
 1279 rt_m_getfib(struct mbuf *m)
 1280 {
 1281         KASSERT(m->m_flags & M_PKTHDR, ("Attempt to get FIB from non header mbuf."));
 1282         return (m->m_pkthdr.fibnum);
 1283 }
 1284 
 1285 #define M_GETFIB(_m)   rt_m_getfib(_m)
 1286 
 1287 #define M_SETFIB(_m, _fib) do {                                         \
 1288         KASSERT((_m)->m_flags & M_PKTHDR, ("Attempt to set FIB on non header mbuf."));  \
 1289         ((_m)->m_pkthdr.fibnum) = (_fib);                               \
 1290 } while (0)
 1291 
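/*
 * Example (editorial sketch, not part of mbuf.h): propagating the FIB
 * (routing table index) from one packet header mbuf to another.  Both
 * macros KASSERT M_PKTHDR, so only header mbufs may be passed.
 */
#if 0   /* illustrative only */
static void
example_copy_fib(struct mbuf *dst, struct mbuf *src)
{
        M_SETFIB(dst, M_GETFIB(src));
}
#endif
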
 1292 /* flags passed as first argument for "m_xxx_tcpip_hash()" */
 1293 #define MBUF_HASHFLAG_L2        (1 << 2)
 1294 #define MBUF_HASHFLAG_L3        (1 << 3)
 1295 #define MBUF_HASHFLAG_L4        (1 << 4)
 1296 
 1297 /* mbuf hashing helper routines */
 1298 uint32_t        m_ether_tcpip_hash_init(void);
 1299 uint32_t        m_ether_tcpip_hash(const uint32_t, const struct mbuf *, uint32_t);
 1300 uint32_t        m_infiniband_tcpip_hash_init(void);
 1301 uint32_t        m_infiniband_tcpip_hash(const uint32_t, const struct mbuf *, uint32_t);
 1302 
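/*
 * Example (editorial sketch, not part of mbuf.h): computing a flow hash
 * over the IP and TCP/UDP headers of an Ethernet frame, in the style of
 * consumers such as lagg(4).  The seed would normally be obtained once
 * from m_ether_tcpip_hash_init() and cached by the caller.
 */
#if 0   /* illustrative only */
static uint32_t
example_flowhash(const struct mbuf *m, uint32_t seed)
{
        return (m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4,
            m, seed));
}
#endif
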
 1303 #ifdef MBUF_PROFILING
 1304  void m_profile(struct mbuf *m);
 1305  #define M_PROFILE(m) m_profile(m)
 1306 #else
 1307  #define M_PROFILE(m)
 1308 #endif
 1309 
 1310 struct mbufq {
 1311         STAILQ_HEAD(, mbuf)     mq_head;
 1312         int                     mq_len;
 1313         int                     mq_maxlen;
 1314 };
 1315 
 1316 static inline void
 1317 mbufq_init(struct mbufq *mq, int maxlen)
 1318 {
 1319 
 1320         STAILQ_INIT(&mq->mq_head);
 1321         mq->mq_maxlen = maxlen;
 1322         mq->mq_len = 0;
 1323 }
 1324 
 1325 static inline struct mbuf *
 1326 mbufq_flush(struct mbufq *mq)
 1327 {
 1328         struct mbuf *m;
 1329 
 1330         m = STAILQ_FIRST(&mq->mq_head);
 1331         STAILQ_INIT(&mq->mq_head);
 1332         mq->mq_len = 0;
 1333         return (m);
 1334 }
 1335 
 1336 static inline void
 1337 mbufq_drain(struct mbufq *mq)
 1338 {
 1339         struct mbuf *m, *n;
 1340 
 1341         n = mbufq_flush(mq);
 1342         while ((m = n) != NULL) {
 1343                 n = STAILQ_NEXT(m, m_stailqpkt);
 1344                 m_freem(m);
 1345         }
 1346 }
 1347 
 1348 static inline struct mbuf *
 1349 mbufq_first(const struct mbufq *mq)
 1350 {
 1351 
 1352         return (STAILQ_FIRST(&mq->mq_head));
 1353 }
 1354 
 1355 static inline struct mbuf *
 1356 mbufq_last(const struct mbufq *mq)
 1357 {
 1358 
 1359         return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
 1360 }
 1361 
 1362 static inline int
 1363 mbufq_full(const struct mbufq *mq)
 1364 {
 1365 
 1366         return (mq->mq_len >= mq->mq_maxlen);
 1367 }
 1368 
 1369 static inline int
 1370 mbufq_len(const struct mbufq *mq)
 1371 {
 1372 
 1373         return (mq->mq_len);
 1374 }
 1375 
 1376 static inline int
 1377 mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
 1378 {
 1379 
 1380         if (mbufq_full(mq))
 1381                 return (ENOBUFS);
 1382         STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
 1383         mq->mq_len++;
 1384         return (0);
 1385 }
 1386 
 1387 static inline struct mbuf *
 1388 mbufq_dequeue(struct mbufq *mq)
 1389 {
 1390         struct mbuf *m;
 1391 
 1392         m = STAILQ_FIRST(&mq->mq_head);
 1393         if (m) {
 1394                 STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
 1395                 m->m_nextpkt = NULL;
 1396                 mq->mq_len--;
 1397         }
 1398         return (m);
 1399 }
 1400 
 1401 static inline void
 1402 mbufq_prepend(struct mbufq *mq, struct mbuf *m)
 1403 {
 1404 
 1405         STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
 1406         mq->mq_len++;
 1407 }
 1408 
 1409 /*
 1410  * Note: this doesn't enforce the maximum list size for dst.
 1411  */
 1412 static inline void
 1413 mbufq_concat(struct mbufq *mq_dst, struct mbufq *mq_src)
 1414 {
 1415 
 1416         mq_dst->mq_len += mq_src->mq_len;
 1417         STAILQ_CONCAT(&mq_dst->mq_head, &mq_src->mq_head);
 1418         mq_src->mq_len = 0;
 1419 }
 1420 
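/*
 * Example (editorial sketch, not part of mbuf.h): a bounded packet queue
 * built on the mbufq helpers above.  Locking is omitted; real users
 * serialize access with their own mutex.  The queue depth of 128 and the
 * example_* names are hypothetical.
 */
#if 0   /* illustrative only */
static struct mbufq example_q;

static void
example_q_setup(void)
{
        mbufq_init(&example_q, 128);            /* at most 128 packets */
}

static int
example_q_put(struct mbuf *m)
{
        if (mbufq_enqueue(&example_q, m) != 0) {
                m_freem(m);                     /* queue full: drop */
                return (ENOBUFS);
        }
        return (0);
}

static void
example_q_run(void)
{
        struct mbuf *m;

        while ((m = mbufq_dequeue(&example_q)) != NULL)
                m_freem(m);                     /* stand-in for real work */
}
#endif
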
 1421 #ifdef _SYS_TIMESPEC_H_
 1422 static inline void
 1423 mbuf_tstmp2timespec(struct mbuf *m, struct timespec *ts)
 1424 {
 1425 
 1426         KASSERT((m->m_flags & M_PKTHDR) != 0, ("mbuf %p no M_PKTHDR", m));
 1427         KASSERT((m->m_flags & M_TSTMP) != 0, ("mbuf %p no M_TSTMP", m));
 1428         ts->tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
 1429         ts->tv_nsec = m->m_pkthdr.rcv_tstmp % 1000000000;
 1430 }
 1431 #endif
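/*
 * Example (editorial sketch, not part of mbuf.h): converting a hardware
 * receive timestamp to a struct timespec.  mbuf_tstmp2timespec() asserts
 * both M_PKTHDR and M_TSTMP, so the caller checks them first; struct
 * timespec assumes <sys/time.h> has been included.
 */
#if 0   /* illustrative only */
static void
example_rcv_tstmp(struct mbuf *m, struct timespec *ts)
{
        if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | M_TSTMP))
                mbuf_tstmp2timespec(m, ts);
        else
                ts->tv_sec = ts->tv_nsec = 0;   /* no timestamp available */
}
#endif
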
 1432 
 1433 #ifdef NETDUMP
 1434 /* Invoked from the netdump client code. */
 1435 void    netdump_mbuf_drain(void);
 1436 void    netdump_mbuf_dump(void);
 1437 void    netdump_mbuf_reinit(int nmbuf, int nclust, int clsize);
 1438 #endif
 1439 
 1440 #endif /* _KERNEL */
 1441 #endif /* !_SYS_MBUF_H_ */
