FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_sockbuf.c


/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_socket2.c      8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/kern/uipc_sockbuf.c 331722 2018-03-29 02:50:57Z eadler $");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void    (*aio_swake)(struct socket *, struct sockbuf *);

/*
 * Primitive routines for operating on socket buffers
 */

u_long  sb_max = SB_MAX;
u_long sb_max_adj =
       (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static  u_long sb_efficiency = 8;       /* parameter for sbreserve() */

static struct mbuf      *sbcut_internal(struct sockbuf *sb, int len);
static void     sbflush_internal(struct sockbuf *sb);

/*
 * Our own version of m_clrprotoflags(), that can preserve M_NOTREADY.
 */
static void
sbm_clrprotoflags(struct mbuf *m, int flags)
{
        int mask;

        mask = ~M_PROTOFLAGS;
        if (flags & PRUS_NOTREADY)
                mask |= M_NOTREADY;
        while (m) {
                m->m_flags &= mask;
                m = m->m_next;
        }
}

/*
 * Mark ready "count" mbufs starting with "m".
 */
int
sbready(struct sockbuf *sb, struct mbuf *m, int count)
{
        u_int blocker;

        SOCKBUF_LOCK_ASSERT(sb);
        KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));

        blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

        for (int i = 0; i < count; i++, m = m->m_next) {
                KASSERT(m->m_flags & M_NOTREADY,
                    ("%s: m %p !M_NOTREADY", __func__, m));
                m->m_flags &= ~(M_NOTREADY | blocker);
                if (blocker)
                        sb->sb_acc += m->m_len;
        }

        if (!blocker)
                return (EINPROGRESS);

        /* This one was blocking all the queue. */
        for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
                KASSERT(m->m_flags & M_BLOCKED,
                    ("%s: m %p !M_BLOCKED", __func__, m));
                m->m_flags &= ~M_BLOCKED;
                sb->sb_acc += m->m_len;
        }

        sb->sb_fnrdy = m;

        return (0);
}
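
/*
 * Illustrative walkthrough (not part of the original file): suppose the
 * buffer holds m1 (M_NOTREADY, sb_fnrdy == m1) followed by m2 (ready but
 * M_BLOCKED, having been appended while m1 was pending) and m3
 * (M_NOTREADY | M_BLOCKED).  sbready(sb, m1, 1) clears m1 and, because
 * m1 was the blocker, also unblocks m2 and credits its length to sb_acc,
 * leaving sb_fnrdy pointing at m3.  Readying m3 while m1 is still
 * pending only clears M_NOTREADY and returns EINPROGRESS; its data is
 * not credited to sb_acc until m1 becomes ready.
 */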

/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sb->sb_ccc += m->m_len;

        if (sb->sb_fnrdy == NULL) {
                if (m->m_flags & M_NOTREADY)
                        sb->sb_fnrdy = m;
                else
                        sb->sb_acc += m->m_len;
        } else
                m->m_flags |= M_BLOCKED;

        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                sb->sb_ctl += m->m_len;

        sb->sb_mbcnt += MSIZE;
        sb->sb_mcnt += 1;

        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt += m->m_ext.ext_size;
                sb->sb_ccnt += 1;
        }
}

/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0   /* XXX: not yet: soclose() call path comes here w/o lock. */
        SOCKBUF_LOCK_ASSERT(sb);
#endif

        sb->sb_ccc -= m->m_len;

        if (!(m->m_flags & M_NOTAVAIL))
                sb->sb_acc -= m->m_len;

        if (m == sb->sb_fnrdy) {
                struct mbuf *n;

                KASSERT(m->m_flags & M_NOTREADY,
                    ("%s: m %p !M_NOTREADY", __func__, m));

                n = m->m_next;
                while (n != NULL && !(n->m_flags & M_NOTREADY)) {
                        n->m_flags &= ~M_BLOCKED;
                        sb->sb_acc += n->m_len;
                        n = n->m_next;
                }
                sb->sb_fnrdy = n;
        }

        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                sb->sb_ctl -= m->m_len;

        sb->sb_mbcnt -= MSIZE;
        sb->sb_mcnt -= 1;
        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt -= m->m_ext.ext_size;
                sb->sb_ccnt -= 1;
        }

        if (sb->sb_sndptr == m) {
                sb->sb_sndptr = NULL;
                sb->sb_sndptroff = 0;
        }
        if (sb->sb_sndptroff != 0)
                sb->sb_sndptroff -= m->m_len;
}

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is normally applied to a socket by the protocol code when the user
 * informs the system that no more data is to be sent (in the case of
 * PRU_SHUTDOWN).  Socantrcvmore indicates that no more data will be
 * received, and will normally be applied to a socket by a protocol when it
 * detects that the peer will send no more data.  Data queued for reading in
 * the socket may yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(&so->so_snd);

        so->so_snd.sb_state |= SBS_CANTSENDMORE;
        sowwakeup_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

        SOCKBUF_LOCK(&so->so_snd);
        socantsendmore_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(&so->so_rcv);

        so->so_rcv.sb_state |= SBS_CANTRCVMORE;
        sorwakeup_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

        SOCKBUF_LOCK(&so->so_rcv);
        socantrcvmore_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sb->sb_flags |= SB_WAIT;
        return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx,
            (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
            sb->sb_timeo, 0, 0));
}
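
/*
 * Example (illustrative sketch, not part of the original file): the
 * canonical sbwait() caller holds the socket buffer lock, re-checks its
 * wakeup condition in a loop, and bails out on error.  A hypothetical
 * reader waiting for data might look like:
 */
#if 0
        SOCKBUF_LOCK(&so->so_rcv);
        while (sbavail(&so->so_rcv) == 0 &&
            (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
                error = sbwait(&so->so_rcv);
                if (error) {
                        SOCKBUF_UNLOCK(&so->so_rcv);
                        return (error);
                }
        }
        /* ... consume data with the lock still held ... */
        SOCKBUF_UNLOCK(&so->so_rcv);
#endif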

int
sblock(struct sockbuf *sb, int flags)
{

        KASSERT((flags & SBL_VALID) == flags,
            ("sblock: flags invalid (0x%x)", flags));

        if (flags & SBL_WAIT) {
                if ((sb->sb_flags & SB_NOINTR) ||
                    (flags & SBL_NOINTR)) {
                        sx_xlock(&sb->sb_sx);
                        return (0);
                }
                return (sx_xlock_sig(&sb->sb_sx));
        } else {
                if (sx_try_xlock(&sb->sb_sx) == 0)
                        return (EWOULDBLOCK);
                return (0);
        }
}

void
sbunlock(struct sockbuf *sb)
{

        sx_xunlock(&sb->sb_sx);
}
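
/*
 * Example (illustrative sketch, not part of the original file): sblock()
 * serializes whole I/O operations on a buffer, while the sockbuf mutex
 * protects the buffer fields themselves.  A typical pattern in an I/O
 * path is:
 */
#if 0
        error = sblock(&so->so_rcv, SBL_WAIT);  /* may sleep, may catch signals */
        if (error)
                return (error);
        /* ... perform the receive, possibly sleeping in sbwait() ... */
        sbunlock(&so->so_rcv);
#endif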

/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
        int ret;

        SOCKBUF_LOCK_ASSERT(sb);

        selwakeuppri(&sb->sb_sel, PSOCK);
        if (!SEL_WAITING(&sb->sb_sel))
                sb->sb_flags &= ~SB_SEL;
        if (sb->sb_flags & SB_WAIT) {
                sb->sb_flags &= ~SB_WAIT;
                wakeup(&sb->sb_acc);
        }
        KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
        if (sb->sb_upcall != NULL) {
                ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
                if (ret == SU_ISCONNECTED) {
                        KASSERT(sb == &so->so_rcv,
                            ("SO_SND upcall returned SU_ISCONNECTED"));
                        soupcall_clear(so, SO_RCV);
                }
        } else
                ret = SU_OK;
        if (sb->sb_flags & SB_AIO)
                sowakeup_aio(so, sb);
        SOCKBUF_UNLOCK(sb);
        if (ret == SU_ISCONNECTED)
                soisconnected(so);
        if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
                pgsigio(&so->so_sigio, SIGIO, 0);
        mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}
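
/*
 * Example (illustrative sketch, not part of the original file): callers
 * normally reach sowakeup() through the sorwakeup()/sowwakeup() macros.
 * Note the lock handoff -- the _locked variants consume the sockbuf lock:
 */
#if 0
        SOCKBUF_LOCK(&so->so_rcv);
        sbappend_locked(&so->so_rcv, m, 0);
        sorwakeup_locked(so);           /* drops the sockbuf lock */
#endif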

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbufs must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbufs must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
        struct thread *td = curthread;

        SOCKBUF_LOCK(&so->so_snd);
        SOCKBUF_LOCK(&so->so_rcv);
        if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
                goto bad;
        if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
                goto bad2;
        if (so->so_rcv.sb_lowat == 0)
                so->so_rcv.sb_lowat = 1;
        if (so->so_snd.sb_lowat == 0)
                so->so_snd.sb_lowat = MCLBYTES;
        if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
                so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_UNLOCK(&so->so_snd);
        return (0);
bad2:
        sbrelease_locked(&so->so_snd, so);
bad:
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_UNLOCK(&so->so_snd);
        return (ENOBUFS);
}
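
/*
 * Example (illustrative sketch, not part of the original file): a
 * protocol's attach routine typically sizes both buffers once with
 * protocol-specific defaults.  "foo_attach" and the byte counts below
 * are hypothetical:
 */
#if 0
static int
foo_attach(struct socket *so, int proto, struct thread *td)
{
        int error;

        error = soreserve(so, 32 * 1024, 64 * 1024);
        if (error)
                return (error);
        /* ... remaining protocol setup ... */
        return (0);
}
#endif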

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        u_long tmp_sb_max = sb_max;

        error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
        if (error || !req->newptr)
                return (error);
        if (tmp_sb_max < MSIZE + MCLBYTES)
                return (EINVAL);
        sb_max = tmp_sb_max;
        sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
        return (0);
}
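
/*
 * Note (not part of the original file): this handler backs the
 * kern.ipc.maxsockbuf sysctl registered at the bottom of this file, so
 * an administrator can raise the cap at runtime, e.g.:
 *
 *      sysctl kern.ipc.maxsockbuf=8388608
 *
 * Values below MSIZE + MCLBYTES are rejected with EINVAL.
 */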

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
        rlim_t sbsize_limit;

        SOCKBUF_LOCK_ASSERT(sb);

        /*
         * When a thread is passed, we take into account the thread's socket
         * buffer size limit.  The caller will generally pass curthread, but
         * in the TCP input path, NULL will be passed to indicate that no
         * appropriate thread resource limits are available.  In that case,
         * we don't apply a process limit.
         */
        if (cc > sb_max_adj)
                return (0);
        if (td != NULL) {
                sbsize_limit = lim_cur(td, RLIMIT_SBSIZE);
        } else
                sbsize_limit = RLIM_INFINITY;
        if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
            sbsize_limit))
                return (0);
        sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
        if (sb->sb_lowat > sb->sb_hiwat)
                sb->sb_lowat = sb->sb_hiwat;
        return (1);
}
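
/*
 * Worked example (not part of the original file): with the default
 * sb_efficiency of 8, reserving cc = 64 KB of data space sets
 * sb_mbmax = min(64 KB * 8, sb_max) = 512 KB of mbuf storage (assuming
 * sb_max is larger), so fragmentation of the data into many small,
 * partially filled mbufs does not prematurely hit the mbuf accounting
 * limit.
 */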

int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
        int error;

        SOCKBUF_LOCK(sb);
        error = sbreserve_locked(sb, cc, so, td);
        SOCKBUF_UNLOCK(sb);
        return (error);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

        sbflush_internal(sb);
        (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
            RLIM_INFINITY);
        sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

        SOCKBUF_LOCK(sb);
        sbrelease_locked(sb, so);
        SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

        sbrelease_internal(sb, so);
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer with
 * m_copy for output to a peer, and then removed from the socket buffer with
 * sbdrop() or sbdroprecord() once it is acknowledged by the peer.
 */
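
/*
 * Example (illustrative sketch, not part of the original file): a
 * reliable protocol's send side checks sbspace() before queueing and
 * trims acknowledged data later.  "m" and "acked" are hypothetical:
 */
#if 0
        SOCKBUF_LOCK(&so->so_snd);
        if (sbspace(&so->so_snd) < m->m_pkthdr.len) {
                SOCKBUF_UNLOCK(&so->so_snd);
                return (ENOBUFS);       /* or block in sbwait() instead */
        }
        sbappend_locked(&so->so_snd, m, 0);
        SOCKBUF_UNLOCK(&so->so_snd);
        /* ... later, when the peer acknowledges "acked" bytes ... */
        sbdrop(&so->so_snd, acked);
#endif
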
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m = sb->sb_mb;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m && m->m_nextpkt)
                m = m->m_nextpkt;

        if (m != sb->sb_lastrecord) {
                printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
                        __func__, sb->sb_mb, sb->sb_lastrecord, m);
                printf("packet chain:\n");
                for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
                        printf("\t%p\n", m);
                panic("%s from %s:%u", __func__, file, line);
        }
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m = sb->sb_mb;
        struct mbuf *n;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m && m->m_nextpkt)
                m = m->m_nextpkt;

        while (m && m->m_next)
                m = m->m_next;

        if (m != sb->sb_mbtail) {
                printf("%s: sb_mb %p sb_mbtail %p last %p\n",
                        __func__, sb->sb_mb, sb->sb_mbtail, m);
                printf("packet tree:\n");
                for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
                        printf("\t");
                        for (n = m; n != NULL; n = n->m_next)
                                printf("%p ", n);
                        printf("\n");
                }
                panic("%s from %s:%u", __func__, file, line);
        }
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {                                       \
        SOCKBUF_LOCK_ASSERT(sb);                                        \
        if ((sb)->sb_lastrecord != NULL)                                \
                (sb)->sb_lastrecord->m_nextpkt = (m0);                  \
        else                                                            \
                (sb)->sb_mb = (m0);                                     \
        (sb)->sb_lastrecord = (m0);                                     \
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
        struct mbuf *n;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m == NULL)
                return;
        sbm_clrprotoflags(m, flags);
        SBLASTRECORDCHK(sb);
        n = sb->sb_mb;
        if (n) {
                while (n->m_nextpkt)
                        n = n->m_nextpkt;
                do {
                        if (n->m_flags & M_EOR) {
                                sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
                                return;
                        }
                } while (n->m_next && (n = n->m_next));
        } else {
                /*
                 * XXX Would like to simply use sb_mbtail here, but
                 * XXX I need to verify that I won't miss an EOR that
                 * XXX way.
                 */
                if ((n = sb->sb_lastrecord) != NULL) {
                        do {
                                if (n->m_flags & M_EOR) {
                                        sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
                                        return;
                                }
                        } while (n->m_next && (n = n->m_next));
                } else {
                        /*
                         * If this is the first record in the socket buffer,
                         * it's also the last record.
                         */
                        sb->sb_lastrecord = m;
                }
        }
        sbcompress(sb, m, n);
        SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m, int flags)
{

        SOCKBUF_LOCK(sb);
        sbappend_locked(sb, m, flags);
        SOCKBUF_UNLOCK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
        SOCKBUF_LOCK_ASSERT(sb);

        KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));
        KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));

        SBLASTMBUFCHK(sb);

        /* Remove all packet headers and mbuf tags to get a pure data chain. */
        m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

        sbcompress(sb, m, sb->sb_mbtail);

        sb->sb_lastrecord = sb->sb_mb;
        SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

        SOCKBUF_LOCK(sb);
        sbappendstream_locked(sb, m, flags);
        SOCKBUF_UNLOCK(sb);
}

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m, *n, *fnrdy;
        u_long acc, ccc, mbcnt;

        SOCKBUF_LOCK_ASSERT(sb);

        acc = ccc = mbcnt = 0;
        fnrdy = NULL;

        for (m = sb->sb_mb; m; m = n) {
            n = m->m_nextpkt;
            for (; m; m = m->m_next) {
                if (m->m_len == 0) {
                        printf("sb %p empty mbuf %p\n", sb, m);
                        goto fail;
                }
                if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
                        if (m != sb->sb_fnrdy) {
                                printf("sb %p: fnrdy %p != m %p\n",
                                    sb, sb->sb_fnrdy, m);
                                goto fail;
                        }
                        fnrdy = m;
                }
                if (fnrdy) {
                        if (!(m->m_flags & M_NOTAVAIL)) {
                                printf("sb %p: fnrdy %p, m %p is avail\n",
                                    sb, sb->sb_fnrdy, m);
                                goto fail;
                        }
                } else
                        acc += m->m_len;
                ccc += m->m_len;
                mbcnt += MSIZE;
                if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
                        mbcnt += m->m_ext.ext_size;
            }
        }
        if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
                printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
                    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
                goto fail;
        }
        return;
fail:
        panic("%s from %s:%u", __func__, file, line);
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
        struct mbuf *m;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m0 == NULL)
                return;
        m_clrprotoflags(m0);
        /*
         * Put the first mbuf on the queue.  Note this permits zero length
         * records.
         */
        sballoc(sb, m0);
        SBLASTRECORDCHK(sb);
        SBLINKRECORD(sb, m0);
        sb->sb_mbtail = m0;
        m = m0->m_next;
        m0->m_next = 0;
        if (m && (m0->m_flags & M_EOR)) {
                m0->m_flags &= ~M_EOR;
                m->m_flags |= M_EOR;
        }
        /* always call sbcompress() so it can do SBLASTMBUFCHK() */
        sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

        SOCKBUF_LOCK(sb);
        sbappendrecord_locked(sb, m0);
        SOCKBUF_UNLOCK(sb);
}

/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
        struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
        if (asa->sa_len > MLEN)
                return (0);
#endif
        m = m_get(M_NOWAIT, MT_SONAME);
        if (m == NULL)
                return (0);
        m->m_len = asa->sa_len;
        bcopy(asa, mtod(m, caddr_t), asa->sa_len);
        if (m0) {
                m_clrprotoflags(m0);
                m_tag_delete_chain(m0, NULL);
                /*
                 * Clear some persistent info from pkthdr.
                 * We don't use m_demote(), because some netgraph consumers
                 * expect M_PKTHDR presence.
                 */
                m0->m_pkthdr.rcvif = NULL;
                m0->m_pkthdr.flowid = 0;
                m0->m_pkthdr.csum_flags = 0;
                m0->m_pkthdr.fibnum = 0;
                m0->m_pkthdr.rsstype = 0;
        }
        if (ctrl_last)
                ctrl_last->m_next = m0; /* concatenate data to control */
        else
                control = m0;
        m->m_next = control;
        for (n = m; n->m_next != NULL; n = n->m_next)
                sballoc(sb, n);
        sballoc(sb, n);
        nlast = n;
        SBLINKRECORD(sb, m);

        sb->sb_mbtail = nlast;
        SBLASTMBUFCHK(sb);

        SBLASTRECORDCHK(sb);
        return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        struct mbuf *ctrl_last;
        int space = asa->sa_len;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m0 && (m0->m_flags & M_PKTHDR) == 0)
                panic("sbappendaddr_locked");
        if (m0)
                space += m0->m_pkthdr.len;
        space += m_length(control, &ctrl_last);

        if (space > sbspace(sb))
                return (0);
        return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate space
 * on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        struct mbuf *ctrl_last;

        SOCKBUF_LOCK_ASSERT(sb);

        ctrl_last = (control == NULL) ? NULL : m_last(control);
        return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        int retval;

        SOCKBUF_LOCK(sb);
        retval = sbappendaddr_locked(sb, asa, m0, control);
        SOCKBUF_UNLOCK(sb);
        return (retval);
}

int
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
        struct mbuf *m, *n, *mlast;
        int space;

        SOCKBUF_LOCK_ASSERT(sb);

        if (control == NULL)
                panic("sbappendcontrol_locked");
        space = m_length(control, &n) + m_length(m0, NULL);

        if (space > sbspace(sb))
                return (0);
        m_clrprotoflags(m0);
        n->m_next = m0;                 /* concatenate data to control */

        SBLASTRECORDCHK(sb);

        for (m = control; m->m_next; m = m->m_next)
                sballoc(sb, m);
        sballoc(sb, m);
        mlast = m;
        SBLINKRECORD(sb, control);

        sb->sb_mbtail = mlast;
        SBLASTMBUFCHK(sb);

        SBLASTRECORDCHK(sb);
        return (1);
}

int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
        int retval;

        SOCKBUF_LOCK(sb);
        retval = sbappendcontrol_locked(sb, m0, control);
        SOCKBUF_UNLOCK(sb);
        return (retval);
}

/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, both mbufs are not marked as
 *     not ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
        int eor = 0;
        struct mbuf *o;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m) {
                eor |= m->m_flags & M_EOR;
                if (m->m_len == 0 &&
                    (eor == 0 ||
                     (((o = m->m_next) || (o = n)) &&
                      o->m_type == m->m_type))) {
                        if (sb->sb_lastrecord == m)
                                sb->sb_lastrecord = m->m_next;
                        m = m_free(m);
                        continue;
                }
                if (n && (n->m_flags & M_EOR) == 0 &&
                    M_WRITABLE(n) &&
                    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
                    !(m->m_flags & M_NOTREADY) &&
                    !(n->m_flags & M_NOTREADY) &&
                    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
                    m->m_len <= M_TRAILINGSPACE(n) &&
                    n->m_type == m->m_type) {
                        bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
                            (unsigned)m->m_len);
                        n->m_len += m->m_len;
                        sb->sb_ccc += m->m_len;
                        if (sb->sb_fnrdy == NULL)
                                sb->sb_acc += m->m_len;
                        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                                /* XXX: Probably don't need.*/
                                sb->sb_ctl += m->m_len;
                        m = m_free(m);
                        continue;
                }
                if (n)
                        n->m_next = m;
                else
                        sb->sb_mb = m;
                sb->sb_mbtail = m;
                sballoc(sb, m);
                n = m;
                m->m_flags &= ~M_EOR;
                m = m->m_next;
                n->m_next = 0;
        }
        if (eor) {
                KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
                n->m_flags |= eor;
        }
        SBLASTMBUFCHK(sb);
}
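
/*
 * Worked example (not part of the original file): appending a 10-byte
 * MT_DATA mbuf behind an existing writable MT_DATA mbuf with at least 10
 * bytes of trailing space takes case (2) above: since 10 <= MCLBYTES / 4,
 * the payload is bcopy'd into the previous mbuf and the new one is freed.
 * If the previous mbuf is marked M_EOR, or SB_NOCOALESCE is set on the
 * buffer, the data is not merged and case (3) links the mbuf onto the
 * chain instead.
 */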

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

        while (sb->sb_mbcnt) {
                /*
                 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
                 * we would loop forever. Panic instead.
                 */
                if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
                        break;
                m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
        }
        KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
            ("%s: ccc %u mb %p mbcnt %u", __func__,
            sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

        SOCKBUF_LOCK_ASSERT(sb);
        sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

        SOCKBUF_LOCK(sb);
        sbflush_locked(sb);
        SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
        struct mbuf *m, *next, *mfree;

        next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
        mfree = NULL;

        while (len > 0) {
                if (m == NULL) {
                        KASSERT(next, ("%s: no next, len %d", __func__, len));
                        m = next;
                        next = m->m_nextpkt;
                }
                if (m->m_len > len) {
                        KASSERT(!(m->m_flags & M_NOTAVAIL),
                            ("%s: m %p M_NOTAVAIL", __func__, m));
                        m->m_len -= len;
                        m->m_data += len;
                        sb->sb_ccc -= len;
                        sb->sb_acc -= len;
                        if (sb->sb_sndptroff != 0)
                                sb->sb_sndptroff -= len;
                        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                                sb->sb_ctl -= len;
                        break;
                }
                len -= m->m_len;
                sbfree(sb, m);
                /*
                 * Do not put M_NOTREADY buffers to the free list, they
                 * are referenced from outside.
                 */
                if (m->m_flags & M_NOTREADY)
                        m = m->m_next;
                else {
                        struct mbuf *n;

                        n = m->m_next;
                        m->m_next = mfree;
                        mfree = m;
                        m = n;
                }
        }
        /*
         * Free any zero-length mbufs from the buffer.
         * For SOCK_DGRAM sockets such mbufs represent empty records.
         * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
         * when sosend_generic() needs to send only control data.
         */
        while (m && m->m_len == 0) {
                struct mbuf *n;

                sbfree(sb, m);
                n = m->m_next;
                m->m_next = mfree;
                mfree = m;
                m = n;
        }
        if (m) {
                sb->sb_mb = m;
                m->m_nextpkt = next;
        } else
                sb->sb_mb = next;
        /*
         * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
         * sb_lastrecord is up-to-date if we dropped part of the last record.
         */
        m = sb->sb_mb;
        if (m == NULL) {
                sb->sb_mbtail = NULL;
                sb->sb_lastrecord = NULL;
        } else if (m->m_nextpkt == NULL) {
                sb->sb_lastrecord = m;
        }

        return (mfree);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

        SOCKBUF_LOCK_ASSERT(sb);
        m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

        SOCKBUF_LOCK_ASSERT(sb);
        return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
        struct mbuf *mfree;

        SOCKBUF_LOCK(sb);
        mfree = sbcut_internal(sb, len);
        SOCKBUF_UNLOCK(sb);

        m_freem(mfree);
}

/*
 * Maintain a pointer and offset pair into the socket buffer mbuf chain to
 * avoid traversal of the entire socket buffer for larger offsets.
 */
struct mbuf *
sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
{
        struct mbuf *m, *ret;

        KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
        KASSERT(off + len <= sb->sb_acc, ("%s: beyond sb", __func__));
        KASSERT(sb->sb_sndptroff <= sb->sb_acc, ("%s: sndptroff broken", __func__));

        /*
         * Is off below stored offset? Happens on retransmits.
         * Just return, we can't help here.
         */
        if (sb->sb_sndptroff > off) {
                *moff = off;
                return (sb->sb_mb);
        }

        /* Return closest mbuf in chain for current offset. */
        *moff = off - sb->sb_sndptroff;
        m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb;
        if (*moff == m->m_len) {
                *moff = 0;
                sb->sb_sndptroff += m->m_len;
                m = ret = m->m_next;
                KASSERT(ret->m_len > 0,
                    ("mbuf %p in sockbuf %p chain has no valid data", ret, sb));
        }

        /* Advance by len to be as close as possible for the next transmit. */
        for (off = off - sb->sb_sndptroff + len - 1;
             off > 0 && m != NULL && off >= m->m_len;
             m = m->m_next) {
                sb->sb_sndptroff += m->m_len;
                off -= m->m_len;
        }
        if (off > 0 && m == NULL)
                panic("%s: sockbuf %p and mbuf %p clashing", __func__, sb, ret);
        sb->sb_sndptr = m;

        return (ret);
}
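
/*
 * Worked example (not part of the original file): with a chain of three
 * 100-byte mbufs and sb_sndptroff already advanced to 100 (sb_sndptr at
 * the second mbuf), a call with off = 150 returns the second mbuf with
 * *moff = 50; the trailing loop then advances sb_sndptr/sb_sndptroff
 * toward off + len so the next transmit starts its walk there instead of
 * at sb_mb.
 */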

/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
        struct mbuf *m;

        KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

        /*
         * If the "off" is below the stored offset, which happens on
         * retransmits, just use "sb_mb":
         */
        if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
                m = sb->sb_mb;
        } else {
                m = sb->sb_sndptr;
                off -= sb->sb_sndptroff;
        }
        while (off > 0 && m != NULL) {
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        *moff = off;
        return (m);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
        struct mbuf *m;

        SOCKBUF_LOCK_ASSERT(sb);

        m = sb->sb_mb;
        if (m) {
                sb->sb_mb = m->m_nextpkt;
                do {
                        sbfree(sb, m);
                        m = m_free(m);
                } while (m);
        }
        SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

        SOCKBUF_LOCK(sb);
        sbdroprecord_locked(sb);
        SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
        struct cmsghdr *cp;
        struct mbuf *m;

        if (CMSG_SPACE((u_int)size) > MCLBYTES)
                return ((struct mbuf *) NULL);
        if (CMSG_SPACE((u_int)size) > MLEN)
                m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
        else
                m = m_get(M_NOWAIT, MT_CONTROL);
        if (m == NULL)
                return ((struct mbuf *) NULL);
        cp = mtod(m, struct cmsghdr *);
        m->m_len = 0;
        KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
            ("sbcreatecontrol: short mbuf"));
        /*
         * Don't leave the padding between the msg header and the
         * cmsg data and the padding after the cmsg data un-initialized.
         */
        bzero(cp, CMSG_SPACE((u_int)size));
        if (p != NULL)
                (void)memcpy(CMSG_DATA(cp), p, size);
        m->m_len = CMSG_SPACE(size);
        cp->cmsg_len = CMSG_LEN(size);
        cp->cmsg_level = level;
        cp->cmsg_type = type;
        return (m);
}
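
/*
 * Example (illustrative sketch, not part of the original file): build a
 * control mbuf carrying a receive timestamp, the way a datagram protocol
 * would before queueing it with sbappendaddr():
 */
#if 0
        struct timeval tv;
        struct mbuf *control;

        microtime(&tv);
        control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
            SCM_TIMESTAMP, SOL_SOCKET);
        if (control == NULL)
                return (ENOBUFS);
#endif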

/*
 * This does for socket buffers what sotoxsocket does for sockets: generate
 * a user-format data structure describing the socket buffer.  Note that the
 * xsockbuf structure, since it is always embedded in a socket, does not
 * include a self pointer nor a length.  We make this entry point public in
 * case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

        xsb->sb_cc = sb->sb_ccc;
        xsb->sb_hiwat = sb->sb_hiwat;
        xsb->sb_mbcnt = sb->sb_mbcnt;
        xsb->sb_mcnt = sb->sb_mcnt;
        xsb->sb_ccnt = sb->sb_ccnt;
        xsb->sb_mbmax = sb->sb_mbmax;
        xsb->sb_lowat = sb->sb_lowat;
        xsb->sb_flags = sb->sb_flags;
        xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");
