FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_socket2.c

    1 /*      $NetBSD: uipc_socket2.c,v 1.142 2022/10/26 23:38:09 riastradh Exp $     */
    2 
    3 /*-
    4  * Copyright (c) 2008 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26  * POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 /*
   30  * Copyright (c) 1982, 1986, 1988, 1990, 1993
   31  *      The Regents of the University of California.  All rights reserved.
   32  *
   33  * Redistribution and use in source and binary forms, with or without
   34  * modification, are permitted provided that the following conditions
   35  * are met:
   36  * 1. Redistributions of source code must retain the above copyright
   37  *    notice, this list of conditions and the following disclaimer.
   38  * 2. Redistributions in binary form must reproduce the above copyright
   39  *    notice, this list of conditions and the following disclaimer in the
   40  *    documentation and/or other materials provided with the distribution.
   41  * 3. Neither the name of the University nor the names of its contributors
   42  *    may be used to endorse or promote products derived from this software
   43  *    without specific prior written permission.
   44  *
   45  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   55  * SUCH DAMAGE.
   56  *
   57  *      @(#)uipc_socket2.c      8.2 (Berkeley) 2/14/95
   58  */
   59 
   60 #include <sys/cdefs.h>
   61 __KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.142 2022/10/26 23:38:09 riastradh Exp $");
   62 
   63 #ifdef _KERNEL_OPT
   64 #include "opt_ddb.h"
   65 #include "opt_inet.h"
   66 #include "opt_mbuftrace.h"
   67 #include "opt_sb_max.h"
   68 #endif
   69 
   70 #include <sys/param.h>
   71 #include <sys/systm.h>
   72 #include <sys/proc.h>
   73 #include <sys/file.h>
   74 #include <sys/buf.h>
   75 #include <sys/mbuf.h>
   76 #include <sys/protosw.h>
   77 #include <sys/domain.h>
   78 #include <sys/poll.h>
   79 #include <sys/socket.h>
   80 #include <sys/socketvar.h>
   81 #include <sys/signalvar.h>
   82 #include <sys/kauth.h>
   83 #include <sys/pool.h>
   84 #include <sys/uidinfo.h>
   85 
   86 #ifdef DDB
   87 #include <sys/filedesc.h>
   88 #include <ddb/db_active.h>
   89 #endif
   90 
   91 /*
   92  * Primitive routines for operating on sockets and socket buffers.
   93  *
   94  * Connection life-cycle:
   95  *
   96  *      Normal sequence from the active (originating) side:
   97  *
   98  *      - soisconnecting() is called during processing of connect() call,
   99  *      - resulting in an eventual call to soisconnected() if/when the
  100  *        connection is established.
  101  *
  102  *      When the connection is torn down during processing of disconnect():
  103  *
  104  *      - soisdisconnecting() is called and,
  105  *      - soisdisconnected() is called when the connection to the peer
  106  *        is totally severed.
  107  *
  108  *      The semantics of these routines are such that connectionless protocols
  109  *      can call soisconnected() and soisdisconnected() only, bypassing the
  110  *      in-progress calls when setting up a ``connection'' takes no time.
  111  *
  112  *      From the passive side, a socket is created with two queues of sockets:
  113  *
  114  *      - so_q0 (0) for partial connections (i.e. connections in progress)
  115  *      - so_q (1) for connections already made and awaiting user acceptance.
  116  *
  117  *      As a protocol is preparing incoming connections, it creates a socket
  118  *      structure queued on so_q0 by calling sonewconn().  When the connection
  119  *      is established, soisconnected() is called, and transfers the
  120  *      socket structure to so_q, making it available to accept().
  121  *
  122  *      If a socket is closed with sockets on either so_q0 or so_q, these
  123  *      sockets are dropped.
  124  *
  125  * Locking rules and assumptions:
  126  *
  127  * o socket::so_lock can change on the fly.  The low level routines used
  128  *   to lock sockets are aware of this.  When so_lock is acquired, the
  129  *   routine locking must check to see if so_lock still points to the
  130  *   lock that was acquired.  If so_lock has changed in the meantime, the
  131  *   now irrelevant lock that was acquired must be dropped and the lock
  132  *   operation retried.  Although not proven here, this is completely safe
  133  *   on a multiprocessor system, even with relaxed memory ordering, given
  134  *   the next two rules:
  135  *
  136  * o In order to mutate so_lock, the lock pointed to by the current value
  137  *   of so_lock must be held: i.e., the socket must be held locked by the
  138  *   changing thread.  The thread must issue membar_release() to prevent
  139  *   memory accesses being reordered, and can set so_lock to the desired
  140  *   value.  If the lock pointed to by the new value of so_lock is not
  141  *   held by the changing thread, the socket must then be considered
  142  *   unlocked.
  143  *
  144  * o If so_lock is mutated, and the previous lock referred to by so_lock
  145  *   could still be visible to other threads in the system (e.g. via file
  146  *   descriptor or protocol-internal reference), then the old lock must
  147  *   remain valid until the socket and/or protocol control block has been
  148  *   torn down.
  149  *
  150  * o If a socket has a non-NULL so_head value (i.e. is in the process of
  151  *   connecting), then locking the socket must also lock the socket pointed
  152  *   to by so_head: their lock pointers must match.
  153  *
  154  * o If a socket has connections in progress (so_q, so_q0 not empty) then
  155  *   locking the socket must also lock the sockets attached to both queues.
  156  *   Again, their lock pointers must match.
  157  *
  158  * o Beyond the initial lock assignment in socreate(), assigning locks to
  159  *   sockets is the responsibility of the individual protocols / protocol
  160  *   domains.
  161  */
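
/*
 * Editor's illustrative sketch (not part of uipc_socket2.c): how a
 * hypothetical connection-oriented protocol might drive the passive-side
 * life-cycle described above.  example_input() and example_handshake_done()
 * are assumed names, not NetBSD interfaces; the block is not compiled.
 */
#if 0
static void
example_input(struct socket *head)
{
        struct socket *so;

        /* A connection request has arrived on a listening socket. */
        solock(head);
        so = sonewconn(head, false);    /* queued on so_q0, returned locked */
        if (so == NULL) {
                sounlock(head);
                return;                 /* listen queue full or no memory */
        }
        /* ... the protocol records 'so' in its own control block ... */
        sounlock(head);
}

static void
example_handshake_done(struct socket *so)
{
        /*
         * Handshake complete: soisconnected() moves the socket from
         * so_q0 to so_q and wakes any thread sleeping in accept().
         */
        solock(so);
        soisconnected(so);
        sounlock(so);
}
#endif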
  162 
  163 static pool_cache_t     socket_cache;
  164 u_long                  sb_max = SB_MAX;/* maximum socket buffer size */
  165 static u_long           sb_max_adj;     /* adjusted sb_max */
  166 
  167 void
  168 soisconnecting(struct socket *so)
  169 {
  170 
  171         KASSERT(solocked(so));
  172 
  173         so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
  174         so->so_state |= SS_ISCONNECTING;
  175 }
  176 
  177 void
  178 soisconnected(struct socket *so)
  179 {
  180         struct socket   *head;
  181 
  182         head = so->so_head;
  183 
  184         KASSERT(solocked(so));
  185         KASSERT(head == NULL || solocked2(so, head));
  186 
  187         so->so_state &= ~(SS_ISCONNECTING | SS_ISDISCONNECTING);
  188         so->so_state |= SS_ISCONNECTED;
  189         if (head && so->so_onq == &head->so_q0) {
  190                 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
  191                         /*
  192                          * Re-enqueue and wake up any waiters, e.g.
  193                          * processes blocking on accept().
  194                          */
  195                         soqremque(so, 0);
  196                         soqinsque(head, so, 1);
  197                         sorwakeup(head);
  198                         cv_broadcast(&head->so_cv);
  199                 } else {
  200                         so->so_upcall =
  201                             head->so_accf->so_accept_filter->accf_callback;
  202                         so->so_upcallarg = head->so_accf->so_accept_filter_arg;
  203                         so->so_rcv.sb_flags |= SB_UPCALL;
  204                         so->so_options &= ~SO_ACCEPTFILTER;
  205                         (*so->so_upcall)(so, so->so_upcallarg,
  206                                          POLLIN|POLLRDNORM, M_DONTWAIT);
  207                 }
  208         } else {
  209                 cv_broadcast(&so->so_cv);
  210                 sorwakeup(so);
  211                 sowwakeup(so);
  212         }
  213 }
  214 
  215 void
  216 soisdisconnecting(struct socket *so)
  217 {
  218 
  219         KASSERT(solocked(so));
  220 
  221         so->so_state &= ~SS_ISCONNECTING;
  222         so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
  223         cv_broadcast(&so->so_cv);
  224         sowwakeup(so);
  225         sorwakeup(so);
  226 }
  227 
  228 void
  229 soisdisconnected(struct socket *so)
  230 {
  231 
  232         KASSERT(solocked(so));
  233 
  234         so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
  235         so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
  236         cv_broadcast(&so->so_cv);
  237         sowwakeup(so);
  238         sorwakeup(so);
  239 }
  240 
  241 void
  242 soinit2(void)
  243 {
  244 
  245         socket_cache = pool_cache_init(sizeof(struct socket), 0, 0, 0,
  246             "socket", NULL, IPL_SOFTNET, NULL, NULL, NULL);
  247 }
  248 
  249 /*
  250  * sonewconn: accept a new connection.
  251  *
  252  * When an attempt at a new connection is noted on a socket which accepts
  253  * connections, sonewconn(9) is called.  If the connection is possible
  254  * (subject to space constraints, etc) then we allocate a new structure,
  255  * properly linked into the data structure of the original socket.
  256  *
   257  * => If 'soready' is true, then the socket will become ready for accept(), i.e.
  258  *    inserted into the so_q queue, SS_ISCONNECTED set and waiters awoken.
  259  * => May be called from soft-interrupt context.
  260  * => Listening socket should be locked.
  261  * => Returns the new socket locked.
  262  */
  263 struct socket *
  264 sonewconn(struct socket *head, bool soready)
  265 {
  266         struct socket *so;
  267         int soqueue, error;
  268 
  269         KASSERT(solocked(head));
  270 
  271         if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2) {
  272                 /*
  273                  * Listen queue overflow.  If there is an accept filter
   274                  * active, pass through the oldest connection it's handling.
  275                  */
  276                 if (head->so_accf == NULL) {
  277                         return NULL;
  278                 } else {
  279                         struct socket *so2, *next;
  280 
  281                         /* Pass the oldest connection waiting in the
  282                            accept filter */
  283                         for (so2 = TAILQ_FIRST(&head->so_q0);
  284                              so2 != NULL; so2 = next) {
  285                                 next = TAILQ_NEXT(so2, so_qe);
  286                                 if (so2->so_upcall == NULL) {
  287                                         continue;
  288                                 }
  289                                 so2->so_upcall = NULL;
  290                                 so2->so_upcallarg = NULL;
  291                                 so2->so_options &= ~SO_ACCEPTFILTER;
  292                                 so2->so_rcv.sb_flags &= ~SB_UPCALL;
  293                                 soisconnected(so2);
  294                                 break;
  295                         }
  296 
   297                         /* If nothing was nudged out of the accept filter, bail
  298                          * out; otherwise proceed allocating the socket. */
  299                         if (so2 == NULL) {
  300                                 return NULL;
  301                         }
  302                 }
  303         }
  304         if ((head->so_options & SO_ACCEPTFILTER) != 0) {
  305                 soready = false;
  306         }
  307         soqueue = soready ? 1 : 0;
  308 
  309         if ((so = soget(false)) == NULL) {
  310                 return NULL;
  311         }
  312         so->so_type = head->so_type;
  313         so->so_options = head->so_options & ~SO_ACCEPTCONN;
  314         so->so_linger = head->so_linger;
  315         so->so_state = head->so_state | SS_NOFDREF;
  316         so->so_proto = head->so_proto;
  317         so->so_timeo = head->so_timeo;
  318         so->so_pgid = head->so_pgid;
  319         so->so_send = head->so_send;
  320         so->so_receive = head->so_receive;
  321         so->so_uidinfo = head->so_uidinfo;
  322         so->so_egid = head->so_egid;
  323         so->so_cpid = head->so_cpid;
  324 
  325         /*
   326          * Share the lock with the listening socket; it may get unshared
  327          * once the connection is complete.
  328          *
  329          * so_lock is stable while we hold the socket locked, so no
  330          * need for atomic_load_* here.
  331          */
  332         mutex_obj_hold(head->so_lock);
  333         so->so_lock = head->so_lock;
  334 
  335         /*
  336          * Reserve the space for socket buffers.
  337          */
  338 #ifdef MBUFTRACE
  339         so->so_mowner = head->so_mowner;
  340         so->so_rcv.sb_mowner = head->so_rcv.sb_mowner;
  341         so->so_snd.sb_mowner = head->so_snd.sb_mowner;
  342 #endif
  343         if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
  344                 goto out;
  345         }
  346         so->so_snd.sb_lowat = head->so_snd.sb_lowat;
  347         so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
  348         so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
  349         so->so_snd.sb_timeo = head->so_snd.sb_timeo;
  350         so->so_rcv.sb_flags |= head->so_rcv.sb_flags & (SB_AUTOSIZE | SB_ASYNC);
  351         so->so_snd.sb_flags |= head->so_snd.sb_flags & (SB_AUTOSIZE | SB_ASYNC);
  352 
  353         /*
  354          * Finally, perform the protocol attach.  Note: a new socket
  355          * lock may be assigned at this point (if so, it will be held).
  356          */
  357         error = (*so->so_proto->pr_usrreqs->pr_attach)(so, 0);
  358         if (error) {
  359 out:
  360                 KASSERT(solocked(so));
  361                 KASSERT(so->so_accf == NULL);
  362                 soput(so);
  363 
  364                 /* Note: the listening socket shall stay locked. */
  365                 KASSERT(solocked(head));
  366                 return NULL;
  367         }
  368         KASSERT(solocked2(head, so));
  369 
  370         /*
  371          * Insert into the queue.  If ready, update the connection status
  372          * and wake up any waiters, e.g. processes blocking on accept().
  373          */
  374         soqinsque(head, so, soqueue);
  375         if (soready) {
  376                 so->so_state |= SS_ISCONNECTED;
  377                 sorwakeup(head);
  378                 cv_broadcast(&head->so_cv);
  379         }
  380         return so;
  381 }
  382 
  383 struct socket *
  384 soget(bool waitok)
  385 {
  386         struct socket *so;
  387 
  388         so = pool_cache_get(socket_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
  389         if (__predict_false(so == NULL))
  390                 return (NULL);
  391         memset(so, 0, sizeof(*so));
  392         TAILQ_INIT(&so->so_q0);
  393         TAILQ_INIT(&so->so_q);
  394         cv_init(&so->so_cv, "socket");
  395         cv_init(&so->so_rcv.sb_cv, "netio");
  396         cv_init(&so->so_snd.sb_cv, "netio");
  397         selinit(&so->so_rcv.sb_sel);
  398         selinit(&so->so_snd.sb_sel);
  399         so->so_rcv.sb_so = so;
  400         so->so_snd.sb_so = so;
  401         return so;
  402 }
  403 
  404 void
  405 soput(struct socket *so)
  406 {
  407 
  408         KASSERT(!cv_has_waiters(&so->so_cv));
  409         KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
  410         KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
  411         seldestroy(&so->so_rcv.sb_sel);
  412         seldestroy(&so->so_snd.sb_sel);
  413         mutex_obj_free(so->so_lock);
  414         cv_destroy(&so->so_cv);
  415         cv_destroy(&so->so_rcv.sb_cv);
  416         cv_destroy(&so->so_snd.sb_cv);
  417         pool_cache_put(socket_cache, so);
  418 }
  419 
  420 /*
  421  * soqinsque: insert socket of a new connection into the specified
  422  * accept queue of the listening socket (head).
  423  *
  424  *      q = 0: queue of partial connections
  425  *      q = 1: queue of incoming connections
  426  */
  427 void
  428 soqinsque(struct socket *head, struct socket *so, int q)
  429 {
  430         KASSERT(q == 0 || q == 1);
  431         KASSERT(solocked2(head, so));
  432         KASSERT(so->so_onq == NULL);
  433         KASSERT(so->so_head == NULL);
  434 
  435         so->so_head = head;
  436         if (q == 0) {
  437                 head->so_q0len++;
  438                 so->so_onq = &head->so_q0;
  439         } else {
  440                 head->so_qlen++;
  441                 so->so_onq = &head->so_q;
  442         }
  443         TAILQ_INSERT_TAIL(so->so_onq, so, so_qe);
  444 }
  445 
  446 /*
  447  * soqremque: remove socket from the specified queue.
  448  *
  449  * => Returns true if socket was removed from the specified queue.
  450  * => False if socket was not removed (because it was in other queue).
  451  */
  452 bool
  453 soqremque(struct socket *so, int q)
  454 {
  455         struct socket *head = so->so_head;
  456 
  457         KASSERT(q == 0 || q == 1);
  458         KASSERT(solocked(so));
  459         KASSERT(so->so_onq != NULL);
  460         KASSERT(head != NULL);
  461 
  462         if (q == 0) {
  463                 if (so->so_onq != &head->so_q0)
  464                         return false;
  465                 head->so_q0len--;
  466         } else {
  467                 if (so->so_onq != &head->so_q)
  468                         return false;
  469                 head->so_qlen--;
  470         }
  471         KASSERT(solocked2(so, head));
  472         TAILQ_REMOVE(so->so_onq, so, so_qe);
  473         so->so_onq = NULL;
  474         so->so_head = NULL;
  475         return true;
  476 }
  477 
  478 /*
  479  * socantsendmore: indicates that no more data will be sent on the
   480  * socket.  It would normally be applied to a socket by the protocol
   481  * code (e.g. in pr_shutdown()) when the user informs the system that
   482  * no more data is to be sent.
  483  */
  484 void
  485 socantsendmore(struct socket *so)
  486 {
  487         KASSERT(solocked(so));
  488 
  489         so->so_state |= SS_CANTSENDMORE;
  490         sowwakeup(so);
  491 }
  492 
  493 /*
  494  * socantrcvmore(): indicates that no more data will be received and
  495  * will normally be applied to the socket by a protocol when it detects
  496  * that the peer will send no more data.  Data queued for reading in
  497  * the socket may yet be read.
  498  */
  499 void
  500 socantrcvmore(struct socket *so)
  501 {
  502         KASSERT(solocked(so));
  503 
  504         so->so_state |= SS_CANTRCVMORE;
  505         sorwakeup(so);
  506 }
  507 
  508 /*
   509  * soroverflow(): indicates that an attempt to deliver data to the
   510  * socket failed because the receive buffer overflowed.
  511  */
  512 void
  513 soroverflow(struct socket *so)
  514 {
  515         KASSERT(solocked(so));
  516 
  517         so->so_rcv.sb_overflowed++;
  518         if (so->so_options & SO_RERROR)  {
  519                 so->so_rerror = ENOBUFS;
  520                 sorwakeup(so);
  521         }
  522 }
  523 
  524 /*
  525  * Wait for data to arrive at/drain from a socket buffer.
  526  */
  527 int
  528 sbwait(struct sockbuf *sb)
  529 {
  530         struct socket *so;
  531         kmutex_t *lock;
  532         int error;
  533 
  534         so = sb->sb_so;
  535 
  536         KASSERT(solocked(so));
  537 
  538         sb->sb_flags |= SB_NOTIFY;
  539         lock = so->so_lock;
  540         if ((sb->sb_flags & SB_NOINTR) != 0)
  541                 error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
  542         else
  543                 error = cv_timedwait_sig(&sb->sb_cv, lock, sb->sb_timeo);
  544         if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
  545                 solockretry(so, lock);
  546         return error;
  547 }
  548 
  549 /*
   550  * Wake up processes waiting on a socket buffer.
  551  * Do asynchronous notification via SIGIO
  552  * if the socket buffer has the SB_ASYNC flag set.
  553  */
  554 void
  555 sowakeup(struct socket *so, struct sockbuf *sb, int code)
  556 {
  557         int band;
  558 
  559         KASSERT(solocked(so));
  560         KASSERT(sb->sb_so == so);
  561 
  562         switch (code) {
  563         case POLL_IN:
  564                 band = POLLIN|POLLRDNORM;
  565                 break;
  566 
  567         case POLL_OUT:
  568                 band = POLLOUT|POLLWRNORM;
  569                 break;
  570 
  571         case POLL_HUP:
  572                 band = POLLHUP;
  573                 break;
  574 
  575         default:
  576                 band = 0;
  577 #ifdef DIAGNOSTIC
  578                 printf("bad siginfo code %d in socket notification.\n", code);
  579 #endif 
  580                 break;
  581         }
  582 
  583         sb->sb_flags &= ~SB_NOTIFY;
  584         selnotify(&sb->sb_sel, band, NOTE_SUBMIT);
  585         cv_broadcast(&sb->sb_cv);
  586         if (sb->sb_flags & SB_ASYNC)
  587                 fownsignal(so->so_pgid, SIGIO, code, band, so);
  588         if (sb->sb_flags & SB_UPCALL)
  589                 (*so->so_upcall)(so, so->so_upcallarg, band, M_DONTWAIT);
  590 }
  591 
  592 /*
  593  * Reset a socket's lock pointer.  Wake all threads waiting on the
  594  * socket's condition variables so that they can restart their waits
  595  * using the new lock.  The existing lock must be held.
  596  *
  597  * Caller must have issued membar_release before this.
  598  */
  599 void
  600 solockreset(struct socket *so, kmutex_t *lock)
  601 {
  602 
  603         KASSERT(solocked(so));
  604 
  605         so->so_lock = lock;
  606         cv_broadcast(&so->so_snd.sb_cv);
  607         cv_broadcast(&so->so_rcv.sb_cv);
  608         cv_broadcast(&so->so_cv);
  609 }
  610 
  611 /*
  612  * Socket buffer (struct sockbuf) utility routines.
  613  *
  614  * Each socket contains two socket buffers: one for sending data and
  615  * one for receiving data.  Each buffer contains a queue of mbufs,
  616  * information about the number of mbufs and amount of data in the
  617  * queue, and other fields allowing poll() statements and notification
  618  * on data availability to be implemented.
  619  *
  620  * Data stored in a socket buffer is maintained as a list of records.
  621  * Each record is a list of mbufs chained together with the m_next
  622  * field.  Records are chained together with the m_nextpkt field. The upper
  623  * level routine soreceive() expects the following conventions to be
  624  * observed when placing information in the receive buffer:
  625  *
  626  * 1. If the protocol requires each message be preceded by the sender's
  627  *    name, then a record containing that name must be present before
  628  *    any associated data (mbuf's must be of type MT_SONAME).
  629  * 2. If the protocol supports the exchange of ``access rights'' (really
  630  *    just additional data associated with the message), and there are
  631  *    ``rights'' to be received, then a record containing this data
  632  *    should be present (mbuf's must be of type MT_CONTROL).
  633  * 3. If a name or rights record exists, then it must be followed by
  634  *    a data record, perhaps of zero length.
  635  *
  636  * Before using a new socket structure it is first necessary to reserve
   637  * buffer space for the socket by calling sbreserve().  This should commit
  638  * some of the available buffer space in the system buffer pool for the
  639  * socket (currently, it does nothing but enforce limits).  The space
  640  * should be released by calling sbrelease() when the socket is destroyed.
  641  */
  642 
  643 int
  644 sb_max_set(u_long new_sbmax)
  645 {
  646         int s;
  647 
  648         if (new_sbmax < (16 * 1024))
  649                 return (EINVAL);
  650 
  651         s = splsoftnet();
  652         sb_max = new_sbmax;
  653         sb_max_adj = (u_quad_t)new_sbmax * MCLBYTES / (MSIZE + MCLBYTES);
  654         splx(s);
  655 
  656         return (0);
  657 }
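
/*
 * Editor's note: sb_max_adj scales the administrative limit down by the
 * worst-case mbuf storage overhead.  As a worked example, assuming MSIZE
 * is 256 and MCLBYTES is 2048, a call such as sb_max_set(262144) yields
 *
 *      sb_max_adj = 262144 * 2048 / (256 + 2048) ~= 233016
 *
 * bytes of actual payload that sbreserve() will accept.
 */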
  658 
  659 int
  660 soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
  661 {
  662         KASSERT(so->so_pcb == NULL || solocked(so));
  663 
  664         /*
   665          * There's at least one application (the configure script of
   666          * screen(1)) which expects a fifo to be writable even if it has
   667          * "some" bytes in its buffer, so we want to make sure that
   668          * (hiwat - lowat) >= (some bytes).
   669          *
   670          * PIPE_BUF here is an arbitrary value chosen as (some bytes)
   671          * above; we expect it to be large enough for such applications.
  672          */
  673         u_long  lowat = MAX(sock_loan_thresh, MCLBYTES);
  674         u_long  hiwat = lowat + PIPE_BUF;
  675 
  676         if (sndcc < hiwat)
  677                 sndcc = hiwat;
  678         if (sbreserve(&so->so_snd, sndcc, so) == 0)
  679                 goto bad;
  680         if (sbreserve(&so->so_rcv, rcvcc, so) == 0)
  681                 goto bad2;
  682         if (so->so_rcv.sb_lowat == 0)
  683                 so->so_rcv.sb_lowat = 1;
  684         if (so->so_snd.sb_lowat == 0)
  685                 so->so_snd.sb_lowat = lowat;
  686         if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
  687                 so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
  688         return (0);
  689  bad2:
  690         sbrelease(&so->so_snd, so);
  691  bad:
  692         return (ENOBUFS);
  693 }
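
/*
 * Editor's illustrative sketch (not part of uipc_socket2.c): a protocol
 * attach hook might reserve default buffer space as below.  example_attach(),
 * example_sendspace and example_recvspace are assumed names, and the block
 * is not compiled.
 */
#if 0
static int
example_attach(struct socket *so, int proto)
{
        u_long example_sendspace = 8192;
        u_long example_recvspace = 8192;
        int error;

        sosetlock(so);          /* assign a default lock if none yet */
        error = soreserve(so, example_sendspace, example_recvspace);
        if (error != 0)
                return error;
        /* ... allocate and link the protocol control block ... */
        return 0;
}
#endif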
  694 
  695 /*
  696  * Allot mbufs to a sockbuf.
  697  * Attempt to scale mbmax so that mbcnt doesn't become limiting
  698  * if buffering efficiency is near the normal case.
  699  */
  700 int
  701 sbreserve(struct sockbuf *sb, u_long cc, struct socket *so)
  702 {
  703         struct lwp *l = curlwp; /* XXX */
  704         rlim_t maxcc;
  705         struct uidinfo *uidinfo;
  706 
  707         KASSERT(so->so_pcb == NULL || solocked(so));
  708         KASSERT(sb->sb_so == so);
  709         KASSERT(sb_max_adj != 0);
  710 
  711         if (cc == 0 || cc > sb_max_adj)
  712                 return (0);
  713 
  714         maxcc = l->l_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur;
  715 
  716         uidinfo = so->so_uidinfo;
  717         if (!chgsbsize(uidinfo, &sb->sb_hiwat, cc, maxcc))
  718                 return 0;
  719         sb->sb_mbmax = uimin(cc * 2, sb_max);
  720         if (sb->sb_lowat > sb->sb_hiwat)
  721                 sb->sb_lowat = sb->sb_hiwat;
  722 
  723         return (1);
  724 }
  725 
  726 /*
  727  * Free mbufs held by a socket, and reserved mbuf space.  We do not assert
  728  * that the socket is held locked here: see sorflush().
  729  */
  730 void
  731 sbrelease(struct sockbuf *sb, struct socket *so)
  732 {
  733 
  734         KASSERT(sb->sb_so == so);
  735 
  736         sbflush(sb);
  737         (void)chgsbsize(so->so_uidinfo, &sb->sb_hiwat, 0, RLIM_INFINITY);
  738         sb->sb_mbmax = 0;
  739 }
  740 
  741 /*
  742  * Routines to add and remove
  743  * data from an mbuf queue.
  744  *
  745  * The routines sbappend() or sbappendrecord() are normally called to
  746  * append new mbufs to a socket buffer, after checking that adequate
  747  * space is available, comparing the function sbspace() with the amount
  748  * of data to be added.  sbappendrecord() differs from sbappend() in
  749  * that data supplied is treated as the beginning of a new record.
  750  * To place a sender's address, optional access rights, and data in a
  751  * socket receive buffer, sbappendaddr() should be used.  To place
   752  * access rights and data in a socket receive buffer, sbappendcontrol()
  753  * should be used.  In either case, the new data begins a new record.
  754  * Note that unlike sbappend() and sbappendrecord(), these routines check
  755  * for the caller that there will be enough space to store the data.
  756  * Each fails if there is not enough space, or if it cannot find mbufs
  757  * to store additional information in.
  758  *
  759  * Reliable protocols may use the socket send buffer to hold data
  760  * awaiting acknowledgement.  Data is normally copied from a socket
   761  * send buffer by a protocol with m_copym for output to a peer,
   762  * and then removed from the socket buffer with sbdrop()
  763  * or sbdroprecord() when the data is acknowledged by the peer.
  764  */
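
/*
 * Editor's illustrative sketch (not part of uipc_socket2.c): typical use of
 * the append routines described above.  example_stream_deliver() and
 * example_dgram_deliver() stand in for hypothetical protocol input paths;
 * the block is not compiled.
 */
#if 0
static void
example_stream_deliver(struct socket *so, struct mbuf *m)
{
        /*
         * Stream protocols keep a single record in the receive buffer,
         * so the caller checks sbspace() before appending.
         */
        KASSERT(solocked(so));

        if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
                m_freem(m);             /* no room: drop or queue elsewhere */
                return;
        }
        sbappendstream(&so->so_rcv, m);
        sorwakeup(so);
}

static void
example_dgram_deliver(struct socket *so, const struct sockaddr *from,
    struct mbuf *m)
{
        /*
         * Datagram protocols start a new record per message with the
         * sender's address prepended; sbappendaddr() checks space itself.
         */
        KASSERT(solocked(so));

        if (sbappendaddr(&so->so_rcv, from, m, NULL) == 0) {
                soroverflow(so);
                m_freem(m);
                return;
        }
        sorwakeup(so);
}
#endif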
  765 
  766 #ifdef SOCKBUF_DEBUG
  767 void
  768 sblastrecordchk(struct sockbuf *sb, const char *where)
  769 {
  770         struct mbuf *m = sb->sb_mb;
  771 
  772         KASSERT(solocked(sb->sb_so));
  773 
  774         while (m && m->m_nextpkt)
  775                 m = m->m_nextpkt;
  776 
  777         if (m != sb->sb_lastrecord) {
  778                 printf("sblastrecordchk: sb_mb %p sb_lastrecord %p last %p\n",
  779                     sb->sb_mb, sb->sb_lastrecord, m);
  780                 printf("packet chain:\n");
  781                 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
  782                         printf("\t%p\n", m);
  783                 panic("sblastrecordchk from %s", where);
  784         }
  785 }
  786 
  787 void
  788 sblastmbufchk(struct sockbuf *sb, const char *where)
  789 {
  790         struct mbuf *m = sb->sb_mb;
  791         struct mbuf *n;
  792 
  793         KASSERT(solocked(sb->sb_so));
  794 
  795         while (m && m->m_nextpkt)
  796                 m = m->m_nextpkt;
  797 
  798         while (m && m->m_next)
  799                 m = m->m_next;
  800 
  801         if (m != sb->sb_mbtail) {
  802                 printf("sblastmbufchk: sb_mb %p sb_mbtail %p last %p\n",
  803                     sb->sb_mb, sb->sb_mbtail, m);
  804                 printf("packet tree:\n");
  805                 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
  806                         printf("\t");
  807                         for (n = m; n != NULL; n = n->m_next)
  808                                 printf("%p ", n);
  809                         printf("\n");
  810                 }
  811                 panic("sblastmbufchk from %s", where);
  812         }
  813 }
  814 #endif /* SOCKBUF_DEBUG */
  815 
  816 /*
  817  * Link a chain of records onto a socket buffer
  818  */
  819 #define SBLINKRECORDCHAIN(sb, m0, mlast)                                \
  820 do {                                                                    \
  821         if ((sb)->sb_lastrecord != NULL)                                \
  822                 (sb)->sb_lastrecord->m_nextpkt = (m0);                  \
  823         else                                                            \
  824                 (sb)->sb_mb = (m0);                                     \
  825         (sb)->sb_lastrecord = (mlast);                                  \
  826 } while (/*CONSTCOND*/0)
  827 
  828 
  829 #define SBLINKRECORD(sb, m0)                                            \
  830     SBLINKRECORDCHAIN(sb, m0, m0)
  831 
  832 /*
  833  * Append mbuf chain m to the last record in the
   834  * socket buffer sb.  The additional space associated with
   835  * the mbuf chain is recorded in sb.  Empty mbufs are
  836  * discarded and mbufs are compacted where possible.
  837  */
  838 void
  839 sbappend(struct sockbuf *sb, struct mbuf *m)
  840 {
  841         struct mbuf     *n;
  842 
  843         KASSERT(solocked(sb->sb_so));
  844 
  845         if (m == NULL)
  846                 return;
  847 
  848 #ifdef MBUFTRACE
  849         m_claimm(m, sb->sb_mowner);
  850 #endif
  851 
  852         SBLASTRECORDCHK(sb, "sbappend 1");
  853 
  854         if ((n = sb->sb_lastrecord) != NULL) {
  855                 /*
  856                  * XXX Would like to simply use sb_mbtail here, but
  857                  * XXX I need to verify that I won't miss an EOR that
  858                  * XXX way.
  859                  */
  860                 do {
  861                         if (n->m_flags & M_EOR) {
  862                                 sbappendrecord(sb, m); /* XXXXXX!!!! */
  863                                 return;
  864                         }
  865                 } while (n->m_next && (n = n->m_next));
  866         } else {
  867                 /*
  868                  * If this is the first record in the socket buffer, it's
  869                  * also the last record.
  870                  */
  871                 sb->sb_lastrecord = m;
  872         }
  873         sbcompress(sb, m, n);
  874         SBLASTRECORDCHK(sb, "sbappend 2");
  875 }
  876 
  877 /*
  878  * This version of sbappend() should only be used when the caller
  879  * absolutely knows that there will never be more than one record
  880  * in the socket buffer, that is, a stream protocol (such as TCP).
  881  */
  882 void
  883 sbappendstream(struct sockbuf *sb, struct mbuf *m)
  884 {
  885 
  886         KASSERT(solocked(sb->sb_so));
  887         KDASSERT(m->m_nextpkt == NULL);
  888         KASSERT(sb->sb_mb == sb->sb_lastrecord);
  889 
  890         SBLASTMBUFCHK(sb, __func__);
  891 
  892 #ifdef MBUFTRACE
  893         m_claimm(m, sb->sb_mowner);
  894 #endif
  895 
  896         sbcompress(sb, m, sb->sb_mbtail);
  897 
  898         sb->sb_lastrecord = sb->sb_mb;
  899         SBLASTRECORDCHK(sb, __func__);
  900 }
  901 
  902 #ifdef SOCKBUF_DEBUG
  903 void
  904 sbcheck(struct sockbuf *sb)
  905 {
  906         struct mbuf     *m, *m2;
  907         u_long          len, mbcnt;
  908 
  909         KASSERT(solocked(sb->sb_so));
  910 
  911         len = 0;
  912         mbcnt = 0;
  913         for (m = sb->sb_mb; m; m = m->m_nextpkt) {
  914                 for (m2 = m; m2 != NULL; m2 = m2->m_next) {
  915                         len += m2->m_len;
  916                         mbcnt += MSIZE;
  917                         if (m2->m_flags & M_EXT)
  918                                 mbcnt += m2->m_ext.ext_size;
  919                         if (m2->m_nextpkt != NULL)
  920                                 panic("sbcheck nextpkt");
  921                 }
  922         }
  923         if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
  924                 printf("cc %lu != %lu || mbcnt %lu != %lu\n", len, sb->sb_cc,
  925                     mbcnt, sb->sb_mbcnt);
  926                 panic("sbcheck");
  927         }
  928 }
  929 #endif
  930 
  931 /*
  932  * As above, except the mbuf chain
  933  * begins a new record.
  934  */
  935 void
  936 sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
  937 {
  938         struct mbuf     *m;
  939 
  940         KASSERT(solocked(sb->sb_so));
  941 
  942         if (m0 == NULL)
  943                 return;
  944 
  945 #ifdef MBUFTRACE
  946         m_claimm(m0, sb->sb_mowner);
  947 #endif
  948         /*
  949          * Put the first mbuf on the queue.
  950          * Note this permits zero length records.
  951          */
  952         sballoc(sb, m0);
  953         SBLASTRECORDCHK(sb, "sbappendrecord 1");
  954         SBLINKRECORD(sb, m0);
  955         m = m0->m_next;
  956         m0->m_next = 0;
  957         if (m && (m0->m_flags & M_EOR)) {
  958                 m0->m_flags &= ~M_EOR;
  959                 m->m_flags |= M_EOR;
  960         }
  961         sbcompress(sb, m, m0);
  962         SBLASTRECORDCHK(sb, "sbappendrecord 2");
  963 }
  964 
  965 /*
  966  * As above except that OOB data
  967  * is inserted at the beginning of the sockbuf,
  968  * but after any other OOB data.
  969  */
  970 void
  971 sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
  972 {
  973         struct mbuf     *m, **mp;
  974 
  975         KASSERT(solocked(sb->sb_so));
  976 
  977         if (m0 == NULL)
  978                 return;
  979 
  980         SBLASTRECORDCHK(sb, "sbinsertoob 1");
  981 
  982         for (mp = &sb->sb_mb; (m = *mp) != NULL; mp = &((*mp)->m_nextpkt)) {
  983             again:
  984                 switch (m->m_type) {
  985 
  986                 case MT_OOBDATA:
  987                         continue;               /* WANT next train */
  988 
  989                 case MT_CONTROL:
  990                         if ((m = m->m_next) != NULL)
  991                                 goto again;     /* inspect THIS train further */
  992                 }
  993                 break;
  994         }
  995         /*
  996          * Put the first mbuf on the queue.
  997          * Note this permits zero length records.
  998          */
  999         sballoc(sb, m0);
 1000         m0->m_nextpkt = *mp;
 1001         if (*mp == NULL) {
 1002                 /* m0 is actually the new tail */
 1003                 sb->sb_lastrecord = m0;
 1004         }
 1005         *mp = m0;
 1006         m = m0->m_next;
 1007         m0->m_next = 0;
 1008         if (m && (m0->m_flags & M_EOR)) {
 1009                 m0->m_flags &= ~M_EOR;
 1010                 m->m_flags |= M_EOR;
 1011         }
 1012         sbcompress(sb, m, m0);
 1013         SBLASTRECORDCHK(sb, "sbinsertoob 2");
 1014 }
 1015 
 1016 /*
 1017  * Append address and data, and optionally, control (ancillary) data
 1018  * to the receive queue of a socket.  If present,
 1019  * m0 must include a packet header with total length.
 1020  * Returns 0 if no space in sockbuf or insufficient mbufs.
 1021  */
 1022 int
 1023 sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
 1024         struct mbuf *control)
 1025 {
 1026         struct mbuf     *m, *n, *nlast;
 1027         int             space, len;
 1028 
 1029         KASSERT(solocked(sb->sb_so));
 1030 
 1031         space = asa->sa_len;
 1032 
 1033         if (m0 != NULL) {
 1034                 if ((m0->m_flags & M_PKTHDR) == 0)
 1035                         panic("sbappendaddr");
 1036                 space += m0->m_pkthdr.len;
 1037 #ifdef MBUFTRACE
 1038                 m_claimm(m0, sb->sb_mowner);
 1039 #endif
 1040         }
 1041         for (n = control; n; n = n->m_next) {
 1042                 space += n->m_len;
 1043                 MCLAIM(n, sb->sb_mowner);
 1044                 if (n->m_next == NULL)  /* keep pointer to last control buf */
 1045                         break;
 1046         }
 1047         if (space > sbspace(sb))
 1048                 return (0);
 1049         m = m_get(M_DONTWAIT, MT_SONAME);
 1050         if (m == NULL)
 1051                 return (0);
 1052         MCLAIM(m, sb->sb_mowner);
 1053         /*
 1054          * XXX avoid 'comparison always true' warning which isn't easily
 1055          * avoided.
 1056          */
 1057         len = asa->sa_len;
 1058         if (len > MLEN) {
 1059                 MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
 1060                 if ((m->m_flags & M_EXT) == 0) {
 1061                         m_free(m);
 1062                         return (0);
 1063                 }
 1064         }
 1065         m->m_len = asa->sa_len;
 1066         memcpy(mtod(m, void *), asa, asa->sa_len);
 1067         if (n)
 1068                 n->m_next = m0;         /* concatenate data to control */
 1069         else
 1070                 control = m0;
 1071         m->m_next = control;
 1072 
 1073         SBLASTRECORDCHK(sb, "sbappendaddr 1");
 1074 
 1075         for (n = m; n->m_next != NULL; n = n->m_next)
 1076                 sballoc(sb, n);
 1077         sballoc(sb, n);
 1078         nlast = n;
 1079         SBLINKRECORD(sb, m);
 1080 
 1081         sb->sb_mbtail = nlast;
 1082         SBLASTMBUFCHK(sb, "sbappendaddr");
 1083         SBLASTRECORDCHK(sb, "sbappendaddr 2");
 1084 
 1085         return (1);
 1086 }
 1087 
 1088 /*
  1089  * Helper for sbappendaddrchain(): prepend a struct sockaddr* to
 1090  * an mbuf chain.
 1091  */
 1092 static inline struct mbuf *
 1093 m_prepend_sockaddr(struct sockbuf *sb, struct mbuf *m0,
 1094                    const struct sockaddr *asa)
 1095 {
 1096         struct mbuf *m;
 1097         const int salen = asa->sa_len;
 1098 
 1099         KASSERT(solocked(sb->sb_so));
 1100 
 1101         /* only the first in each chain need be a pkthdr */
 1102         m = m_gethdr(M_DONTWAIT, MT_SONAME);
 1103         if (m == NULL)
 1104                 return NULL;
 1105         MCLAIM(m, sb->sb_mowner);
 1106 #ifdef notyet
 1107         if (salen > MHLEN) {
 1108                 MEXTMALLOC(m, salen, M_NOWAIT);
 1109                 if ((m->m_flags & M_EXT) == 0) {
 1110                         m_free(m);
 1111                         return NULL;
 1112                 }
 1113         }
 1114 #else
 1115         KASSERT(salen <= MHLEN);
 1116 #endif
 1117         m->m_len = salen;
 1118         memcpy(mtod(m, void *), asa, salen);
 1119         m->m_next = m0;
 1120         m->m_pkthdr.len = salen + m0->m_pkthdr.len;
 1121 
 1122         return m;
 1123 }
 1124 
 1125 int
 1126 sbappendaddrchain(struct sockbuf *sb, const struct sockaddr *asa,
 1127                   struct mbuf *m0, int sbprio)
 1128 {
 1129         struct mbuf *m, *n, *n0, *nlast;
 1130         int error;
 1131 
 1132         KASSERT(solocked(sb->sb_so));
 1133 
 1134         /*
  1135          * XXX sbprio reserved for encoding priority of this request:
  1136          *  SB_PRIO_NONE --> honour normal sb limits
  1137          *  SB_PRIO_ONESHOT_OVERFLOW --> if socket has any space,
  1138          *      take whole chain.  Intended for large requests
  1139          *      that should be delivered atomically (all, or none).
  1140          *  SB_PRIO_OVERDRAFT --> allow a small (2*MLEN) overflow
  1141          *      over normal socket limits, for messages indicating
  1142          *      buffer overflow in earlier normal/lower-priority messages.
  1143          *  SB_PRIO_BESTEFFORT --> ignore limits entirely.
  1144          *      Intended for kernel-generated messages only.
  1145          *      Up to the generator to avoid total mbuf resource exhaustion.
 1146          */
 1147         (void)sbprio;
 1148 
 1149         if (m0 && (m0->m_flags & M_PKTHDR) == 0)
 1150                 panic("sbappendaddrchain");
 1151 
 1152 #ifdef notyet
 1153         space = sbspace(sb);
 1154 
 1155         /*
 1156          * Enforce SB_PRIO_* limits as described above.
 1157          */
 1158 #endif
 1159 
 1160         n0 = NULL;
 1161         nlast = NULL;
 1162         for (m = m0; m; m = m->m_nextpkt) {
 1163                 struct mbuf *np;
 1164 
 1165 #ifdef MBUFTRACE
 1166                 m_claimm(m, sb->sb_mowner);
 1167 #endif
 1168 
 1169                 /* Prepend sockaddr to this record (m) of input chain m0 */
 1170                 n = m_prepend_sockaddr(sb, m, asa);
 1171                 if (n == NULL) {
 1172                         error = ENOBUFS;
 1173                         goto bad;
 1174                 }
 1175 
 1176                 /* Append record (asa+m) to end of new chain n0 */
 1177                 if (n0 == NULL) {
 1178                         n0 = n;
 1179                 } else {
 1180                         nlast->m_nextpkt = n;
 1181                 }
 1182                 /* Keep track of last record on new chain */
 1183                 nlast = n;
 1184 
 1185                 for (np = n; np; np = np->m_next)
 1186                         sballoc(sb, np);
 1187         }
 1188 
 1189         SBLASTRECORDCHK(sb, "sbappendaddrchain 1");
 1190 
 1191         /* Drop the entire chain of (asa+m) records onto the socket */
 1192         SBLINKRECORDCHAIN(sb, n0, nlast);
 1193 
 1194         SBLASTRECORDCHK(sb, "sbappendaddrchain 2");
 1195 
 1196         for (m = nlast; m->m_next; m = m->m_next)
 1197                 ;
 1198         sb->sb_mbtail = m;
 1199         SBLASTMBUFCHK(sb, "sbappendaddrchain");
 1200 
 1201         return (1);
 1202 
 1203 bad:
 1204         /*
  1205          * On error, free the prepended addresses.  For consistency
 1206          * with sbappendaddr(), leave it to our caller to free
 1207          * the input record chain passed to us as m0.
 1208          */
 1209         while ((n = n0) != NULL) {
 1210                 struct mbuf *np;
 1211 
 1212                 /* Undo the sballoc() of this record */
 1213                 for (np = n; np; np = np->m_next)
 1214                         sbfree(sb, np);
 1215 
 1216                 n0 = n->m_nextpkt;      /* iterate at next prepended address */
 1217                 np = m_free(n);         /* free prepended address (not data) */
 1218         }
 1219         return error;
 1220 }
 1221 
 1222 
 1223 int
 1224 sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
 1225 {
 1226         struct mbuf     *m, *mlast, *n;
 1227         int             space;
 1228 
 1229         KASSERT(solocked(sb->sb_so));
 1230 
 1231         space = 0;
 1232         if (control == NULL)
 1233                 panic("sbappendcontrol");
 1234         for (m = control; ; m = m->m_next) {
 1235                 space += m->m_len;
 1236                 MCLAIM(m, sb->sb_mowner);
 1237                 if (m->m_next == NULL)
 1238                         break;
 1239         }
 1240         n = m;                  /* save pointer to last control buffer */
 1241         for (m = m0; m; m = m->m_next) {
 1242                 MCLAIM(m, sb->sb_mowner);
 1243                 space += m->m_len;
 1244         }
 1245         if (space > sbspace(sb))
 1246                 return (0);
 1247         n->m_next = m0;                 /* concatenate data to control */
 1248 
 1249         SBLASTRECORDCHK(sb, "sbappendcontrol 1");
 1250 
 1251         for (m = control; m->m_next != NULL; m = m->m_next)
 1252                 sballoc(sb, m);
 1253         sballoc(sb, m);
 1254         mlast = m;
 1255         SBLINKRECORD(sb, control);
 1256 
 1257         sb->sb_mbtail = mlast;
 1258         SBLASTMBUFCHK(sb, "sbappendcontrol");
 1259         SBLASTRECORDCHK(sb, "sbappendcontrol 2");
 1260 
 1261         return (1);
 1262 }
 1263 
 1264 /*
 1265  * Compress mbuf chain m into the socket
 1266  * buffer sb following mbuf n.  If n
 1267  * is null, the buffer is presumed empty.
 1268  */
 1269 void
 1270 sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
 1271 {
 1272         int             eor;
 1273         struct mbuf     *o;
 1274 
 1275         KASSERT(solocked(sb->sb_so));
 1276 
 1277         eor = 0;
 1278         while (m) {
 1279                 eor |= m->m_flags & M_EOR;
 1280                 if (m->m_len == 0 &&
 1281                     (eor == 0 ||
 1282                      (((o = m->m_next) || (o = n)) &&
 1283                       o->m_type == m->m_type))) {
 1284                         if (sb->sb_lastrecord == m)
 1285                                 sb->sb_lastrecord = m->m_next;
 1286                         m = m_free(m);
 1287                         continue;
 1288                 }
 1289                 if (n && (n->m_flags & M_EOR) == 0 &&
 1290                     /* M_TRAILINGSPACE() checks buffer writeability */
 1291                     m->m_len <= MCLBYTES / 4 && /* XXX Don't copy too much */
 1292                     m->m_len <= M_TRAILINGSPACE(n) &&
 1293                     n->m_type == m->m_type) {
 1294                         memcpy(mtod(n, char *) + n->m_len, mtod(m, void *),
 1295                             (unsigned)m->m_len);
 1296                         n->m_len += m->m_len;
 1297                         sb->sb_cc += m->m_len;
 1298                         m = m_free(m);
 1299                         continue;
 1300                 }
 1301                 if (n)
 1302                         n->m_next = m;
 1303                 else
 1304                         sb->sb_mb = m;
 1305                 sb->sb_mbtail = m;
 1306                 sballoc(sb, m);
 1307                 n = m;
 1308                 m->m_flags &= ~M_EOR;
 1309                 m = m->m_next;
 1310                 n->m_next = 0;
 1311         }
 1312         if (eor) {
 1313                 if (n)
 1314                         n->m_flags |= eor;
 1315                 else
 1316                         printf("semi-panic: sbcompress\n");
 1317         }
 1318         SBLASTMBUFCHK(sb, __func__);
 1319 }
 1320 
 1321 /*
 1322  * Free all mbufs in a sockbuf.
 1323  * Check that all resources are reclaimed.
 1324  */
 1325 void
 1326 sbflush(struct sockbuf *sb)
 1327 {
 1328 
 1329         KASSERT(solocked(sb->sb_so));
 1330         KASSERT((sb->sb_flags & SB_LOCK) == 0);
 1331 
 1332         while (sb->sb_mbcnt)
 1333                 sbdrop(sb, (int)sb->sb_cc);
 1334 
 1335         KASSERT(sb->sb_cc == 0);
 1336         KASSERT(sb->sb_mb == NULL);
 1337         KASSERT(sb->sb_mbtail == NULL);
 1338         KASSERT(sb->sb_lastrecord == NULL);
 1339 }
 1340 
 1341 /*
 1342  * Drop data from (the front of) a sockbuf.
 1343  */
 1344 void
 1345 sbdrop(struct sockbuf *sb, int len)
 1346 {
 1347         struct mbuf     *m, *next;
 1348 
 1349         KASSERT(solocked(sb->sb_so));
 1350 
 1351         next = (m = sb->sb_mb) ? m->m_nextpkt : NULL;
 1352         while (len > 0) {
 1353                 if (m == NULL) {
 1354                         if (next == NULL)
 1355                                 panic("sbdrop(%p,%d): cc=%lu",
 1356                                     sb, len, sb->sb_cc);
 1357                         m = next;
 1358                         next = m->m_nextpkt;
 1359                         continue;
 1360                 }
 1361                 if (m->m_len > len) {
 1362                         m->m_len -= len;
 1363                         m->m_data += len;
 1364                         sb->sb_cc -= len;
 1365                         break;
 1366                 }
 1367                 len -= m->m_len;
 1368                 sbfree(sb, m);
 1369                 m = m_free(m);
 1370         }
 1371         while (m && m->m_len == 0) {
 1372                 sbfree(sb, m);
 1373                 m = m_free(m);
 1374         }
 1375         if (m) {
 1376                 sb->sb_mb = m;
 1377                 m->m_nextpkt = next;
 1378         } else
 1379                 sb->sb_mb = next;
 1380         /*
 1381          * First part is an inline SB_EMPTY_FIXUP().  Second part
 1382          * makes sure sb_lastrecord is up-to-date if we dropped
 1383          * part of the last record.
 1384          */
 1385         m = sb->sb_mb;
 1386         if (m == NULL) {
 1387                 sb->sb_mbtail = NULL;
 1388                 sb->sb_lastrecord = NULL;
 1389         } else if (m->m_nextpkt == NULL)
 1390                 sb->sb_lastrecord = m;
 1391 }
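
/*
 * Editor's illustrative sketch (not part of uipc_socket2.c): a reliable
 * protocol releasing acknowledged data from the send buffer, as described
 * in the comment above the append routines.  example_acked() is an assumed
 * name; the block is not compiled.
 */
#if 0
static void
example_acked(struct socket *so, int acked_bytes)
{
        KASSERT(solocked(so));

        /* Drop data the peer has acknowledged and wake up writers. */
        sbdrop(&so->so_snd, acked_bytes);
        sowwakeup(so);
}
#endif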
 1392 
 1393 /*
 1394  * Drop a record off the front of a sockbuf
 1395  * and move the next record to the front.
 1396  */
 1397 void
 1398 sbdroprecord(struct sockbuf *sb)
 1399 {
 1400         struct mbuf     *m, *mn;
 1401 
 1402         KASSERT(solocked(sb->sb_so));
 1403 
 1404         m = sb->sb_mb;
 1405         if (m) {
 1406                 sb->sb_mb = m->m_nextpkt;
 1407                 do {
 1408                         sbfree(sb, m);
 1409                         mn = m_free(m);
 1410                 } while ((m = mn) != NULL);
 1411         }
 1412         SB_EMPTY_FIXUP(sb);
 1413 }
 1414 
 1415 /*
 1416  * Create a "control" mbuf containing the specified data
 1417  * with the specified type for presentation on a socket buffer.
 1418  */
 1419 struct mbuf *
 1420 sbcreatecontrol1(void **p, int size, int type, int level, int flags)
 1421 {
 1422         struct cmsghdr  *cp;
 1423         struct mbuf     *m;
 1424         int space = CMSG_SPACE(size);
 1425 
 1426         if ((flags & M_DONTWAIT) && space > MCLBYTES) {
 1427                 printf("%s: message too large %d\n", __func__, space);
 1428                 return NULL;
 1429         }
 1430 
 1431         if ((m = m_get(flags, MT_CONTROL)) == NULL)
 1432                 return NULL;
 1433         if (space > MLEN) {
 1434                 if (space > MCLBYTES)
 1435                         MEXTMALLOC(m, space, M_WAITOK);
 1436                 else
 1437                         MCLGET(m, flags);
 1438                 if ((m->m_flags & M_EXT) == 0) {
 1439                         m_free(m);
 1440                         return NULL;
 1441                 }
 1442         }
 1443         cp = mtod(m, struct cmsghdr *);
 1444         *p = CMSG_DATA(cp);
 1445         m->m_len = space;
 1446         cp->cmsg_len = CMSG_LEN(size);
 1447         cp->cmsg_level = level;
 1448         cp->cmsg_type = type;
 1449 
 1450         memset(cp + 1, 0, CMSG_LEN(0) - sizeof(*cp));
 1451         memset((uint8_t *)*p + size, 0, CMSG_ALIGN(size) - size);
 1452 
 1453         return m;
 1454 }
 1455 
 1456 struct mbuf *
 1457 sbcreatecontrol(void *p, int size, int type, int level)
 1458 {
 1459         struct mbuf *m;
 1460         void *v;
 1461 
 1462         m = sbcreatecontrol1(&v, size, type, level, M_DONTWAIT);
 1463         if (m == NULL)
 1464                 return NULL;
 1465         memcpy(v, p, size);
 1466         return m;
 1467 }
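
/*
 * Editor's illustrative sketch (not part of uipc_socket2.c): wrapping a
 * small value in a cmsghdr-framed control mbuf with sbcreatecontrol().
 * example_make_control() and the zero type/level values are placeholders
 * for whatever the protocol defines; the block is not compiled.
 */
#if 0
static struct mbuf *
example_make_control(int value)
{
        /*
         * sbcreatecontrol() copies 'value' after a cmsghdr of the given
         * type and level; NULL is returned if no mbuf can be allocated.
         */
        return sbcreatecontrol(&value, sizeof(value),
            0 /* cmsg_type */, 0 /* cmsg_level */);
}
#endif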
 1468 
 1469 void
 1470 solockretry(struct socket *so, kmutex_t *lock)
 1471 {
 1472 
 1473         while (lock != atomic_load_relaxed(&so->so_lock)) {
 1474                 mutex_exit(lock);
 1475                 lock = atomic_load_consume(&so->so_lock);
 1476                 mutex_enter(lock);
 1477         }
 1478 }
 1479 
 1480 bool
 1481 solocked(const struct socket *so)
 1482 {
 1483 
 1484         /*
 1485          * Used only for diagnostic assertions, so so_lock should be
  1486          * stable at this point, hence no need for atomic_load_*.
 1487          */
 1488         return mutex_owned(so->so_lock);
 1489 }
 1490 
 1491 bool
 1492 solocked2(const struct socket *so1, const struct socket *so2)
 1493 {
 1494         const kmutex_t *lock;
 1495 
 1496         /*
 1497          * Used only for diagnostic assertions, so so_lock should be
 1498          * stable at this point, hence no need for atomic_load_*.
 1499          */
 1500         lock = so1->so_lock;
 1501         if (lock != so2->so_lock)
 1502                 return false;
 1503         return mutex_owned(lock);
 1504 }
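
/*
 * Illustrative use of solocked2() (hypothetical helper): code that moves
 * data between two connected sockets, e.g. in the AF_LOCAL path, can assert
 * that both ends share one lock before touching either buffer.
 */
static void
example_pair_assert(struct socket *so1, struct socket *so2)
{

	KASSERT(solocked2(so1, so2));
	/* Safe to manipulate both so1->so_snd and so2->so_rcv here. */
}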
 1505 
 1506 /*
 1507  * sosetlock: assign a default lock to a new socket.
 1508  */
 1509 void
 1510 sosetlock(struct socket *so)
 1511 {
 1512         if (so->so_lock == NULL) {
 1513                 kmutex_t *lock = softnet_lock;
 1514 
 1515                 so->so_lock = lock;
 1516                 mutex_obj_hold(lock);
 1517                 mutex_enter(lock);
 1518         }
 1519         KASSERT(solocked(so));
 1520 }
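
/*
 * Illustrative sketch (hypothetical attach routine, not from this file):
 * callers that do not install their own lock can rely on sosetlock() to
 * install softnet_lock and return with it held.
 */
static int
example_attach(struct socket *so)
{

	sosetlock(so);		/* installs and enters softnet_lock if unset */
	KASSERT(solocked(so));
	/* ... allocate per-protocol state here ... */
	return 0;
}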
 1521 
 1522 /*
 1523  * Set lock on sockbuf sb; sleep if lock is already held.
 1524  * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
 1525  * Returns error without lock if sleep is interrupted.
 1526  */
 1527 int
 1528 sblock(struct sockbuf *sb, int wf)
 1529 {
 1530         struct socket *so;
 1531         kmutex_t *lock;
 1532         int error;
 1533 
 1534         KASSERT(solocked(sb->sb_so));
 1535 
 1536         for (;;) {
 1537                 if (__predict_true((sb->sb_flags & SB_LOCK) == 0)) {
 1538                         sb->sb_flags |= SB_LOCK;
 1539                         return 0;
 1540                 }
 1541                 if (wf != M_WAITOK)
 1542                         return EWOULDBLOCK;
 1543                 so = sb->sb_so;
 1544                 lock = so->so_lock;
 1545                 if ((sb->sb_flags & SB_NOINTR) != 0) {
 1546                         cv_wait(&so->so_cv, lock);
 1547                         error = 0;
 1548                 } else
 1549                         error = cv_wait_sig(&so->so_cv, lock);
 1550                 if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
 1551                         solockretry(so, lock);
 1552                 if (error != 0)
 1553                         return error;
 1554         }
 1555 }
 1556 
 1557 void
 1558 sbunlock(struct sockbuf *sb)
 1559 {
 1560         struct socket *so;
 1561 
 1562         so = sb->sb_so;
 1563 
 1564         KASSERT(solocked(so));
 1565         KASSERT((sb->sb_flags & SB_LOCK) != 0);
 1566 
 1567         sb->sb_flags &= ~SB_LOCK;
 1568         cv_broadcast(&so->so_cv);
 1569 }
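
/*
 * Illustrative sblock()/sbunlock() pattern (hypothetical helper): take the
 * per-sockbuf SB_LOCK around an operation that may sleep, then release it
 * and wake any waiters.  solock()/sounlock() bracket the whole sequence.
 */
static int
example_locked_rcv_op(struct socket *so)
{
	int error;

	solock(so);
	error = sblock(&so->so_rcv, M_WAITOK);
	if (error != 0) {
		sounlock(so);
		return error;
	}
	/* ... operate on so->so_rcv while SB_LOCK is held ... */
	sbunlock(&so->so_rcv);
	sounlock(so);
	return 0;
}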
 1570 
 1571 int
 1572 sowait(struct socket *so, bool catch_p, int timo)
 1573 {
 1574         kmutex_t *lock;
 1575         int error;
 1576 
 1577         KASSERT(solocked(so));
 1578         KASSERT(catch_p || timo != 0);
 1579 
 1580         lock = so->so_lock;
 1581         if (catch_p)
 1582                 error = cv_timedwait_sig(&so->so_cv, lock, timo);
 1583         else
 1584                 error = cv_timedwait(&so->so_cv, lock, timo);
 1585         if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
 1586                 solockretry(so, lock);
 1587         return error;
 1588 }
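
/*
 * Illustrative use of sowait() (simplified from the usual blocking-connect
 * pattern; the helper and its details are not taken from this file): sleep
 * on so_cv until the connection attempt resolves.  Passing catch_p = true
 * satisfies the KASSERT above even with no timeout (timo == 0).
 */
static int
example_wait_for_connect(struct socket *so)
{
	int error = 0;

	KASSERT(solocked(so));
	while ((so->so_state & SS_ISCONNECTING) != 0 && so->so_error == 0) {
		error = sowait(so, true, 0);
		if (error != 0)
			break;
	}
	return error;
}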
 1589 
 1590 #ifdef DDB
 1591 
 1592 /*
 1593  * Currently, sofindproc() is used only from DDB.  It could be used from
 1594  * other places as well, by going through db_mutex_enter().
 1595  */
 1596 
 1597 static inline int
 1598 db_mutex_enter(kmutex_t *mtx)
 1599 {
 1600         int rv;
 1601 
 1602         if (!db_active) {
 1603                 mutex_enter(mtx);
 1604                 rv = 1;
 1605         } else
 1606                 rv = mutex_tryenter(mtx);
 1607 
 1608         return rv;
 1609 }
 1610 
 1611 int
 1612 sofindproc(struct socket *so, int all, void (*pr)(const char *, ...))
 1613 {
 1614         proc_t *p;
 1615         filedesc_t *fdp;
 1616         fdtab_t *dt;
 1617         fdfile_t *ff;
 1618         file_t *fp = NULL;
 1619         int found = 0;
 1620         int i, t;
 1621 
 1622         if (so == NULL)
 1623                 return 0;
 1624 
 1625         t = db_mutex_enter(&proc_lock);
 1626         if (!t) {
 1627                 pr("could not acquire proc_lock mutex\n");
 1628                 return 0;
 1629         }
 1630         PROCLIST_FOREACH(p, &allproc) {
 1631                 if (p->p_stat == SIDL)
 1632                         continue;
 1633                 fdp = p->p_fd;
 1634                 t = db_mutex_enter(&fdp->fd_lock);
 1635                 if (!t) {
 1636                         pr("could not acquire fd_lock mutex\n");
 1637                         continue;
 1638                 }
 1639                 dt = atomic_load_consume(&fdp->fd_dt);
 1640                 for (i = 0; i < dt->dt_nfiles; i++) {
 1641                         ff = dt->dt_ff[i];
 1642                         if (ff == NULL)
 1643                                 continue;
 1644 
 1645                         fp = atomic_load_consume(&ff->ff_file);
 1646                         if (fp == NULL)
 1647                                 continue;
 1648 
 1649                         t = db_mutex_enter(&fp->f_lock);
 1650                         if (!t) {
 1651                                 pr("could not acquire f_lock mutex\n");
 1652                                 continue;
 1653                         }
 1654                         if ((struct socket *)fp->f_data != so) {
 1655                                 mutex_exit(&fp->f_lock);
 1656                                 continue;
 1657                         }
 1658                         found++;
 1659                         if (pr)
 1660                                 pr("socket %p: owner %s(pid=%d)\n",
 1661                                     so, p->p_comm, p->p_pid);
 1662                         mutex_exit(&fp->f_lock);
 1663                         if (all == 0)
 1664                                 break;
 1665                 }
 1666                 mutex_exit(&fdp->fd_lock);
 1667                 if (all == 0 && found != 0)
 1668                         break;
 1669         }
 1670         mutex_exit(&proc_lock);
 1671 
 1672         return found;
 1673 }
 1674 
 1675 void
 1676 socket_print(const char *modif, void (*pr)(const char *, ...))
 1677 {
 1678         file_t *fp;
 1679         struct socket *so;
 1680         struct sockbuf *sb_snd, *sb_rcv;
 1681         struct mbuf *m_rec, *m;
 1682         bool opt_v = false;
 1683         bool opt_m = false;
 1684         bool opt_a = false;
 1685         bool opt_p = false;
 1686         int nrecs, nmbufs;
 1687         char ch;
 1688         const char *family;
 1689 
 1690         while ( (ch = *(modif++)) != '\0') {
 1691                 switch (ch) {
 1692                 case 'v':
 1693                         opt_v = true;
 1694                         break;
 1695                 case 'm':
 1696                         opt_m = true;
 1697                         break;
 1698                 case 'a':
 1699                         opt_a = true;
 1700                         break;
 1701                 case 'p':
 1702                         opt_p = true;
 1703                         break;
 1704                 }
 1705         }
 1706         if (opt_v == false && pr)
 1707                 (pr)("Ignoring empty sockets; use /v to print all.\n");
 1708         if (opt_p == true && pr)
 1709                 (pr)("Not searching for owner process.\n");
 1710 
 1711         LIST_FOREACH(fp, &filehead, f_list) {
 1712                 if (fp->f_type != DTYPE_SOCKET)
 1713                         continue;
 1714                 so = (struct socket *)fp->f_data;
 1715                 if (so == NULL)
 1716                         continue;
 1717 
 1718                 if (so->so_proto->pr_domain->dom_family == AF_INET)
 1719                         family = "INET";
 1720 #ifdef INET6
 1721                 else if (so->so_proto->pr_domain->dom_family == AF_INET6)
 1722                         family = "INET6";
 1723 #endif
 1724                 else if (so->so_proto->pr_domain->dom_family == pseudo_AF_KEY)
 1725                         family = "KEY";
 1726                 else if (so->so_proto->pr_domain->dom_family == AF_ROUTE)
 1727                         family = "ROUTE";
 1728                 else
 1729                         continue;
 1730 
 1731                 sb_snd = &so->so_snd;
 1732                 sb_rcv = &so->so_rcv;
 1733 
 1734                 if (opt_v != true &&
 1735                     sb_snd->sb_cc == 0 && sb_rcv->sb_cc == 0)
 1736                         continue;
 1737 
 1738                 pr("---SOCKET %p: type %s\n", so, family);
 1739                 if (opt_p != true)
 1740                         sofindproc(so, opt_a == true ? 1 : 0, pr);
 1741                 pr("Send Buffer Bytes: %d [bytes]\n", sb_snd->sb_cc);
 1742                 pr("Send Buffer mbufs:\n");
 1743                 m_rec = m = sb_snd->sb_mb;
 1744                 nrecs = 0;
 1745                 nmbufs = 0;
 1746                 while (m_rec) {
 1747                         nrecs++;
 1748                         if (opt_m == true)
 1749                                 pr(" mbuf chain %p\n", m_rec);
 1750                         while (m) {
 1751                                 nmbufs++;
 1752                                 m = m->m_next;
 1753                         }
 1754                         m_rec = m = m_rec->m_nextpkt;
 1755                 }
 1756                 pr(" Total %d records, %d mbufs.\n", nrecs, nmbufs);
 1757 
 1758                 pr("Recv Buffer Usage: %d [bytes]\n", sb_rcv->sb_cc);
 1759                 pr("Recv Buffer mbufs:\n");
 1760                 m_rec = m = sb_rcv->sb_mb;
 1761                 nrecs = 0;
 1762                 nmbufs = 0;
 1763                 while (m_rec) {
 1764                         nrecs++;
 1765                         if (opt_m == true)
 1766                                 pr(" mbuf chain %p\n", m_rec);
 1767                         while (m) {
 1768                                 nmbufs++;
 1769                                 m = m->m_next;
 1770                         }
 1771                         m_rec = m = m_rec->m_nextpkt;
 1772                 }
 1773                 pr(" Total %d records, %d mbufs.\n", nrecs, nmbufs);
 1774         }
 1775 }
 1776 #endif /* DDB */
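
/*
 * socket_print() is not invoked from this file; assuming it is wired up as
 * a DDB "show" command (the command name below is an assumption, not taken
 * from this source), a session would produce output of the form built by
 * the pr() calls above, e.g.:
 *
 *	db> show socket/v
 *	---SOCKET 0x...: type INET
 *	Send Buffer Bytes: 0 [bytes]
 *	Send Buffer mbufs:
 *	 Total 0 records, 0 mbufs.
 *	Recv Buffer Usage: 0 [bytes]
 *	Recv Buffer mbufs:
 *	 Total 0 records, 0 mbufs.
 */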
