FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_usrreq.c

    1 /*-
    2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
    3  *      The Regents of the University of California.
    4  * Copyright (c) 2004-2008 Robert N. M. Watson
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 4. Neither the name of the University nor the names of its contributors
   16  *    may be used to endorse or promote products derived from this software
   17  *    without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  *
   31  *      From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
   32  */
   33 
   34 /*
   35  * UNIX Domain (Local) Sockets
   36  *
   37  * This is an implementation of UNIX (local) domain sockets.  Each socket has
   38  * an associated struct unpcb (UNIX protocol control block).  Stream sockets
   39  * may be connected to 0 or 1 other socket.  Datagram sockets may be
   40  * connected to 0, 1, or many other sockets.  Sockets may be created and
   41  * connected in pairs (socketpair(2)), or bound/connected to using the file
   42  * system name space.  For most purposes, only the receive socket buffer is
   43  * used, as sending on one socket delivers directly to the receive socket
   44  * buffer of a second socket.
   45  *
   46  * The implementation is substantially complicated by the fact that
   47  * "ancillary data", such as file descriptors or credentials, may be passed
   48  * across UNIX domain sockets.  The potential for passing UNIX domain sockets
   49  * over other UNIX domain sockets requires the implementation of a simple
   50  * garbage collector to find and tear down cycles of disconnected sockets.
   51  *
   52  * TODO:
   53  *      SEQPACKET, RDM
   54  *      rethink name space problems
   55  *      need a proper out-of-band
   56  */
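
       /*
        * Editor's illustration (not part of the original source): a minimal
        * userland sketch of the ancillary-data path described above, passing
        * a file descriptor across a socketpair(2) with SCM_RIGHTS.  The
        * helper name send_fd() is hypothetical.
        *
        *      #include <sys/types.h>
        *      #include <sys/socket.h>
        *      #include <sys/uio.h>
        *      #include <string.h>
        *
        *      static int
        *      send_fd(int sock, int fd_to_pass)
        *      {
        *              struct msghdr msg;
        *              struct cmsghdr *cmsg;
        *              char buf[CMSG_SPACE(sizeof(int))];
        *              char dummy = 0;
        *              struct iovec iov = { &dummy, 1 };
        *
        *              memset(&msg, 0, sizeof(msg));
        *              memset(buf, 0, sizeof(buf));
        *              msg.msg_iov = &iov;
        *              msg.msg_iovlen = 1;
        *              msg.msg_control = buf;
        *              msg.msg_controllen = sizeof(buf);
        *              cmsg = CMSG_FIRSTHDR(&msg);
        *              cmsg->cmsg_level = SOL_SOCKET;
        *              cmsg->cmsg_type = SCM_RIGHTS;
        *              cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        *              memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
        *              return (sendmsg(sock, &msg, 0) == -1 ? -1 : 0);
        *      }
        *
        * The kernel-side handling of such a message is unp_internalize() and
        * unp_externalize() in this file.
        */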
   57 
   58 #include <sys/cdefs.h>
   59 __FBSDID("$FreeBSD: releng/7.4/sys/kern/uipc_usrreq.c 225827 2011-09-28 08:47:17Z bz $");
   60 
   61 #include "opt_ddb.h"
   62 #include "opt_mac.h"
   63 
   64 #include <sys/param.h>
   65 #include <sys/domain.h>
   66 #include <sys/fcntl.h>
   67 #include <sys/malloc.h>         /* XXX must be before <sys/file.h> */
   68 #include <sys/eventhandler.h>
   69 #include <sys/file.h>
   70 #include <sys/filedesc.h>
   71 #include <sys/jail.h>
   72 #include <sys/kernel.h>
   73 #include <sys/lock.h>
   74 #include <sys/mbuf.h>
   75 #include <sys/mount.h>
   76 #include <sys/mutex.h>
   77 #include <sys/namei.h>
   78 #include <sys/proc.h>
   79 #include <sys/protosw.h>
   80 #include <sys/resourcevar.h>
   81 #include <sys/rwlock.h>
   82 #include <sys/socket.h>
   83 #include <sys/socketvar.h>
   84 #include <sys/signalvar.h>
   85 #include <sys/stat.h>
   86 #include <sys/sx.h>
   87 #include <sys/sysctl.h>
   88 #include <sys/systm.h>
   89 #include <sys/taskqueue.h>
   90 #include <sys/un.h>
   91 #include <sys/unpcb.h>
   92 #include <sys/vnode.h>
   93 
   94 #ifdef DDB
   95 #include <ddb/ddb.h>
   96 #endif
   97 
   98 #include <security/mac/mac_framework.h>
   99 
  100 #include <vm/uma.h>
  101 
  102 static uma_zone_t       unp_zone;
  103 static unp_gen_t        unp_gencnt;
  104 static u_int            unp_count;      /* Count of local sockets. */
  105 static ino_t            unp_ino;        /* Prototype for fake inode numbers. */
  106 static int              unp_rights;     /* File descriptors in flight. */
  107 static struct unp_head  unp_shead;      /* List of local stream sockets. */
  108 static struct unp_head  unp_dhead;      /* List of local datagram sockets. */
  109 
  110 static const struct sockaddr    sun_noname = { sizeof(sun_noname), AF_LOCAL };
  111 
  112 /*
  113  * Garbage collection of cyclic file descriptor/socket references occurs
  114  * asynchronously in a taskqueue context in order to avoid recursion and
  115  * reentrance in the UNIX domain socket, file descriptor, and socket layer
  116  * code.  See unp_gc() for a full description.
  117  */
  118 static struct task      unp_gc_task;
  119 
  120 /*
  121  * Both send and receive buffers are allocated PIPSIZ bytes of buffering for
  122  * stream sockets, although the total for sender and receiver is actually
  123  * only PIPSIZ.
  124  *
  125  * Datagram sockets really use the sendspace as the maximum datagram size,
  126  * and don't really want to reserve the sendspace.  Their recvspace should be
  127  * large enough for at least one max-size datagram plus address.
  128  */
  129 #ifndef PIPSIZ
  130 #define PIPSIZ  8192
  131 #endif
  132 static u_long   unpst_sendspace = PIPSIZ;
  133 static u_long   unpst_recvspace = PIPSIZ;
  134 static u_long   unpdg_sendspace = 2*1024;       /* really max datagram size */
  135 static u_long   unpdg_recvspace = 4*1024;
  136 
  137 SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW, 0, "Local domain");
  138 SYSCTL_NODE(_net_local, SOCK_STREAM, stream, CTLFLAG_RW, 0, "SOCK_STREAM");
  139 SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, CTLFLAG_RW, 0, "SOCK_DGRAM");
  140 
  141 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
  142            &unpst_sendspace, 0, "");
  143 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
  144            &unpst_recvspace, 0, "");
  145 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
  146            &unpdg_sendspace, 0, "");
  147 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
  148            &unpdg_recvspace, 0, "");
  149 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, "");
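
       /*
        * Editor's note (illustrative, not part of the original source): the
        * tunables above surface as sysctls under net.local, for example
        * net.local.stream.sendspace, net.local.dgram.maxdgram, and the
        * read-only net.local.inflight count of descriptors in flight.  A
        * userland read might look roughly like this (error handling omitted):
        *
        *      #include <sys/types.h>
        *      #include <sys/sysctl.h>
        *
        *      u_long space;
        *      size_t len = sizeof(space);
        *
        *      (void)sysctlbyname("net.local.stream.sendspace", &space, &len,
        *          NULL, 0);
        *
        * The OID names follow from the SYSCTL_NODE() and SYSCTL_ULONG()
        * declarations above.
        */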
  150 
  151 /*-
  152  * Locking and synchronization:
  153  *
  154  * The global UNIX domain socket rwlock (unp_global_rwlock) protects all
  155  * global variables, including the linked lists tracking the set of allocated
  156  * UNIX domain sockets.  The global rwlock also serves to prevent deadlock
  157  * when more than one PCB lock is acquired at a time (i.e., during
  158  * connect()).  Finally, the global rwlock protects uncounted references from
  159  * vnodes to sockets bound to those vnodes: to safely dereference the
  160  * v_socket pointer, the global rwlock must be held while a full reference is
  161  * acquired.
  162  *
  163  * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
  164  * allocated in pru_attach() and freed in pru_detach().  The validity of that
  165  * pointer is an invariant, so no lock is required to dereference the so_pcb
  166  * pointer if a valid socket reference is held by the caller.  In practice,
  167  * this is always true during operations performed on a socket.  Each unpcb
  168  * has a back-pointer to its socket, unp_socket, which will be stable under
  169  * the same circumstances.
  170  *
  171  * This pointer may only be safely dereferenced as long as a valid reference
  172  * to the unpcb is held.  Typically, this reference will be from the socket,
  173  * or from another unpcb when the referring unpcb's lock is held (in order
  174  * that the reference not be invalidated during use).  For example, to follow
   175  * unp->unp_conn->unp_socket, you need to lock the lock on unp, not unp_conn,
  176  * as unp_socket remains valid as long as the reference to unp_conn is valid.
  177  *
   178  * Fields of unpcbs are locked using a per-unpcb lock, unp_mtx.  Individual
   179  * fields may be read "lockless" when the read itself is atomic, but more
   180  * complex reads and read-modify-writes require the mutex to be held.  No
  181  * lock order is defined between unpcb locks -- multiple unpcb locks may be
  182  * acquired at the same time only when holding the global UNIX domain socket
  183  * rwlock exclusively, which prevents deadlocks.
  184  *
  185  * Blocking with UNIX domain sockets is a tricky issue: unlike most network
  186  * protocols, bind() is a non-atomic operation, and connect() requires
  187  * potential sleeping in the protocol, due to potentially waiting on local or
  188  * distributed file systems.  We try to separate "lookup" operations, which
  189  * may sleep, and the IPC operations themselves, which typically can occur
  190  * with relative atomicity as locks can be held over the entire operation.
  191  *
  192  * Another tricky issue is simultaneous multi-threaded or multi-process
  193  * access to a single UNIX domain socket.  These are handled by the flags
  194  * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
  195  * binding, both of which involve dropping UNIX domain socket locks in order
  196  * to perform namei() and other file system operations.
  197  */
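
       /*
        * Editor's illustration (not part of the original source): the rules
        * above translate into a recurring pattern in the pr_usrreqs methods
        * below -- take the global lock first (write-locked whenever more
        * than one PCB lock may be needed), then the per-unpcb mutexes:
        *
        *      UNP_GLOBAL_WLOCK();
        *      UNP_PCB_LOCK(unp);
        *      unp2 = unp->unp_conn;
        *      if (unp2 != NULL) {
        *              UNP_PCB_LOCK(unp2);
        *              (operate on the connected pair)
        *              UNP_PCB_UNLOCK(unp2);
        *      }
        *      UNP_PCB_UNLOCK(unp);
        *      UNP_GLOBAL_WUNLOCK();
        *
        * uipc_close() and uipc_disconnect() below are concrete instances of
        * this pattern; the macros themselves are defined immediately below.
        */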
  198 static struct rwlock    unp_global_rwlock;
  199 
  200 #define UNP_GLOBAL_LOCK_INIT()          rw_init(&unp_global_rwlock,     \
  201                                             "unp_global_rwlock")
  202 
  203 #define UNP_GLOBAL_LOCK_ASSERT()        rw_assert(&unp_global_rwlock,   \
  204                                             RA_LOCKED)
  205 #define UNP_GLOBAL_UNLOCK_ASSERT()      rw_assert(&unp_global_rwlock,   \
  206                                             RA_UNLOCKED)
  207 
  208 #define UNP_GLOBAL_WLOCK()              rw_wlock(&unp_global_rwlock)
  209 #define UNP_GLOBAL_WUNLOCK()            rw_wunlock(&unp_global_rwlock)
  210 #define UNP_GLOBAL_WLOCK_ASSERT()       rw_assert(&unp_global_rwlock,   \
  211                                             RA_WLOCKED)
  212 #define UNP_GLOBAL_WOWNED()             rw_wowned(&unp_global_rwlock)
  213 
  214 #define UNP_GLOBAL_RLOCK()              rw_rlock(&unp_global_rwlock)
  215 #define UNP_GLOBAL_RUNLOCK()            rw_runlock(&unp_global_rwlock)
  216 #define UNP_GLOBAL_RLOCK_ASSERT()       rw_assert(&unp_global_rwlock,   \
  217                                             RA_RLOCKED)
  218 
  219 #define UNP_PCB_LOCK_INIT(unp)          mtx_init(&(unp)->unp_mtx,       \
  220                                             "unp_mtx", "unp_mtx",       \
  221                                             MTX_DUPOK|MTX_DEF|MTX_RECURSE)
  222 #define UNP_PCB_LOCK_DESTROY(unp)       mtx_destroy(&(unp)->unp_mtx)
  223 #define UNP_PCB_LOCK(unp)               mtx_lock(&(unp)->unp_mtx)
  224 #define UNP_PCB_UNLOCK(unp)             mtx_unlock(&(unp)->unp_mtx)
  225 #define UNP_PCB_LOCK_ASSERT(unp)        mtx_assert(&(unp)->unp_mtx, MA_OWNED)
  226 
  227 static int      uipc_connect2(struct socket *, struct socket *);
  228 static int      uipc_ctloutput(struct socket *, struct sockopt *);
  229 static int      unp_connect(struct socket *, struct sockaddr *,
  230                     struct thread *);
  231 static int      unp_connect2(struct socket *so, struct socket *so2, int);
  232 static void     unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
  233 static void     unp_dispose(struct mbuf *);
  234 static void     unp_shutdown(struct unpcb *);
  235 static void     unp_drop(struct unpcb *, int);
  236 static void     unp_gc(__unused void *, int);
  237 static void     unp_scan(struct mbuf *, void (*)(struct file *));
  238 static void     unp_mark(struct file *);
  239 static void     unp_discard(struct file *);
  240 static void     unp_freerights(struct file **, int);
  241 static void     unp_init(void);
  242 static int      unp_internalize(struct mbuf **, struct thread *);
  243 static int      unp_externalize(struct mbuf *, struct mbuf **);
  244 static struct mbuf      *unp_addsockcred(struct thread *, struct mbuf *);
  245 
  246 /*
  247  * Definitions of protocols supported in the LOCAL domain.
  248  */
  249 static struct domain localdomain;
  250 static struct pr_usrreqs uipc_usrreqs_dgram, uipc_usrreqs_stream;
  251 static struct protosw localsw[] = {
  252 {
  253         .pr_type =              SOCK_STREAM,
  254         .pr_domain =            &localdomain,
  255         .pr_flags =             PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS,
  256         .pr_ctloutput =         &uipc_ctloutput,
  257         .pr_usrreqs =           &uipc_usrreqs_stream
  258 },
  259 {
  260         .pr_type =              SOCK_DGRAM,
  261         .pr_domain =            &localdomain,
  262         .pr_flags =             PR_ATOMIC|PR_ADDR|PR_RIGHTS,
  263         .pr_usrreqs =           &uipc_usrreqs_dgram
  264 },
  265 };
  266 
  267 static struct domain localdomain = {
  268         .dom_family =           AF_LOCAL,
  269         .dom_name =             "local",
  270         .dom_init =             unp_init,
  271         .dom_externalize =      unp_externalize,
  272         .dom_dispose =          unp_dispose,
  273         .dom_protosw =          localsw,
  274         .dom_protoswNPROTOSW =  &localsw[sizeof(localsw)/sizeof(localsw[0])]
  275 };
  276 DOMAIN_SET(local);
  277 
  278 static void
  279 uipc_abort(struct socket *so)
  280 {
  281         struct unpcb *unp, *unp2;
  282 
  283         unp = sotounpcb(so);
  284         KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
  285 
  286         UNP_GLOBAL_WLOCK();
  287         UNP_PCB_LOCK(unp);
  288         unp2 = unp->unp_conn;
  289         if (unp2 != NULL) {
  290                 UNP_PCB_LOCK(unp2);
  291                 unp_drop(unp2, ECONNABORTED);
  292                 UNP_PCB_UNLOCK(unp2);
  293         }
  294         UNP_PCB_UNLOCK(unp);
  295         UNP_GLOBAL_WUNLOCK();
  296 }
  297 
  298 static int
  299 uipc_accept(struct socket *so, struct sockaddr **nam)
  300 {
  301         struct unpcb *unp, *unp2;
  302         const struct sockaddr *sa;
  303 
  304         /*
  305          * Pass back name of connected socket, if it was bound and we are
  306          * still connected (our peer may have closed already!).
  307          */
  308         unp = sotounpcb(so);
  309         KASSERT(unp != NULL, ("uipc_accept: unp == NULL"));
  310 
  311         *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
  312         UNP_GLOBAL_RLOCK();
  313         unp2 = unp->unp_conn;
  314         if (unp2 != NULL && unp2->unp_addr != NULL) {
  315                 UNP_PCB_LOCK(unp2);
  316                 sa = (struct sockaddr *) unp2->unp_addr;
  317                 bcopy(sa, *nam, sa->sa_len);
  318                 UNP_PCB_UNLOCK(unp2);
  319         } else {
  320                 sa = &sun_noname;
  321                 bcopy(sa, *nam, sa->sa_len);
  322         }
  323         UNP_GLOBAL_RUNLOCK();
  324         return (0);
  325 }
  326 
  327 static int
  328 uipc_attach(struct socket *so, int proto, struct thread *td)
  329 {
  330         u_long sendspace, recvspace;
  331         struct unpcb *unp;
  332         int error, locked;
  333 
  334         KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
  335         if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
  336                 switch (so->so_type) {
  337                 case SOCK_STREAM:
  338                         sendspace = unpst_sendspace;
  339                         recvspace = unpst_recvspace;
  340                         break;
  341 
  342                 case SOCK_DGRAM:
  343                         sendspace = unpdg_sendspace;
  344                         recvspace = unpdg_recvspace;
  345                         break;
  346 
  347                 default:
  348                         panic("uipc_attach");
  349                 }
  350                 error = soreserve(so, sendspace, recvspace);
  351                 if (error)
  352                         return (error);
  353         }
  354         unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
  355         if (unp == NULL)
  356                 return (ENOBUFS);
  357         LIST_INIT(&unp->unp_refs);
  358         UNP_PCB_LOCK_INIT(unp);
  359         unp->unp_socket = so;
  360         so->so_pcb = unp;
  361         unp->unp_refcount = 1;
  362 
  363         /*
  364          * uipc_attach() may be called indirectly from within the UNIX domain
  365          * socket code via sonewconn() in unp_connect().  Since rwlocks can
  366          * not be recursed, we do the closest thing.
  367          */
  368         locked = 0;
  369         if (!UNP_GLOBAL_WOWNED()) {
  370                 UNP_GLOBAL_WLOCK();
  371                 locked = 1;
  372         }
  373         unp->unp_gencnt = ++unp_gencnt;
  374         unp_count++;
  375         LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead : &unp_shead,
  376             unp, unp_link);
  377         if (locked)
  378                 UNP_GLOBAL_WUNLOCK();
  379 
  380         return (0);
  381 }
  382 
  383 static int
  384 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
  385 {
  386         struct sockaddr_un *soun = (struct sockaddr_un *)nam;
  387         struct vattr vattr;
  388         int error, namelen, vfslocked;
  389         struct nameidata nd;
  390         struct unpcb *unp;
  391         struct vnode *vp;
  392         struct mount *mp;
  393         char *buf;
  394 
  395         unp = sotounpcb(so);
  396         KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));
  397 
  398         if (soun->sun_len > sizeof(struct sockaddr_un))
  399                 return (EINVAL);
  400         namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
  401         if (namelen <= 0)
  402                 return (EINVAL);
  403 
  404         /*
  405          * We don't allow simultaneous bind() calls on a single UNIX domain
  406          * socket, so flag in-progress operations, and return an error if an
  407          * operation is already in progress.
  408          *
  409          * Historically, we have not allowed a socket to be rebound, so this
  410          * also returns an error.  Not allowing re-binding simplifies the
  411          * implementation and avoids a great many possible failure modes.
  412          */
  413         UNP_PCB_LOCK(unp);
  414         if (unp->unp_vnode != NULL) {
  415                 UNP_PCB_UNLOCK(unp);
  416                 return (EINVAL);
  417         }
  418         if (unp->unp_flags & UNP_BINDING) {
  419                 UNP_PCB_UNLOCK(unp);
  420                 return (EALREADY);
  421         }
  422         unp->unp_flags |= UNP_BINDING;
  423         UNP_PCB_UNLOCK(unp);
  424 
  425         buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
  426         strlcpy(buf, soun->sun_path, namelen + 1);
  427 
  428 restart:
  429         vfslocked = 0;
  430         NDINIT(&nd, CREATE, MPSAFE | NOFOLLOW | LOCKPARENT | SAVENAME,
  431             UIO_SYSSPACE, buf, td);
  432 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
  433         error = namei(&nd);
  434         if (error)
  435                 goto error;
  436         vp = nd.ni_vp;
  437         vfslocked = NDHASGIANT(&nd);
  438         if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
  439                 NDFREE(&nd, NDF_ONLY_PNBUF);
  440                 if (nd.ni_dvp == vp)
  441                         vrele(nd.ni_dvp);
  442                 else
  443                         vput(nd.ni_dvp);
  444                 if (vp != NULL) {
  445                         vrele(vp);
  446                         error = EADDRINUSE;
  447                         goto error;
  448                 }
  449                 error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH);
  450                 if (error)
  451                         goto error;
  452                 VFS_UNLOCK_GIANT(vfslocked);
  453                 goto restart;
  454         }
  455         VATTR_NULL(&vattr);
  456         vattr.va_type = VSOCK;
  457         vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask);
  458 #ifdef MAC
  459         error = mac_check_vnode_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
  460             &vattr);
  461 #endif
  462         if (error == 0) {
  463                 VOP_LEASE(nd.ni_dvp, td, td->td_ucred, LEASE_WRITE);
  464                 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
  465         }
  466         NDFREE(&nd, NDF_ONLY_PNBUF);
  467         vput(nd.ni_dvp);
  468         if (error) {
  469                 vn_finished_write(mp);
  470                 goto error;
  471         }
  472         vp = nd.ni_vp;
  473         ASSERT_VOP_ELOCKED(vp, "uipc_bind");
  474         soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);
  475 
  476         UNP_GLOBAL_WLOCK();
  477         UNP_PCB_LOCK(unp);
  478         vp->v_socket = unp->unp_socket;
  479         unp->unp_vnode = vp;
  480         unp->unp_addr = soun;
  481         unp->unp_flags &= ~UNP_BINDING;
  482         UNP_PCB_UNLOCK(unp);
  483         UNP_GLOBAL_WUNLOCK();
  484         VOP_UNLOCK(vp, 0, td);
  485         vn_finished_write(mp);
  486         VFS_UNLOCK_GIANT(vfslocked);
  487         free(buf, M_TEMP);
  488         return (0);
  489 
  490 error:
  491         VFS_UNLOCK_GIANT(vfslocked);
  492         UNP_PCB_LOCK(unp);
  493         unp->unp_flags &= ~UNP_BINDING;
  494         UNP_PCB_UNLOCK(unp);
  495         free(buf, M_TEMP);
  496         return (error);
  497 }
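
       /*
        * Illustrative userland counterpart (editor's addition, not part of
        * the original source): the path above is reached from a plain
        * bind(2) on an AF_LOCAL socket, which creates a VSOCK node in the
        * file system.  The helper name bind_local() is hypothetical:
        *
        *      #include <sys/socket.h>
        *      #include <sys/un.h>
        *      #include <string.h>
        *
        *      static int
        *      bind_local(int fd, const char *path)
        *      {
        *              struct sockaddr_un sun;
        *
        *              memset(&sun, 0, sizeof(sun));
        *              sun.sun_family = AF_LOCAL;
        *              if (strlcpy(sun.sun_path, path, sizeof(sun.sun_path)) >=
        *                  sizeof(sun.sun_path))
        *                      return (-1);
        *              sun.sun_len = SUN_LEN(&sun);
        *              return (bind(fd, (struct sockaddr *)&sun, SUN_LEN(&sun)));
        *      }
        *
        * As implemented above, bind(2) fails with EADDRINUSE if the node
        * already exists, rebinding an already-bound socket fails with
        * EINVAL, and the file system node is not removed when the socket is
        * closed.
        */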
  498 
  499 static int
  500 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
  501 {
  502         int error;
  503 
  504         KASSERT(td == curthread, ("uipc_connect: td != curthread"));
  505         UNP_GLOBAL_WLOCK();
  506         error = unp_connect(so, nam, td);
  507         UNP_GLOBAL_WUNLOCK();
  508         return (error);
  509 }
  510 
  511 static void
  512 uipc_close(struct socket *so)
  513 {
  514         struct unpcb *unp, *unp2;
  515 
  516         unp = sotounpcb(so);
  517         KASSERT(unp != NULL, ("uipc_close: unp == NULL"));
  518 
  519         UNP_GLOBAL_WLOCK();
  520         UNP_PCB_LOCK(unp);
  521         unp2 = unp->unp_conn;
  522         if (unp2 != NULL) {
  523                 UNP_PCB_LOCK(unp2);
  524                 unp_disconnect(unp, unp2);
  525                 UNP_PCB_UNLOCK(unp2);
  526         }
  527         UNP_PCB_UNLOCK(unp);
  528         UNP_GLOBAL_WUNLOCK();
  529 }
  530 
  531 static int
  532 uipc_connect2(struct socket *so1, struct socket *so2)
  533 {
  534         struct unpcb *unp, *unp2;
  535         int error;
  536 
  537         UNP_GLOBAL_WLOCK();
  538         unp = so1->so_pcb;
  539         KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
  540         UNP_PCB_LOCK(unp);
  541         unp2 = so2->so_pcb;
  542         KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
  543         UNP_PCB_LOCK(unp2);
  544         error = unp_connect2(so1, so2, PRU_CONNECT2);
  545         UNP_PCB_UNLOCK(unp2);
  546         UNP_PCB_UNLOCK(unp);
  547         UNP_GLOBAL_WUNLOCK();
  548         return (error);
  549 }
  550 
  551 static void
  552 uipc_detach(struct socket *so)
  553 {
  554         struct unpcb *unp, *unp2;
  555         struct sockaddr_un *saved_unp_addr;
  556         struct vnode *vp;
  557         int freeunp, local_unp_rights;
  558 
  559         unp = sotounpcb(so);
  560         KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));
  561 
  562         UNP_GLOBAL_WLOCK();
  563         UNP_PCB_LOCK(unp);
  564 
  565         LIST_REMOVE(unp, unp_link);
  566         unp->unp_gencnt = ++unp_gencnt;
  567         --unp_count;
  568 
  569         /*
  570          * XXXRW: Should assert vp->v_socket == so.
  571          */
  572         if ((vp = unp->unp_vnode) != NULL) {
  573                 unp->unp_vnode->v_socket = NULL;
  574                 unp->unp_vnode = NULL;
  575         }
  576         unp2 = unp->unp_conn;
  577         if (unp2 != NULL) {
  578                 UNP_PCB_LOCK(unp2);
  579                 unp_disconnect(unp, unp2);
  580                 UNP_PCB_UNLOCK(unp2);
  581         }
  582 
  583         /*
  584          * We hold the global lock exclusively, so it's OK to acquire
  585          * multiple pcb locks at a time.
  586          */
  587         while (!LIST_EMPTY(&unp->unp_refs)) {
  588                 struct unpcb *ref = LIST_FIRST(&unp->unp_refs);
  589 
  590                 UNP_PCB_LOCK(ref);
  591                 unp_drop(ref, ECONNRESET);
  592                 UNP_PCB_UNLOCK(ref);
  593         }
  594         local_unp_rights = unp_rights;
  595         UNP_GLOBAL_WUNLOCK();
  596         unp->unp_socket->so_pcb = NULL;
  597         saved_unp_addr = unp->unp_addr;
  598         unp->unp_addr = NULL;
  599         unp->unp_refcount--;
  600         freeunp = (unp->unp_refcount == 0);
  601         if (saved_unp_addr != NULL)
  602                 FREE(saved_unp_addr, M_SONAME);
  603         if (freeunp) {
  604                 UNP_PCB_LOCK_DESTROY(unp);
  605                 uma_zfree(unp_zone, unp);
  606         } else
  607                 UNP_PCB_UNLOCK(unp);
  608         if (vp) {
  609                 int vfslocked;
  610 
  611                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  612                 vrele(vp);
  613                 VFS_UNLOCK_GIANT(vfslocked);
  614         }
  615         if (local_unp_rights)
  616                 taskqueue_enqueue(taskqueue_thread, &unp_gc_task);
  617 }
  618 
  619 static int
  620 uipc_disconnect(struct socket *so)
  621 {
  622         struct unpcb *unp, *unp2;
  623 
  624         unp = sotounpcb(so);
  625         KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));
  626 
  627         UNP_GLOBAL_WLOCK();
  628         UNP_PCB_LOCK(unp);
  629         unp2 = unp->unp_conn;
  630         if (unp2 != NULL) {
  631                 UNP_PCB_LOCK(unp2);
  632                 unp_disconnect(unp, unp2);
  633                 UNP_PCB_UNLOCK(unp2);
  634         }
  635         UNP_PCB_UNLOCK(unp);
  636         UNP_GLOBAL_WUNLOCK();
  637         return (0);
  638 }
  639 
  640 static int
  641 uipc_listen(struct socket *so, int backlog, struct thread *td)
  642 {
  643         struct unpcb *unp;
  644         int error;
  645 
  646         unp = sotounpcb(so);
  647         KASSERT(unp != NULL, ("uipc_listen: unp == NULL"));
  648 
  649         UNP_PCB_LOCK(unp);
  650         if (unp->unp_vnode == NULL) {
  651                 UNP_PCB_UNLOCK(unp);
  652                 return (EINVAL);
  653         }
  654 
  655         SOCK_LOCK(so);
  656         error = solisten_proto_check(so);
  657         if (error == 0) {
  658                 cru2x(td->td_ucred, &unp->unp_peercred);
  659                 unp->unp_flags |= UNP_HAVEPCCACHED;
  660                 solisten_proto(so, backlog);
  661         }
  662         SOCK_UNLOCK(so);
  663         UNP_PCB_UNLOCK(unp);
  664         return (error);
  665 }
  666 
  667 static int
  668 uipc_peeraddr(struct socket *so, struct sockaddr **nam)
  669 {
  670         struct unpcb *unp, *unp2;
  671         const struct sockaddr *sa;
  672 
  673         unp = sotounpcb(so);
  674         KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));
  675 
  676         *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
  677         UNP_GLOBAL_RLOCK();
  678         /*
   679          * XXX: It seems that this test always fails even when the
   680          * connection is established, so this else clause was added as a
   681          * workaround to return the PF_LOCAL sockaddr.
  682          */
  683         unp2 = unp->unp_conn;
  684         if (unp2 != NULL) {
  685                 UNP_PCB_LOCK(unp2);
  686                 if (unp2->unp_addr != NULL)
  687                         sa = (struct sockaddr *) unp2->unp_addr;
  688                 else
  689                         sa = &sun_noname;
  690                 bcopy(sa, *nam, sa->sa_len);
  691                 UNP_PCB_UNLOCK(unp2);
  692         } else {
  693                 sa = &sun_noname;
  694                 bcopy(sa, *nam, sa->sa_len);
  695         }
  696         UNP_GLOBAL_RUNLOCK();
  697         return (0);
  698 }
  699 
  700 static int
  701 uipc_rcvd(struct socket *so, int flags)
  702 {
  703         struct unpcb *unp, *unp2;
  704         struct socket *so2;
  705         u_int mbcnt, sbcc;
  706         u_long newhiwat;
  707 
  708         unp = sotounpcb(so);
  709         KASSERT(unp != NULL, ("uipc_rcvd: unp == NULL"));
  710 
  711         if (so->so_type == SOCK_DGRAM)
  712                 panic("uipc_rcvd DGRAM?");
  713 
  714         if (so->so_type != SOCK_STREAM)
  715                 panic("uipc_rcvd unknown socktype");
  716 
  717         /*
   718          * Adjust backpressure on the sender and wake up anyone waiting to write.
  719          *
  720          * The unp lock is acquired to maintain the validity of the unp_conn
  721          * pointer; no lock on unp2 is required as unp2->unp_socket will be
  722          * static as long as we don't permit unp2 to disconnect from unp,
  723          * which is prevented by the lock on unp.  We cache values from
  724          * so_rcv to avoid holding the so_rcv lock over the entire
  725          * transaction on the remote so_snd.
  726          */
  727         SOCKBUF_LOCK(&so->so_rcv);
  728         mbcnt = so->so_rcv.sb_mbcnt;
  729         sbcc = so->so_rcv.sb_cc;
  730         SOCKBUF_UNLOCK(&so->so_rcv);
  731         UNP_PCB_LOCK(unp);
  732         unp2 = unp->unp_conn;
  733         if (unp2 == NULL) {
  734                 UNP_PCB_UNLOCK(unp);
  735                 return (0);
  736         }
  737         so2 = unp2->unp_socket;
  738         SOCKBUF_LOCK(&so2->so_snd);
  739         so2->so_snd.sb_mbmax += unp->unp_mbcnt - mbcnt;
  740         newhiwat = so2->so_snd.sb_hiwat + unp->unp_cc - sbcc;
  741         (void)chgsbsize(so2->so_cred->cr_uidinfo, &so2->so_snd.sb_hiwat,
  742             newhiwat, RLIM_INFINITY);
  743         sowwakeup_locked(so2);
  744         unp->unp_mbcnt = mbcnt;
  745         unp->unp_cc = sbcc;
  746         UNP_PCB_UNLOCK(unp);
  747         return (0);
  748 }
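
       /*
        * Editor's note (illustrative, not part of the original source): the
        * hiwat arithmetic above returns send-side credit as the receiver
        * drains its buffer.  For example, if 2048 bytes were unread at the
        * last update (unp_cc == 2048) and the receiver has now consumed them
        * all (sbcc == 0), then newhiwat = sb_hiwat + 2048 - 0, restoring
        * 2048 bytes of send capacity; uipc_send() makes the matching
        * reduction as data is queued.
        */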
  749 
  750 static int
  751 uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
  752     struct mbuf *control, struct thread *td)
  753 {
  754         struct unpcb *unp, *unp2;
  755         struct socket *so2;
  756         u_int mbcnt_delta, sbcc;
  757         u_long newhiwat;
  758         int error = 0;
  759 
  760         unp = sotounpcb(so);
  761         KASSERT(unp != NULL, ("uipc_send: unp == NULL"));
  762 
  763         if (flags & PRUS_OOB) {
  764                 error = EOPNOTSUPP;
  765                 goto release;
  766         }
  767         if (control != NULL && (error = unp_internalize(&control, td)))
  768                 goto release;
  769         if ((nam != NULL) || (flags & PRUS_EOF))
  770                 UNP_GLOBAL_WLOCK();
  771         else
  772                 UNP_GLOBAL_RLOCK();
  773         switch (so->so_type) {
  774         case SOCK_DGRAM:
  775         {
  776                 const struct sockaddr *from;
  777 
  778                 unp2 = unp->unp_conn;
  779                 if (nam != NULL) {
  780                         UNP_GLOBAL_WLOCK_ASSERT();
  781                         if (unp2 != NULL) {
  782                                 error = EISCONN;
  783                                 break;
  784                         }
  785                         error = unp_connect(so, nam, td);
  786                         if (error)
  787                                 break;
  788                         unp2 = unp->unp_conn;
  789                 }
  790 
  791                 /*
  792                  * Because connect() and send() are non-atomic in a sendto()
  793                  * with a target address, it's possible that the socket will
  794                  * have disconnected before the send() can run.  In that case
  795                  * return the slightly counter-intuitive but otherwise
  796                  * correct error that the socket is not connected.
  797                  */
  798                 if (unp2 == NULL) {
  799                         error = ENOTCONN;
  800                         break;
  801                 }
  802                 /* Lockless read. */
  803                 if (unp2->unp_flags & UNP_WANTCRED)
  804                         control = unp_addsockcred(td, control);
  805                 UNP_PCB_LOCK(unp);
  806                 if (unp->unp_addr != NULL)
  807                         from = (struct sockaddr *)unp->unp_addr;
  808                 else
  809                         from = &sun_noname;
  810                 so2 = unp2->unp_socket;
  811                 SOCKBUF_LOCK(&so2->so_rcv);
  812                 if (sbappendaddr_locked(&so2->so_rcv, from, m, control)) {
  813                         sorwakeup_locked(so2);
  814                         m = NULL;
  815                         control = NULL;
  816                 } else {
  817                         SOCKBUF_UNLOCK(&so2->so_rcv);
  818                         error = ENOBUFS;
  819                 }
  820                 if (nam != NULL) {
  821                         UNP_GLOBAL_WLOCK_ASSERT();
  822                         UNP_PCB_LOCK(unp2);
  823                         unp_disconnect(unp, unp2);
  824                         UNP_PCB_UNLOCK(unp2);
  825                 }
  826                 UNP_PCB_UNLOCK(unp);
  827                 break;
  828         }
  829 
  830         case SOCK_STREAM:
  831                 if ((so->so_state & SS_ISCONNECTED) == 0) {
  832                         if (nam != NULL) {
  833                                 UNP_GLOBAL_WLOCK_ASSERT();
  834                                 error = unp_connect(so, nam, td);
  835                                 if (error)
  836                                         break;  /* XXX */
  837                         } else {
  838                                 error = ENOTCONN;
  839                                 break;
  840                         }
  841                 }
  842 
  843                 /* Lockless read. */
  844                 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
  845                         error = EPIPE;
  846                         break;
  847                 }
  848 
  849                 /*
  850                  * Because connect() and send() are non-atomic in a sendto()
  851                  * with a target address, it's possible that the socket will
  852                  * have disconnected before the send() can run.  In that case
  853                  * return the slightly counter-intuitive but otherwise
  854                  * correct error that the socket is not connected.
  855                  *
  856                  * Locking here must be done carefully: the global lock
  857                  * prevents interconnections between unpcbs from changing, so
  858                  * we can traverse from unp to unp2 without acquiring unp's
  859                  * lock.  Socket buffer locks follow unpcb locks, so we can
   860          * acquire both remote and local socket buffer locks.
  861                  */
  862                 unp2 = unp->unp_conn;
  863                 if (unp2 == NULL) {
  864                         error = ENOTCONN;
  865                         break;
  866                 }
  867                 so2 = unp2->unp_socket;
  868                 UNP_PCB_LOCK(unp2);
  869                 SOCKBUF_LOCK(&so2->so_rcv);
  870                 if (unp2->unp_flags & UNP_WANTCRED) {
  871                         /*
  872                          * Credentials are passed only once on SOCK_STREAM.
  873                          */
  874                         unp2->unp_flags &= ~UNP_WANTCRED;
  875                         control = unp_addsockcred(td, control);
  876                 }
  877                 /*
  878                  * Send to paired receive port, and then reduce send buffer
  879                  * hiwater marks to maintain backpressure.  Wake up readers.
  880                  */
  881                 if (control != NULL) {
  882                         if (sbappendcontrol_locked(&so2->so_rcv, m, control))
  883                                 control = NULL;
  884                 } else
  885                         sbappend_locked(&so2->so_rcv, m);
  886                 mbcnt_delta = so2->so_rcv.sb_mbcnt - unp2->unp_mbcnt;
  887                 unp2->unp_mbcnt = so2->so_rcv.sb_mbcnt;
  888                 sbcc = so2->so_rcv.sb_cc;
  889                 sorwakeup_locked(so2);
  890 
  891                 SOCKBUF_LOCK(&so->so_snd);
  892                 newhiwat = so->so_snd.sb_hiwat - (sbcc - unp2->unp_cc);
  893                 (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat,
  894                     newhiwat, RLIM_INFINITY);
  895                 so->so_snd.sb_mbmax -= mbcnt_delta;
  896                 SOCKBUF_UNLOCK(&so->so_snd);
  897                 unp2->unp_cc = sbcc;
  898                 UNP_PCB_UNLOCK(unp2);
  899                 m = NULL;
  900                 break;
  901 
  902         default:
  903                 panic("uipc_send unknown socktype");
  904         }
  905 
  906         /*
  907          * PRUS_EOF is equivalent to pru_send followed by pru_shutdown.
  908          */
  909         if (flags & PRUS_EOF) {
  910                 UNP_PCB_LOCK(unp);
  911                 socantsendmore(so);
  912                 unp_shutdown(unp);
  913                 UNP_PCB_UNLOCK(unp);
  914         }
  915 
  916         if ((nam != NULL) || (flags & PRUS_EOF))
  917                 UNP_GLOBAL_WUNLOCK();
  918         else
  919                 UNP_GLOBAL_RUNLOCK();
  920 
  921         if (control != NULL && error != 0)
  922                 unp_dispose(control);
  923 
  924 release:
  925         if (control != NULL)
  926                 m_freem(control);
  927         if (m != NULL)
  928                 m_freem(m);
  929         return (error);
  930 }
  931 
  932 static int
  933 uipc_sense(struct socket *so, struct stat *sb)
  934 {
  935         struct unpcb *unp, *unp2;
  936         struct socket *so2;
  937 
  938         unp = sotounpcb(so);
  939         KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));
  940 
  941         sb->st_blksize = so->so_snd.sb_hiwat;
  942         UNP_GLOBAL_RLOCK();
  943         UNP_PCB_LOCK(unp);
  944         unp2 = unp->unp_conn;
  945         if (so->so_type == SOCK_STREAM && unp2 != NULL) {
  946                 so2 = unp2->unp_socket;
  947                 sb->st_blksize += so2->so_rcv.sb_cc;
  948         }
  949         sb->st_dev = NODEV;
  950         if (unp->unp_ino == 0)
  951                 unp->unp_ino = (++unp_ino == 0) ? ++unp_ino : unp_ino;
  952         sb->st_ino = unp->unp_ino;
  953         UNP_PCB_UNLOCK(unp);
  954         UNP_GLOBAL_RUNLOCK();
  955         return (0);
  956 }
  957 
  958 static int
  959 uipc_shutdown(struct socket *so)
  960 {
  961         struct unpcb *unp;
  962 
  963         unp = sotounpcb(so);
  964         KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL"));
  965 
  966         UNP_GLOBAL_WLOCK();
  967         UNP_PCB_LOCK(unp);
  968         socantsendmore(so);
  969         unp_shutdown(unp);
  970         UNP_PCB_UNLOCK(unp);
  971         UNP_GLOBAL_WUNLOCK();
  972         return (0);
  973 }
  974 
  975 static int
  976 uipc_sockaddr(struct socket *so, struct sockaddr **nam)
  977 {
  978         struct unpcb *unp;
  979         const struct sockaddr *sa;
  980 
  981         unp = sotounpcb(so);
  982         KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL"));
  983 
  984         *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
  985         UNP_PCB_LOCK(unp);
  986         if (unp->unp_addr != NULL)
  987                 sa = (struct sockaddr *) unp->unp_addr;
  988         else
  989                 sa = &sun_noname;
  990         bcopy(sa, *nam, sa->sa_len);
  991         UNP_PCB_UNLOCK(unp);
  992         return (0);
  993 }
  994 
  995 static struct pr_usrreqs uipc_usrreqs_dgram = {
  996         .pru_abort =            uipc_abort,
  997         .pru_accept =           uipc_accept,
  998         .pru_attach =           uipc_attach,
  999         .pru_bind =             uipc_bind,
 1000         .pru_connect =          uipc_connect,
 1001         .pru_connect2 =         uipc_connect2,
 1002         .pru_detach =           uipc_detach,
 1003         .pru_disconnect =       uipc_disconnect,
 1004         .pru_listen =           uipc_listen,
 1005         .pru_peeraddr =         uipc_peeraddr,
 1006         .pru_rcvd =             uipc_rcvd,
 1007         .pru_send =             uipc_send,
 1008         .pru_sense =            uipc_sense,
 1009         .pru_shutdown =         uipc_shutdown,
 1010         .pru_sockaddr =         uipc_sockaddr,
 1011         .pru_soreceive =        soreceive_dgram,
 1012         .pru_close =            uipc_close,
 1013 };
 1014 
 1015 static struct pr_usrreqs uipc_usrreqs_stream = {
 1016         .pru_abort =            uipc_abort,
 1017         .pru_accept =           uipc_accept,
 1018         .pru_attach =           uipc_attach,
 1019         .pru_bind =             uipc_bind,
 1020         .pru_connect =          uipc_connect,
 1021         .pru_connect2 =         uipc_connect2,
 1022         .pru_detach =           uipc_detach,
 1023         .pru_disconnect =       uipc_disconnect,
 1024         .pru_listen =           uipc_listen,
 1025         .pru_peeraddr =         uipc_peeraddr,
 1026         .pru_rcvd =             uipc_rcvd,
 1027         .pru_send =             uipc_send,
 1028         .pru_sense =            uipc_sense,
 1029         .pru_shutdown =         uipc_shutdown,
 1030         .pru_sockaddr =         uipc_sockaddr,
 1031         .pru_soreceive =        soreceive_generic,
 1032         .pru_close =            uipc_close,
 1033 };
 1034 
 1035 static int
 1036 uipc_ctloutput(struct socket *so, struct sockopt *sopt)
 1037 {
 1038         struct unpcb *unp;
 1039         struct xucred xu;
 1040         int error, optval;
 1041 
 1042         if (sopt->sopt_level != 0)
 1043                 return (EINVAL);
 1044 
 1045         unp = sotounpcb(so);
 1046         KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL"));
 1047         error = 0;
 1048         switch (sopt->sopt_dir) {
 1049         case SOPT_GET:
 1050                 switch (sopt->sopt_name) {
 1051                 case LOCAL_PEERCRED:
 1052                         UNP_PCB_LOCK(unp);
 1053                         if (unp->unp_flags & UNP_HAVEPC)
 1054                                 xu = unp->unp_peercred;
 1055                         else {
 1056                                 if (so->so_type == SOCK_STREAM)
 1057                                         error = ENOTCONN;
 1058                                 else
 1059                                         error = EINVAL;
 1060                         }
 1061                         UNP_PCB_UNLOCK(unp);
 1062                         if (error == 0)
 1063                                 error = sooptcopyout(sopt, &xu, sizeof(xu));
 1064                         break;
 1065 
 1066                 case LOCAL_CREDS:
 1067                         /* Unlocked read. */
 1068                         optval = unp->unp_flags & UNP_WANTCRED ? 1 : 0;
 1069                         error = sooptcopyout(sopt, &optval, sizeof(optval));
 1070                         break;
 1071 
 1072                 case LOCAL_CONNWAIT:
 1073                         /* Unlocked read. */
 1074                         optval = unp->unp_flags & UNP_CONNWAIT ? 1 : 0;
 1075                         error = sooptcopyout(sopt, &optval, sizeof(optval));
 1076                         break;
 1077 
 1078                 default:
 1079                         error = EOPNOTSUPP;
 1080                         break;
 1081                 }
 1082                 break;
 1083 
 1084         case SOPT_SET:
 1085                 switch (sopt->sopt_name) {
 1086                 case LOCAL_CREDS:
 1087                 case LOCAL_CONNWAIT:
 1088                         error = sooptcopyin(sopt, &optval, sizeof(optval),
 1089                                             sizeof(optval));
 1090                         if (error)
 1091                                 break;
 1092 
 1093 #define OPTSET(bit) do {                                                \
 1094         UNP_PCB_LOCK(unp);                                              \
 1095         if (optval)                                                     \
 1096                 unp->unp_flags |= bit;                                  \
 1097         else                                                            \
 1098                 unp->unp_flags &= ~bit;                                 \
 1099         UNP_PCB_UNLOCK(unp);                                            \
 1100 } while (0)
 1101 
 1102                         switch (sopt->sopt_name) {
 1103                         case LOCAL_CREDS:
 1104                                 OPTSET(UNP_WANTCRED);
 1105                                 break;
 1106 
 1107                         case LOCAL_CONNWAIT:
 1108                                 OPTSET(UNP_CONNWAIT);
 1109                                 break;
 1110 
 1111                         default:
 1112                                 break;
 1113                         }
 1114                         break;
 1115 #undef  OPTSET
 1116                 default:
 1117                         error = ENOPROTOOPT;
 1118                         break;
 1119                 }
 1120                 break;
 1121 
 1122         default:
 1123                 error = EOPNOTSUPP;
 1124                 break;
 1125         }
 1126         return (error);
 1127 }
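
       /*
        * Illustrative userland sketch (editor's addition, not part of the
        * original source): the options handled above are exercised with
        * getsockopt(2)/setsockopt(2) at level 0, e.g. fetching the peer's
        * cached credentials on a connected SOCK_STREAM socket (fd is assumed
        * to be such a socket):
        *
        *      #include <sys/types.h>
        *      #include <sys/socket.h>
        *      #include <sys/ucred.h>
        *      #include <sys/un.h>
        *
        *      struct xucred xucred;
        *      socklen_t len = sizeof(xucred);
        *      int on = 1;
        *
        *      if (getsockopt(fd, 0, LOCAL_PEERCRED, &xucred, &len) == 0 &&
        *          xucred.cr_version == XUCRED_VERSION)
        *              (use xucred.cr_uid and xucred.cr_groups)
        *
        *      (void)setsockopt(fd, 0, LOCAL_CREDS, &on, sizeof(on));
        *
        * LOCAL_CREDS asks that an SCM_CREDS control message accompany data
        * received on the socket; see unp_addsockcred() in this file.
        */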
 1128 
 1129 static int
 1130 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
 1131 {
 1132         struct sockaddr_un *soun = (struct sockaddr_un *)nam;
 1133         struct vnode *vp;
 1134         struct socket *so2, *so3;
 1135         struct unpcb *unp, *unp2, *unp3;
 1136         int error, len, vfslocked;
 1137         struct nameidata nd;
 1138         char buf[SOCK_MAXADDRLEN];
 1139         struct sockaddr *sa;
 1140 
 1141         UNP_GLOBAL_WLOCK_ASSERT();
 1142 
 1143         unp = sotounpcb(so);
 1144         KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
 1145 
 1146         if (nam->sa_len > sizeof(struct sockaddr_un))
 1147                 return (EINVAL);
 1148         len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
 1149         if (len <= 0)
 1150                 return (EINVAL);
 1151         strlcpy(buf, soun->sun_path, len + 1);
 1152 
 1153         UNP_PCB_LOCK(unp);
 1154         if (unp->unp_flags & UNP_CONNECTING) {
 1155                 UNP_PCB_UNLOCK(unp);
 1156                 return (EALREADY);
 1157         }
 1158         UNP_GLOBAL_WUNLOCK();
 1159         unp->unp_flags |= UNP_CONNECTING;
 1160         UNP_PCB_UNLOCK(unp);
 1161 
 1162         sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
 1163         NDINIT(&nd, LOOKUP, MPSAFE | FOLLOW | LOCKLEAF, UIO_SYSSPACE, buf,
 1164             td);
 1165         error = namei(&nd);
 1166         if (error)
 1167                 vp = NULL;
 1168         else
 1169                 vp = nd.ni_vp;
 1170         ASSERT_VOP_LOCKED(vp, "unp_connect");
 1171         vfslocked = NDHASGIANT(&nd);
 1172         NDFREE(&nd, NDF_ONLY_PNBUF);
 1173         if (error)
 1174                 goto bad;
 1175 
 1176         if (vp->v_type != VSOCK) {
 1177                 error = ENOTSOCK;
 1178                 goto bad;
 1179         }
 1180 #ifdef MAC
 1181         error = mac_check_vnode_open(td->td_ucred, vp, VWRITE | VREAD);
 1182         if (error)
 1183                 goto bad;
 1184 #endif
 1185         error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td);
 1186         if (error)
 1187                 goto bad;
 1188         VFS_UNLOCK_GIANT(vfslocked);
 1189 
 1190         unp = sotounpcb(so);
 1191         KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
 1192 
 1193         /*
 1194          * Lock global lock for two reasons: make sure v_socket is stable,
 1195          * and to protect simultaneous locking of multiple pcbs.
 1196          */
 1197         UNP_GLOBAL_WLOCK();
 1198         so2 = vp->v_socket;
 1199         if (so2 == NULL) {
 1200                 error = ECONNREFUSED;
 1201                 goto bad2;
 1202         }
 1203         if (so->so_type != so2->so_type) {
 1204                 error = EPROTOTYPE;
 1205                 goto bad2;
 1206         }
 1207         if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
 1208                 if (so2->so_options & SO_ACCEPTCONN) {
 1209                         /*
 1210                          * We can't drop the global lock here or 'so2' may
 1211                          * become invalid.  As a result, we need to handle
  1212          * possible lock recursion in uipc_attach.
 1213                          */
 1214                         so3 = sonewconn(so2, 0);
 1215                 } else
 1216                         so3 = NULL;
 1217                 if (so3 == NULL) {
 1218                         error = ECONNREFUSED;
 1219                         goto bad2;
 1220                 }
 1221                 unp = sotounpcb(so);
 1222                 unp2 = sotounpcb(so2);
 1223                 unp3 = sotounpcb(so3);
 1224                 UNP_PCB_LOCK(unp);
 1225                 UNP_PCB_LOCK(unp2);
 1226                 UNP_PCB_LOCK(unp3);
 1227                 if (unp2->unp_addr != NULL) {
 1228                         bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
 1229                         unp3->unp_addr = (struct sockaddr_un *) sa;
 1230                         sa = NULL;
 1231                 }
 1232 
 1233                 /*
 1234                  * The connecter's (client's) credentials are copied from its
 1235                  * process structure at the time of connect() (which is now).
 1236                  */
 1237                 cru2x(td->td_ucred, &unp3->unp_peercred);
 1238                 unp3->unp_flags |= UNP_HAVEPC;
 1239 
 1240                 /*
 1241                  * The receiver's (server's) credentials are copied from the
  1242          * unp_peercred member of the socket on which the server called
 1243                  * listen(); uipc_listen() cached that process's credentials
 1244                  * at that time so we can use them now.
 1245                  */
 1246                 KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
 1247                     ("unp_connect: listener without cached peercred"));
 1248                 memcpy(&unp->unp_peercred, &unp2->unp_peercred,
 1249                     sizeof(unp->unp_peercred));
 1250                 unp->unp_flags |= UNP_HAVEPC;
 1251                 if (unp2->unp_flags & UNP_WANTCRED)
 1252                         unp3->unp_flags |= UNP_WANTCRED;
 1253                 UNP_PCB_UNLOCK(unp3);
 1254                 UNP_PCB_UNLOCK(unp2);
 1255                 UNP_PCB_UNLOCK(unp);
 1256 #ifdef MAC
 1257                 SOCK_LOCK(so);
 1258                 mac_set_socket_peer_from_socket(so, so3);
 1259                 mac_set_socket_peer_from_socket(so3, so);
 1260                 SOCK_UNLOCK(so);
 1261 #endif
 1262 
 1263                 so2 = so3;
 1264         }
 1265         unp = sotounpcb(so);
 1266         KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
 1267         unp2 = sotounpcb(so2);
 1268         KASSERT(unp2 != NULL, ("unp_connect: unp2 == NULL"));
 1269         UNP_PCB_LOCK(unp);
 1270         UNP_PCB_LOCK(unp2);
 1271         error = unp_connect2(so, so2, PRU_CONNECT);
 1272         UNP_PCB_UNLOCK(unp2);
 1273         UNP_PCB_UNLOCK(unp);
 1274 bad2:
 1275         UNP_GLOBAL_WUNLOCK();
 1276         if (vfslocked)
 1277                 /* 
  1278                  * Giant was previously acquired and released, so the file
  1279                  * system isn't MPSAFE; acquire Giant again for the vput().
 1280                  */
 1281                 mtx_lock(&Giant);
 1282 bad:
 1283         if (vp != NULL)
 1284                 vput(vp);
 1285         VFS_UNLOCK_GIANT(vfslocked);
 1286         free(sa, M_SONAME);
 1287         UNP_GLOBAL_WLOCK();
 1288         UNP_PCB_LOCK(unp);
 1289         unp->unp_flags &= ~UNP_CONNECTING;
 1290         UNP_PCB_UNLOCK(unp);
 1291         return (error);
 1292 }
 1293 
 1294 static int
 1295 unp_connect2(struct socket *so, struct socket *so2, int req)
 1296 {
 1297         struct unpcb *unp;
 1298         struct unpcb *unp2;
 1299 
 1300         unp = sotounpcb(so);
 1301         KASSERT(unp != NULL, ("unp_connect2: unp == NULL"));
 1302         unp2 = sotounpcb(so2);
 1303         KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
 1304 
 1305         UNP_GLOBAL_WLOCK_ASSERT();
 1306         UNP_PCB_LOCK_ASSERT(unp);
 1307         UNP_PCB_LOCK_ASSERT(unp2);
 1308 
 1309         if (so2->so_type != so->so_type)
 1310                 return (EPROTOTYPE);
 1311         unp->unp_conn = unp2;
 1312 
 1313         switch (so->so_type) {
 1314         case SOCK_DGRAM:
 1315                 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
 1316                 soisconnected(so);
 1317                 break;
 1318 
 1319         case SOCK_STREAM:
 1320                 unp2->unp_conn = unp;
 1321                 if (req == PRU_CONNECT &&
 1322                     ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT))
 1323                         soisconnecting(so);
 1324                 else
 1325                         soisconnected(so);
 1326                 soisconnected(so2);
 1327                 break;
 1328 
 1329         default:
 1330                 panic("unp_connect2");
 1331         }
 1332         return (0);
 1333 }
 1334 
 1335 static void
 1336 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
 1337 {
 1338         struct socket *so;
 1339 
 1340         KASSERT(unp2 != NULL, ("unp_disconnect: unp2 == NULL"));
 1341 
 1342         UNP_GLOBAL_WLOCK_ASSERT();
 1343         UNP_PCB_LOCK_ASSERT(unp);
 1344         UNP_PCB_LOCK_ASSERT(unp2);
 1345 
 1346         unp->unp_conn = NULL;
 1347         switch (unp->unp_socket->so_type) {
 1348         case SOCK_DGRAM:
 1349                 LIST_REMOVE(unp, unp_reflink);
 1350                 so = unp->unp_socket;
 1351                 SOCK_LOCK(so);
 1352                 so->so_state &= ~SS_ISCONNECTED;
 1353                 SOCK_UNLOCK(so);
 1354                 break;
 1355 
 1356         case SOCK_STREAM:
 1357                 soisdisconnected(unp->unp_socket);
 1358                 unp2->unp_conn = NULL;
 1359                 soisdisconnected(unp2->unp_socket);
 1360                 break;
 1361         }
 1362 }
 1363 
 1364 /*
 1365  * unp_pcblist() walks the global list of struct unpcb's to generate a
 1366  * pointer list, bumping the refcount on each unpcb.  It then copies them out
 1367  * sequentially, validating the generation number on each to see if it has
 1368  * been detached.  All of this is necessary because copyout() may sleep on
 1369  * disk I/O.
 1370  */
 1371 static int
 1372 unp_pcblist(SYSCTL_HANDLER_ARGS)
 1373 {
 1374         int error, i, n;
 1375         int freeunp;
 1376         struct unpcb *unp, **unp_list;
 1377         unp_gen_t gencnt;
 1378         struct xunpgen *xug;
 1379         struct unp_head *head;
 1380         struct xunpcb *xu;
 1381 
 1382         head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);
 1383 
 1384         /*
 1385          * The process of preparing the PCB list is too time-consuming and
 1386          * resource-intensive to do twice on every request.
 1387          */
 1388         if (req->oldptr == NULL) {
 1389                 n = unp_count;
 1390                 req->oldidx = 2 * (sizeof *xug)
 1391                         + (n + n/8) * sizeof(struct xunpcb);
 1392                 return (0);
 1393         }
 1394 
 1395         if (req->newptr != NULL)
 1396                 return (EPERM);
 1397 
 1398         /*
 1399          * OK, now we're committed to doing something.
 1400          */
 1401         xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK);
 1402         UNP_GLOBAL_RLOCK();
 1403         gencnt = unp_gencnt;
 1404         n = unp_count;
 1405         UNP_GLOBAL_RUNLOCK();
 1406 
 1407         xug->xug_len = sizeof *xug;
 1408         xug->xug_count = n;
 1409         xug->xug_gen = gencnt;
 1410         xug->xug_sogen = so_gencnt;
 1411         error = SYSCTL_OUT(req, xug, sizeof *xug);
 1412         if (error) {
 1413                 free(xug, M_TEMP);
 1414                 return (error);
 1415         }
 1416 
 1417         unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
 1418 
 1419         UNP_GLOBAL_RLOCK();
 1420         for (unp = LIST_FIRST(head), i = 0; unp && i < n;
 1421              unp = LIST_NEXT(unp, unp_link)) {
 1422                 UNP_PCB_LOCK(unp);
 1423                 if (unp->unp_gencnt <= gencnt) {
 1424                         if (cr_cansee(req->td->td_ucred,
 1425                             unp->unp_socket->so_cred)) {
 1426                                 UNP_PCB_UNLOCK(unp);
 1427                                 continue;
 1428                         }
 1429                         unp_list[i++] = unp;
 1430                         unp->unp_refcount++;
 1431                 }
 1432                 UNP_PCB_UNLOCK(unp);
 1433         }
 1434         UNP_GLOBAL_RUNLOCK();
 1435         n = i;                  /* In case we lost some during malloc. */
 1436 
 1437         error = 0;
 1438         xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO);
 1439         for (i = 0; i < n; i++) {
 1440                 unp = unp_list[i];
 1441                 UNP_PCB_LOCK(unp);
 1442                 unp->unp_refcount--;
 1443                 if (unp->unp_refcount != 0 && unp->unp_gencnt <= gencnt) {
 1444                         xu->xu_len = sizeof *xu;
 1445                         xu->xu_unpp = unp;
 1446                         /*
 1447                          * XXX - need more locking here to protect against
 1448                          * connect/disconnect races for SMP.
 1449                          */
 1450                         if (unp->unp_addr != NULL)
 1451                                 bcopy(unp->unp_addr, &xu->xu_addr,
 1452                                       unp->unp_addr->sun_len);
 1453                         if (unp->unp_conn != NULL &&
 1454                             unp->unp_conn->unp_addr != NULL)
 1455                                 bcopy(unp->unp_conn->unp_addr,
 1456                                       &xu->xu_caddr,
 1457                                       unp->unp_conn->unp_addr->sun_len);
 1458                         bcopy(unp, &xu->xu_unp, sizeof *unp);
 1459                         sotoxsocket(unp->unp_socket, &xu->xu_socket);
 1460                         UNP_PCB_UNLOCK(unp);
 1461                         error = SYSCTL_OUT(req, xu, sizeof *xu);
 1462                 } else {
 1463                         freeunp = (unp->unp_refcount == 0);
 1464                         UNP_PCB_UNLOCK(unp);
 1465                         if (freeunp) {
 1466                                 UNP_PCB_LOCK_DESTROY(unp);
 1467                                 uma_zfree(unp_zone, unp);
 1468                         }
 1469                 }
 1470         }
 1471         free(xu, M_TEMP);
 1472         if (!error) {
 1473                 /*
 1474                  * Give the user an updated idea of our state.  If the
 1475                  * generation differs from what we told her before, she knows
 1476                  * that something happened while we were processing this
 1477                  * request, and it might be necessary to retry.
 1478                  */
 1479                 xug->xug_gen = unp_gencnt;
 1480                 xug->xug_sogen = so_gencnt;
 1481                 xug->xug_count = unp_count;
 1482                 error = SYSCTL_OUT(req, xug, sizeof *xug);
 1483         }
 1484         free(unp_list, M_TEMP);
 1485         free(xug, M_TEMP);
 1486         return (error);
 1487 }
 1488 
 1489 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
 1490             (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
 1491             "List of active local datagram sockets");
 1492 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
 1493             (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
 1494             "List of active local stream sockets");
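
      /*
       * Illustrative userland sketch, not part of this file: one way to
       * consume the pcblist sysctls exported above.  The reply is a leading
       * struct xunpgen, a run of struct xunpcb records, and a trailing
       * struct xunpgen whose generation count can be compared with the
       * leading one to detect changes made while the list was copied out.
       * list_local_stream_pcbs() is a hypothetical helper.
       *
       *      #include <sys/param.h>
       *      #include <sys/queue.h>
       *      #include <sys/socket.h>
       *      #include <sys/socketvar.h>
       *      #include <sys/sysctl.h>
       *      #include <sys/un.h>
       *      #include <sys/unpcb.h>
       *      #include <stdio.h>
       *      #include <stdlib.h>
       *
       *      static void
       *      list_local_stream_pcbs(void)
       *      {
       *              struct xunpgen *xug;
       *              struct xunpcb *xu;
       *              char *buf, *p;
       *              size_t len;
       *
       *              if (sysctlbyname("net.local.stream.pcblist", NULL, &len,
       *                  NULL, 0) == -1)
       *                      return;
       *              if ((buf = malloc(len)) == NULL)
       *                      return;
       *              if (sysctlbyname("net.local.stream.pcblist", buf, &len,
       *                  NULL, 0) == -1) {
       *                      free(buf);
       *                      return;
       *              }
       *              xug = (struct xunpgen *)buf;
       *              for (p = buf + xug->xug_len; p + sizeof(*xu) <= buf + len;
       *                  p += xu->xu_len) {
       *                      xu = (struct xunpcb *)p;
       *                      if (xu->xu_len != sizeof(*xu))
       *                              break;
       *                      printf("unpcb %p\n", (void *)xu->xu_unpp);
       *              }
       *              free(buf);
       *      }
       */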
 1495 
 1496 static void
 1497 unp_shutdown(struct unpcb *unp)
 1498 {
 1499         struct unpcb *unp2;
 1500         struct socket *so;
 1501 
 1502         UNP_GLOBAL_WLOCK_ASSERT();
 1503         UNP_PCB_LOCK_ASSERT(unp);
 1504 
 1505         unp2 = unp->unp_conn;
 1506         if (unp->unp_socket->so_type == SOCK_STREAM && unp2 != NULL) {
 1507                 so = unp2->unp_socket;
 1508                 if (so != NULL)
 1509                         socantrcvmore(so);
 1510         }
 1511 }
 1512 
 1513 static void
 1514 unp_drop(struct unpcb *unp, int errno)
 1515 {
 1516         struct socket *so = unp->unp_socket;
 1517         struct unpcb *unp2;
 1518 
 1519         UNP_GLOBAL_WLOCK_ASSERT();
 1520         UNP_PCB_LOCK_ASSERT(unp);
 1521 
 1522         so->so_error = errno;
 1523         unp2 = unp->unp_conn;
 1524         if (unp2 == NULL)
 1525                 return;
 1526         UNP_PCB_LOCK(unp2);
 1527         unp_disconnect(unp, unp2);
 1528         UNP_PCB_UNLOCK(unp2);
 1529 }
 1530 
 1531 static void
 1532 unp_freerights(struct file **rp, int fdcount)
 1533 {
 1534         int i;
 1535         struct file *fp;
 1536 
 1537         for (i = 0; i < fdcount; i++) {
 1538                 fp = *rp;
 1539                 *rp++ = NULL;
 1540                 unp_discard(fp);
 1541         }
 1542 }
 1543 
 1544 static int
 1545 unp_externalize(struct mbuf *control, struct mbuf **controlp)
 1546 {
 1547         struct thread *td = curthread;          /* XXX */
 1548         struct cmsghdr *cm = mtod(control, struct cmsghdr *);
 1549         int i;
 1550         int *fdp;
 1551         struct file **rp;
 1552         struct file *fp;
 1553         void *data;
 1554         socklen_t clen = control->m_len, datalen;
 1555         int error, newfds;
 1556         int f;
 1557         u_int newlen;
 1558 
 1559         UNP_GLOBAL_UNLOCK_ASSERT();
 1560 
 1561         error = 0;
 1562         if (controlp != NULL) /* controlp == NULL => free control messages */
 1563                 *controlp = NULL;
 1564         while (cm != NULL) {
 1565                 if (sizeof(*cm) > clen || cm->cmsg_len > clen) {
 1566                         error = EINVAL;
 1567                         break;
 1568                 }
 1569                 data = CMSG_DATA(cm);
 1570                 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
 1571                 if (cm->cmsg_level == SOL_SOCKET
 1572                     && cm->cmsg_type == SCM_RIGHTS) {
 1573                         newfds = datalen / sizeof(struct file *);
 1574                         rp = data;
 1575 
 1576                         /* If we're not outputting the descriptors, free them. */
 1577                         if (error || controlp == NULL) {
 1578                                 unp_freerights(rp, newfds);
 1579                                 goto next;
 1580                         }
 1581                         FILEDESC_XLOCK(td->td_proc->p_fd);
 1582                         /* If the new FDs will not fit, free them. */
 1583                         if (!fdavail(td, newfds)) {
 1584                                 FILEDESC_XUNLOCK(td->td_proc->p_fd);
 1585                                 error = EMSGSIZE;
 1586                                 unp_freerights(rp, newfds);
 1587                                 goto next;
 1588                         }
 1589 
 1590                         /*
 1591                          * Now change each pointer to an fd in the global
 1592                          * table to an integer that is the index to the local
 1593                          * fd table entry that we set up to point to the
 1594                          * global one we are transferring.
 1595                          */
 1596                         newlen = newfds * sizeof(int);
 1597                         *controlp = sbcreatecontrol(NULL, newlen,
 1598                             SCM_RIGHTS, SOL_SOCKET);
 1599                         if (*controlp == NULL) {
 1600                                 FILEDESC_XUNLOCK(td->td_proc->p_fd);
 1601                                 error = E2BIG;
 1602                                 unp_freerights(rp, newfds);
 1603                                 goto next;
 1604                         }
 1605 
 1606                         fdp = (int *)
 1607                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
 1608                         for (i = 0; i < newfds; i++) {
 1609                                 if (fdalloc(td, 0, &f))
 1610                                         panic("unp_externalize fdalloc failed");
 1611                                 fp = *rp++;
 1612                                 td->td_proc->p_fd->fd_ofiles[f] = fp;
 1613                                 FILE_LOCK(fp);
 1614                                 fp->f_msgcount--;
 1615                                 FILE_UNLOCK(fp);
 1616                                 UNP_GLOBAL_WLOCK();
 1617                                 unp_rights--;
 1618                                 UNP_GLOBAL_WUNLOCK();
 1619                                 *fdp++ = f;
 1620                         }
 1621                         FILEDESC_XUNLOCK(td->td_proc->p_fd);
 1622                 } else {
 1623                         /* We can just copy anything else across. */
 1624                         if (error || controlp == NULL)
 1625                                 goto next;
 1626                         *controlp = sbcreatecontrol(NULL, datalen,
 1627                             cm->cmsg_type, cm->cmsg_level);
 1628                         if (*controlp == NULL) {
 1629                                 error = ENOBUFS;
 1630                                 goto next;
 1631                         }
 1632                         bcopy(data,
 1633                             CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
 1634                             datalen);
 1635                 }
 1636                 controlp = &(*controlp)->m_next;
 1637 
 1638 next:
 1639                 if (CMSG_SPACE(datalen) < clen) {
 1640                         clen -= CMSG_SPACE(datalen);
 1641                         cm = (struct cmsghdr *)
 1642                             ((caddr_t)cm + CMSG_SPACE(datalen));
 1643                 } else {
 1644                         clen = 0;
 1645                         cm = NULL;
 1646                 }
 1647         }
 1648 
 1649         m_freem(control);
 1650         return (error);
 1651 }
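
      /*
       * Illustrative userland sketch, not part of this file: the receive
       * side of the SCM_RIGHTS translation done by unp_externalize() above.
       * A recvmsg(2) with control buffer space yields the newly allocated
       * descriptor.  recv_fd() is a hypothetical helper.
       *
       *      #include <sys/socket.h>
       *      #include <sys/uio.h>
       *      #include <string.h>
       *
       *      int
       *      recv_fd(int sock)
       *      {
       *              char cbuf[CMSG_SPACE(sizeof(int))];
       *              struct msghdr msg;
       *              struct cmsghdr *cm;
       *              struct iovec iov;
       *              char c;
       *              int fd;
       *
       *              memset(&msg, 0, sizeof(msg));
       *              iov.iov_base = &c;
       *              iov.iov_len = sizeof(c);
       *              msg.msg_iov = &iov;
       *              msg.msg_iovlen = 1;
       *              msg.msg_control = cbuf;
       *              msg.msg_controllen = sizeof(cbuf);
       *              if (recvmsg(sock, &msg, 0) == -1)
       *                      return (-1);
       *              cm = CMSG_FIRSTHDR(&msg);
       *              if (cm == NULL || cm->cmsg_level != SOL_SOCKET ||
       *                  cm->cmsg_type != SCM_RIGHTS)
       *                      return (-1);
       *              memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
       *              return (fd);
       *      }
       */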
 1652 
 1653 static void
 1654 unp_zone_change(void *tag)
 1655 {
 1656 
 1657         uma_zone_set_max(unp_zone, maxsockets);
 1658 }
 1659 
 1660 static void
 1661 unp_init(void)
 1662 {
 1663 
 1664         unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, NULL,
 1665             NULL, NULL, UMA_ALIGN_PTR, 0);
 1666         if (unp_zone == NULL)
 1667                 panic("unp_init");
 1668         uma_zone_set_max(unp_zone, maxsockets);
 1669         EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change,
 1670             NULL, EVENTHANDLER_PRI_ANY);
 1671         LIST_INIT(&unp_dhead);
 1672         LIST_INIT(&unp_shead);
 1673         TASK_INIT(&unp_gc_task, 0, unp_gc, NULL);
 1674         UNP_GLOBAL_LOCK_INIT();
 1675 }
 1676 
 1677 static int
 1678 unp_internalize(struct mbuf **controlp, struct thread *td)
 1679 {
 1680         struct mbuf *control = *controlp;
 1681         struct proc *p = td->td_proc;
 1682         struct filedesc *fdescp = p->p_fd;
 1683         struct cmsghdr *cm = mtod(control, struct cmsghdr *);
 1684         struct cmsgcred *cmcred;
 1685         struct file **rp;
 1686         struct file *fp;
 1687         struct timeval *tv;
 1688         int i, fd, *fdp;
 1689         void *data;
 1690         socklen_t clen = control->m_len, datalen;
 1691         int error, oldfds;
 1692         u_int newlen;
 1693 
 1694         UNP_GLOBAL_UNLOCK_ASSERT();
 1695 
 1696         error = 0;
 1697         *controlp = NULL;
 1698         while (cm != NULL) {
 1699                 if (sizeof(*cm) > clen || cm->cmsg_level != SOL_SOCKET
 1700                     || cm->cmsg_len > clen) {
 1701                         error = EINVAL;
 1702                         goto out;
 1703                 }
 1704                 data = CMSG_DATA(cm);
 1705                 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
 1706 
 1707                 switch (cm->cmsg_type) {
 1708                 /*
 1709                  * Fill in credential information.
 1710                  */
 1711                 case SCM_CREDS:
 1712                         *controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
 1713                             SCM_CREDS, SOL_SOCKET);
 1714                         if (*controlp == NULL) {
 1715                                 error = ENOBUFS;
 1716                                 goto out;
 1717                         }
 1718                         cmcred = (struct cmsgcred *)
 1719                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
 1720                         cmcred->cmcred_pid = p->p_pid;
 1721                         cmcred->cmcred_uid = td->td_ucred->cr_ruid;
 1722                         cmcred->cmcred_gid = td->td_ucred->cr_rgid;
 1723                         cmcred->cmcred_euid = td->td_ucred->cr_uid;
 1724                         cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
 1725                             CMGROUP_MAX);
 1726                         for (i = 0; i < cmcred->cmcred_ngroups; i++)
 1727                                 cmcred->cmcred_groups[i] =
 1728                                     td->td_ucred->cr_groups[i];
 1729                         break;
 1730 
 1731                 case SCM_RIGHTS:
 1732                         oldfds = datalen / sizeof (int);
 1733                         /*
 1734                          * Check that all the FDs passed in refer to legal
 1735                          * files.  If not, reject the entire operation.
 1736                          */
 1737                         fdp = data;
 1738                         FILEDESC_SLOCK(fdescp);
 1739                         for (i = 0; i < oldfds; i++) {
 1740                                 fd = *fdp++;
 1741                                 if ((unsigned)fd >= fdescp->fd_nfiles ||
 1742                                     fdescp->fd_ofiles[fd] == NULL) {
 1743                                         FILEDESC_SUNLOCK(fdescp);
 1744                                         error = EBADF;
 1745                                         goto out;
 1746                                 }
 1747                                 fp = fdescp->fd_ofiles[fd];
 1748                                 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) {
 1749                                         FILEDESC_SUNLOCK(fdescp);
 1750                                         error = EOPNOTSUPP;
 1751                                         goto out;
 1752                                 }
 1753 
 1754                         }
 1755 
 1756                         /*
 1757                          * Now replace the integer FDs with pointers to the
 1758                          * associated global file table entries.
 1759                          */
 1760                         newlen = oldfds * sizeof(struct file *);
 1761                         *controlp = sbcreatecontrol(NULL, newlen,
 1762                             SCM_RIGHTS, SOL_SOCKET);
 1763                         if (*controlp == NULL) {
 1764                                 FILEDESC_SUNLOCK(fdescp);
 1765                                 error = E2BIG;
 1766                                 goto out;
 1767                         }
 1768                         fdp = data;
 1769                         rp = (struct file **)
 1770                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
 1771                         for (i = 0; i < oldfds; i++) {
 1772                                 fp = fdescp->fd_ofiles[*fdp++];
 1773                                 *rp++ = fp;
 1774                                 FILE_LOCK(fp);
 1775                                 fp->f_count++;
 1776                                 fp->f_msgcount++;
 1777                                 FILE_UNLOCK(fp);
 1778                                 UNP_GLOBAL_WLOCK();
 1779                                 unp_rights++;
 1780                                 UNP_GLOBAL_WUNLOCK();
 1781                         }
 1782                         FILEDESC_SUNLOCK(fdescp);
 1783                         break;
 1784 
 1785                 case SCM_TIMESTAMP:
 1786                         *controlp = sbcreatecontrol(NULL, sizeof(*tv),
 1787                             SCM_TIMESTAMP, SOL_SOCKET);
 1788                         if (*controlp == NULL) {
 1789                                 error = ENOBUFS;
 1790                                 goto out;
 1791                         }
 1792                         tv = (struct timeval *)
 1793                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
 1794                         microtime(tv);
 1795                         break;
 1796 
 1797                 default:
 1798                         error = EINVAL;
 1799                         goto out;
 1800                 }
 1801 
 1802                 controlp = &(*controlp)->m_next;
 1803                 if (CMSG_SPACE(datalen) < clen) {
 1804                         clen -= CMSG_SPACE(datalen);
 1805                         cm = (struct cmsghdr *)
 1806                             ((caddr_t)cm + CMSG_SPACE(datalen));
 1807                 } else {
 1808                         clen = 0;
 1809                         cm = NULL;
 1810                 }
 1811         }
 1812 
 1813 out:
 1814         m_freem(control);
 1815         return (error);
 1816 }
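
      /*
       * Illustrative userland sketch, not part of this file: the SCM_RIGHTS
       * case handled by unp_internalize() above is driven by a sendmsg(2)
       * carrying the descriptors to pass in a control message.  send_fd()
       * is a hypothetical helper that passes a single descriptor.
       *
       *      #include <sys/socket.h>
       *      #include <sys/uio.h>
       *      #include <string.h>
       *
       *      int
       *      send_fd(int sock, int fd)
       *      {
       *              char cbuf[CMSG_SPACE(sizeof(int))];
       *              struct msghdr msg;
       *              struct cmsghdr *cm;
       *              struct iovec iov;
       *              char c = 0;
       *
       *              memset(&msg, 0, sizeof(msg));
       *              iov.iov_base = &c;
       *              iov.iov_len = sizeof(c);
       *              msg.msg_iov = &iov;
       *              msg.msg_iovlen = 1;
       *              msg.msg_control = cbuf;
       *              msg.msg_controllen = sizeof(cbuf);
       *              cm = CMSG_FIRSTHDR(&msg);
       *              cm->cmsg_len = CMSG_LEN(sizeof(int));
       *              cm->cmsg_level = SOL_SOCKET;
       *              cm->cmsg_type = SCM_RIGHTS;
       *              memcpy(CMSG_DATA(cm), &fd, sizeof(fd));
       *              return (sendmsg(sock, &msg, 0) == -1 ? -1 : 0);
       *      }
       */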
 1817 
 1818 static struct mbuf *
 1819 unp_addsockcred(struct thread *td, struct mbuf *control)
 1820 {
 1821         struct mbuf *m, *n, *n_prev;
 1822         struct sockcred *sc;
 1823         const struct cmsghdr *cm;
 1824         int ngroups;
 1825         int i;
 1826 
 1827         ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX);
 1828         m = sbcreatecontrol(NULL, SOCKCREDSIZE(ngroups), SCM_CREDS, SOL_SOCKET);
 1829         if (m == NULL)
 1830                 return (control);
 1831 
 1832         sc = (struct sockcred *) CMSG_DATA(mtod(m, struct cmsghdr *));
 1833         sc->sc_uid = td->td_ucred->cr_ruid;
 1834         sc->sc_euid = td->td_ucred->cr_uid;
 1835         sc->sc_gid = td->td_ucred->cr_rgid;
 1836         sc->sc_egid = td->td_ucred->cr_gid;
 1837         sc->sc_ngroups = ngroups;
 1838         for (i = 0; i < sc->sc_ngroups; i++)
 1839                 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
 1840 
 1841         /*
 1842          * Unlink SCM_CREDS control messages (struct cmsgcred), since the
 1843          * newly created SCM_CREDS message (struct sockcred) has a
 1844          * different format.
 1845          */
 1846         if (control != NULL)
 1847                 for (n = control, n_prev = NULL; n != NULL;) {
 1848                         cm = mtod(n, struct cmsghdr *);
 1849                         if (cm->cmsg_level == SOL_SOCKET &&
 1850                             cm->cmsg_type == SCM_CREDS) {
 1851                                 if (n_prev == NULL)
 1852                                         control = n->m_next;
 1853                                 else
 1854                                         n_prev->m_next = n->m_next;
 1855                                 n = m_free(n);
 1856                         } else {
 1857                                 n_prev = n;
 1858                                 n = n->m_next;
 1859                         }
 1860                 }
 1861 
 1862         /* Prepend it to the head. */
 1863         m->m_next = control;
 1864         return (m);
 1865 }
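
      /*
       * Illustrative userland sketch, not part of this file: the struct
       * sockcred built by unp_addsockcred() above is what a receiver sees
       * after enabling the LOCAL_CREDS socket option (option level 0 for
       * the local domain); the sender's credentials then arrive as an
       * SCM_CREDS control message alongside the data.  recv_with_creds()
       * is a hypothetical helper, and the option must be set before the
       * peer sends.
       *
       *      #include <sys/socket.h>
       *      #include <sys/uio.h>
       *      #include <sys/un.h>
       *      #include <stdio.h>
       *      #include <string.h>
       *
       *      void
       *      recv_with_creds(int sock)
       *      {
       *              char data[128], cbuf[CMSG_SPACE(SOCKCREDSIZE(CMGROUP_MAX))];
       *              struct msghdr msg;
       *              struct cmsghdr *cm;
       *              struct sockcred *sc;
       *              struct iovec iov;
       *              int on = 1;
       *
       *              setsockopt(sock, 0, LOCAL_CREDS, &on, sizeof(on));
       *              memset(&msg, 0, sizeof(msg));
       *              iov.iov_base = data;
       *              iov.iov_len = sizeof(data);
       *              msg.msg_iov = &iov;
       *              msg.msg_iovlen = 1;
       *              msg.msg_control = cbuf;
       *              msg.msg_controllen = sizeof(cbuf);
       *              if (recvmsg(sock, &msg, 0) == -1)
       *                      return;
       *              for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
       *                  cm = CMSG_NXTHDR(&msg, cm)) {
       *                      if (cm->cmsg_level != SOL_SOCKET ||
       *                          cm->cmsg_type != SCM_CREDS)
       *                              continue;
       *                      sc = (struct sockcred *)CMSG_DATA(cm);
       *                      printf("peer euid %u egid %u\n",
       *                          (unsigned)sc->sc_euid, (unsigned)sc->sc_egid);
       *              }
       *      }
       */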
 1866 
 1867 /*
 1868  * unp_defer indicates whether additional work has been deferred for a future
 1869  * pass through unp_gc().  It is thread local and does not require explicit
 1870  * synchronization.
 1871  */
 1872 static int      unp_defer;
 1873 
 1874 static int unp_taskcount;
 1875 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, "");
 1876 
 1877 static int unp_recycled;
 1878 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, "");
 1879 
 1880 static void
 1881 unp_gc(__unused void *arg, int pending)
 1882 {
 1883         struct file *fp, *nextfp;
 1884         struct socket *so;
 1885         struct socket *soa;
 1886         struct file **extra_ref, **fpp;
 1887         int nunref, i;
 1888         int nfiles_snap;
 1889         int nfiles_slack = 20;
 1890 
 1891         unp_taskcount++;
 1892         unp_defer = 0;
 1893 
 1894         /*
 1895          * Before going through all this, set all FDs to be NOT deferred and
 1896          * NOT externally accessible.
 1897          */
 1898         sx_slock(&filelist_lock);
 1899         LIST_FOREACH(fp, &filehead, f_list)
 1900                 fp->f_gcflag &= ~(FMARK|FDEFER);
 1901         do {
 1902                 KASSERT(unp_defer >= 0, ("unp_gc: unp_defer %d", unp_defer));
 1903                 LIST_FOREACH(fp, &filehead, f_list) {
 1904                         FILE_LOCK(fp);
 1905                         /*
 1906                          * If the file is not open, skip it -- could be a
 1907                          * file in the process of being opened, or in the
 1908                          * process of being closed.  If the file is
 1909                          * "closing", it may have been marked for deferred
 1910                          * consideration.  Clear the flag now if so.
 1911                          */
 1912                         if (fp->f_count == 0) {
 1913                                 if (fp->f_gcflag & FDEFER)
 1914                                         unp_defer--;
 1915                                 fp->f_gcflag &= ~(FMARK|FDEFER);
 1916                                 FILE_UNLOCK(fp);
 1917                                 continue;
 1918                         }
 1919 
 1920                         /*
 1921                          * If we already marked it as 'defer' in a
 1922                          * previous pass, then try to process it this
 1923                          * time and un-mark it.
 1924                          */
 1925                         if (fp->f_gcflag & FDEFER) {
 1926                                 fp->f_gcflag &= ~FDEFER;
 1927                                 unp_defer--;
 1928                         } else {
 1929                                 /*
 1930                                  * If it's not deferred, then check if it's
 1931                                  * already marked; if so, skip it.
 1932                                  */
 1933                                 if (fp->f_gcflag & FMARK) {
 1934                                         FILE_UNLOCK(fp);
 1935                                         continue;
 1936                                 }
 1937 
 1938                                 /*
 1939                                  * If all references are from messages in
 1940                                  * transit, then skip it; it's not externally
 1941                                  * accessible.
 1942                                  */
 1943                                 if (fp->f_count == fp->f_msgcount) {
 1944                                         FILE_UNLOCK(fp);
 1945                                         continue;
 1946                                 }
 1947 
 1948                                 /*
 1949                                  * If it got this far then it must be
 1950                                  * externally accessible.
 1951                                  */
 1952                                 fp->f_gcflag |= FMARK;
 1953                         }
 1954 
 1955                         /*
 1956                          * Either it was deferred, or it is externally
 1957                          * accessible and not already marked so.  Now check
 1958                          * if it is possibly one of OUR sockets.
 1959                          */
 1960                         if (fp->f_type != DTYPE_SOCKET ||
 1961                             (so = fp->f_data) == NULL) {
 1962                                 FILE_UNLOCK(fp);
 1963                                 continue;
 1964                         }
 1965 
 1966                         if (so->so_proto->pr_domain != &localdomain ||
 1967                             (so->so_proto->pr_flags & PR_RIGHTS) == 0) {
 1968                                 FILE_UNLOCK(fp);
 1969                                 continue;
 1970                         }
 1971 
 1972                         /*
 1973                          * Tell any other threads that do a subsequent
 1974                          * fdrop() that we are scanning the message
 1975                          * buffers.
 1976                          */
 1977                         fp->f_gcflag |= FWAIT;
 1978                         FILE_UNLOCK(fp);
 1979 
 1980                         /*
 1981                          * So, OK, it's one of our sockets and it IS
 1982                          * externally accessible (or was deferred).  Now we
 1983                          * look to see if we hold any file descriptors in its
 1984                          * message buffers. Follow those links and mark them
 1985                          * as accessible too.
 1986                          */
 1987                         SOCKBUF_LOCK(&so->so_rcv);
 1988                         unp_scan(so->so_rcv.sb_mb, unp_mark);
 1989                         SOCKBUF_UNLOCK(&so->so_rcv);
 1990 
 1991                         /*
 1992                          * If socket is in listening state, then sockets
 1993                          * in its accept queue are accessible, and so
 1994                          * are any descriptors in those sockets' receive
 1995                          * queues.
 1996                          */
 1997                         ACCEPT_LOCK();
 1998                         TAILQ_FOREACH(soa, &so->so_comp, so_list) {
 1999                             SOCKBUF_LOCK(&soa->so_rcv);
 2000                             unp_scan(soa->so_rcv.sb_mb, unp_mark);
 2001                             SOCKBUF_UNLOCK(&soa->so_rcv);
 2002                         }
 2003                         ACCEPT_UNLOCK();
 2004 
 2005                         /*
 2006                          * Wake up any threads waiting in fdrop().
 2007                          */
 2008                         FILE_LOCK(fp);
 2009                         fp->f_gcflag &= ~FWAIT;
 2010                         wakeup(&fp->f_gcflag);
 2011                         FILE_UNLOCK(fp);
 2012                 }
 2013         } while (unp_defer);
 2014         sx_sunlock(&filelist_lock);
 2015 
 2016         /*
 2017          * XXXRW: The following comments need updating for a post-SMPng and
 2018          * deferred unp_gc() world, but are still generally accurate.
 2019          *
 2020          * We grab an extra reference to each of the file table entries that
 2021          * are not otherwise accessible and then free the rights that are
 2022          * stored in messages on them.
 2023          *
 2024          * The bug in the original code is a little tricky, so I'll describe
 2025          * what's wrong with it here.
 2026          *
 2027          * It is incorrect to simply unp_discard each entry for f_msgcount
 2028          * times -- consider the case of sockets A and B that contain
 2029          * references to each other.  On a last close of some other socket,
 2030          * we trigger a gc since the number of outstanding rights (unp_rights)
 2031          * is non-zero.  If during the sweep phase the gc code unp_discards,
 2032          * we end up doing a (full) closef on the descriptor.  A closef on A
 2033          * results in the following chain.  Closef calls soo_close, which
 2034          * calls soclose.   Soclose calls first (through the switch
 2035          * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
 2036          * returns because the previous instance had set unp_gcing, and we
 2037          * return all the way back to soclose, which marks the socket with
 2038          * SS_NOFDREF, and then calls sofree.  Sofree calls sorflush to free
 2039          * up the rights that are queued in messages on the socket A, i.e.,
 2040          * the reference on B.  The sorflush calls via the dom_dispose switch
 2041          * unp_dispose, which unp_scans with unp_discard.  This second
 2042          * instance of unp_discard just calls closef on B.
 2043          *
 2044          * Well, a similar chain occurs on B, resulting in a sorflush on B,
 2045          * which results in another closef on A.  Unfortunately, A is already
 2046          * being closed, and the descriptor has already been marked with
 2047          * SS_NOFDREF, and soclose panics at this point.
 2048          *
 2049          * Here, we first take an extra reference to each inaccessible
 2050          * descriptor.  Then, we call sorflush ourself, since we know it is a
 2051          * Unix domain socket anyhow.  After we destroy all the rights
 2052          * carried in messages, we do a last closef to get rid of our extra
 2053          * reference.  This is the last close, and the unp_detach etc will
 2054          * shut down the socket.
 2055          *
 2056          * 91/09/19, bsy@cs.cmu.edu
 2057          */
 2058 again:
 2059         nfiles_snap = openfiles + nfiles_slack; /* some slack */
 2060         extra_ref = malloc(nfiles_snap * sizeof(struct file *), M_TEMP,
 2061             M_WAITOK);
 2062         sx_slock(&filelist_lock);
 2063         if (nfiles_snap < openfiles) {
 2064                 sx_sunlock(&filelist_lock);
 2065                 free(extra_ref, M_TEMP);
 2066                 nfiles_slack += 20;
 2067                 goto again;
 2068         }
 2069         for (nunref = 0, fp = LIST_FIRST(&filehead), fpp = extra_ref;
 2070             fp != NULL; fp = nextfp) {
 2071                 nextfp = LIST_NEXT(fp, f_list);
 2072                 FILE_LOCK(fp);
 2073 
 2074                 /*
 2075                  * If it's not open, skip it.
 2076                  */
 2077                 if (fp->f_count == 0) {
 2078                         FILE_UNLOCK(fp);
 2079                         continue;
 2080                 }
 2081 
 2082                 /*
 2083                  * If all refs are from msgs, and it's not marked accessible
 2084                  * then it must be referenced from some unreachable cycle of
 2085                  * (shut-down) FDs, so include it in our list of FDs to
 2086                  * remove.
 2087                  */
 2088                 if (fp->f_count == fp->f_msgcount && !(fp->f_gcflag & FMARK)) {
 2089                         *fpp++ = fp;
 2090                         nunref++;
 2091                         fp->f_count++;
 2092                 }
 2093                 FILE_UNLOCK(fp);
 2094         }
 2095         sx_sunlock(&filelist_lock);
 2096 
 2097         /*
 2098          * For each FD on our hit list, flush queued rights, then close it:
 2099          */
 2100         for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
 2101                 struct file *tfp = *fpp;
 2102                 FILE_LOCK(tfp);
 2103                 if (tfp->f_type == DTYPE_SOCKET &&
 2104                     tfp->f_data != NULL) {
 2105                         FILE_UNLOCK(tfp);
 2106                         sorflush(tfp->f_data);
 2107                 } else {
 2108                         FILE_UNLOCK(tfp);
 2109                 }
 2110         }
 2111         for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
 2112                 closef(*fpp, (struct thread *) NULL);
 2113                 unp_recycled++;
 2114         }
 2115         free(extra_ref, M_TEMP);
 2116 }
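
      /*
       * Illustrative userland sketch, not part of this file: one way the
       * garbage collected above arises.  A reference to each end of a
       * socketpair is sent over that same socket, so it is queued in the
       * peer's receive buffer; closing both descriptors then leaves two
       * files that reference only each other from in-flight messages,
       * which only unp_gc() can reclaim.  send_fd() is the hypothetical
       * helper sketched after unp_internalize() above.
       *
       *      #include <sys/socket.h>
       *      #include <unistd.h>
       *
       *      static void
       *      make_unreachable_cycle(void)
       *      {
       *              int sv[2];
       *
       *              if (socketpair(PF_LOCAL, SOCK_STREAM, 0, sv) == -1)
       *                      return;
       *              send_fd(sv[0], sv[0]);
       *              send_fd(sv[1], sv[1]);
       *              close(sv[0]);
       *              close(sv[1]);
       *      }
       */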
 2117 
 2118 static void
 2119 unp_dispose(struct mbuf *m)
 2120 {
 2121 
 2122         if (m)
 2123                 unp_scan(m, unp_discard);
 2124 }
 2125 
 2126 static void
 2127 unp_scan(struct mbuf *m0, void (*op)(struct file *))
 2128 {
 2129         struct mbuf *m;
 2130         struct file **rp;
 2131         struct cmsghdr *cm;
 2132         void *data;
 2133         int i;
 2134         socklen_t clen, datalen;
 2135         int qfds;
 2136 
 2137         while (m0 != NULL) {
 2138                 for (m = m0; m; m = m->m_next) {
 2139                         if (m->m_type != MT_CONTROL)
 2140                                 continue;
 2141 
 2142                         cm = mtod(m, struct cmsghdr *);
 2143                         clen = m->m_len;
 2144 
 2145                         while (cm != NULL) {
 2146                                 if (sizeof(*cm) > clen || cm->cmsg_len > clen)
 2147                                         break;
 2148 
 2149                                 data = CMSG_DATA(cm);
 2150                                 datalen = (caddr_t)cm + cm->cmsg_len
 2151                                     - (caddr_t)data;
 2152 
 2153                                 if (cm->cmsg_level == SOL_SOCKET &&
 2154                                     cm->cmsg_type == SCM_RIGHTS) {
 2155                                         qfds = datalen / sizeof (struct file *);
 2156                                         rp = data;
 2157                                         for (i = 0; i < qfds; i++)
 2158                                                 (*op)(*rp++);
 2159                                 }
 2160 
 2161                                 if (CMSG_SPACE(datalen) < clen) {
 2162                                         clen -= CMSG_SPACE(datalen);
 2163                                         cm = (struct cmsghdr *)
 2164                                             ((caddr_t)cm + CMSG_SPACE(datalen));
 2165                                 } else {
 2166                                         clen = 0;
 2167                                         cm = NULL;
 2168                                 }
 2169                         }
 2170                 }
 2171                 m0 = m0->m_act;
 2172         }
 2173 }
 2174 
 2175 static void
 2176 unp_mark(struct file *fp)
 2177 {
 2178 
 2179         /* XXXRW: Should probably assert file list lock here. */
 2180 
 2181         if (fp->f_gcflag & FMARK)
 2182                 return;
 2183         unp_defer++;
 2184         fp->f_gcflag |= (FMARK|FDEFER);
 2185 }
 2186 
 2187 static void
 2188 unp_discard(struct file *fp)
 2189 {
 2190 
 2191         UNP_GLOBAL_WLOCK();
 2192         FILE_LOCK(fp);
 2193         fp->f_msgcount--;
 2194         unp_rights--;
 2195         FILE_UNLOCK(fp);
 2196         UNP_GLOBAL_WUNLOCK();
 2197         (void) closef(fp, (struct thread *)NULL);
 2198 }
 2199 
 2200 #ifdef DDB
 2201 static void
 2202 db_print_indent(int indent)
 2203 {
 2204         int i;
 2205 
 2206         for (i = 0; i < indent; i++)
 2207                 db_printf(" ");
 2208 }
 2209 
 2210 static void
 2211 db_print_unpflags(int unp_flags)
 2212 {
 2213         int comma;
 2214 
 2215         comma = 0;
 2216         if (unp_flags & UNP_HAVEPC) {
 2217                 db_printf("%sUNP_HAVEPC", comma ? ", " : "");
 2218                 comma = 1;
 2219         }
 2220         if (unp_flags & UNP_HAVEPCCACHED) {
 2221                 db_printf("%sUNP_HAVEPCCACHED", comma ? ", " : "");
 2222                 comma = 1;
 2223         }
 2224         if (unp_flags & UNP_WANTCRED) {
 2225                 db_printf("%sUNP_WANTCRED", comma ? ", " : "");
 2226                 comma = 1;
 2227         }
 2228         if (unp_flags & UNP_CONNWAIT) {
 2229                 db_printf("%sUNP_CONNWAIT", comma ? ", " : "");
 2230                 comma = 1;
 2231         }
 2232         if (unp_flags & UNP_CONNECTING) {
 2233                 db_printf("%sUNP_CONNECTING", comma ? ", " : "");
 2234                 comma = 1;
 2235         }
 2236         if (unp_flags & UNP_BINDING) {
 2237                 db_printf("%sUNP_BINDING", comma ? ", " : "");
 2238                 comma = 1;
 2239         }
 2240 }
 2241 
 2242 static void
 2243 db_print_xucred(int indent, struct xucred *xu)
 2244 {
 2245         int comma, i;
 2246 
 2247         db_print_indent(indent);
 2248         db_printf("cr_version: %u   cr_uid: %u   cr_ngroups: %d\n",
 2249             xu->cr_version, xu->cr_uid, xu->cr_ngroups);
 2250         db_print_indent(indent);
 2251         db_printf("cr_groups: ");
 2252         comma = 0;
 2253         for (i = 0; i < xu->cr_ngroups; i++) {
 2254                 db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
 2255                 comma = 1;
 2256         }
 2257         db_printf("\n");
 2258 }
 2259 
 2260 static void
 2261 db_print_unprefs(int indent, struct unp_head *uh)
 2262 {
 2263         struct unpcb *unp;
 2264         int counter;
 2265 
 2266         counter = 0;
 2267         LIST_FOREACH(unp, uh, unp_reflink) {
 2268                 if (counter % 4 == 0)
 2269                         db_print_indent(indent);
 2270                 db_printf("%p  ", unp);
 2271                 if (counter % 4 == 3)
 2272                         db_printf("\n");
 2273                 counter++;
 2274         }
 2275         if (counter != 0 && counter % 4 != 0)
 2276                 db_printf("\n");
 2277 }
 2278 
 2279 DB_SHOW_COMMAND(unpcb, db_show_unpcb)
 2280 {
 2281         struct unpcb *unp;
 2282 
 2283         if (!have_addr) {
 2284                 db_printf("usage: show unpcb <addr>\n");
 2285                 return;
 2286         }
 2287         unp = (struct unpcb *)addr;
 2288 
 2289         db_printf("unp_socket: %p   unp_vnode: %p\n", unp->unp_socket,
 2290             unp->unp_vnode);
 2291 
 2292         db_printf("unp_ino: %d   unp_conn: %p\n", unp->unp_ino,
 2293             unp->unp_conn);
 2294 
 2295         db_printf("unp_refs:\n");
 2296         db_print_unprefs(2, &unp->unp_refs);
 2297 
 2298         /* XXXRW: Would be nice to print the full address, if any. */
 2299         db_printf("unp_addr: %p\n", unp->unp_addr);
 2300 
 2301         db_printf("unp_cc: %d   unp_mbcnt: %d   unp_gencnt: %llu\n",
 2302             unp->unp_cc, unp->unp_mbcnt,
 2303             (unsigned long long)unp->unp_gencnt);
 2304 
 2305         db_printf("unp_flags: %x (", unp->unp_flags);
 2306         db_print_unpflags(unp->unp_flags);
 2307         db_printf(")\n");
 2308 
 2309         db_printf("unp_peercred:\n");
 2310         db_print_xucred(2, &unp->unp_peercred);
 2311 
 2312         db_printf("unp_refcount: %u\n", unp->unp_refcount);
 2313 }
 2314 #endif
