FreeBSD/Linux Kernel Cross Reference
sys/rpc/svc_vc.c


    1 /*      $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $  */
    2 
    3 /*
    4  * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
    5  * unrestricted use provided that this legend is included on all tape
    6  * media and as a part of the software program in whole or part.  Users
    7  * may copy or modify Sun RPC without charge, but are not authorized
    8  * to license or distribute it to anyone else except as part of a product or
    9  * program developed by the user.
   10  * 
   11  * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
   12  * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
   13  * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
   14  * 
   15  * Sun RPC is provided with no support and without any obligation on the
   16  * part of Sun Microsystems, Inc. to assist in its use, correction,
   17  * modification or enhancement.
   18  * 
   19  * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
   20  * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
   21  * OR ANY PART THEREOF.
   22  * 
   23  * In no event will Sun Microsystems, Inc. be liable for any lost revenue
   24  * or profits or other special, indirect and consequential damages, even if
   25  * Sun has been advised of the possibility of such damages.
   26  * 
   27  * Sun Microsystems, Inc.
   28  * 2550 Garcia Avenue
   29  * Mountain View, California  94043
   30  */
   31 
   32 #if defined(LIBC_SCCS) && !defined(lint)
   33 static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
   34 static char *sccsid = "@(#)svc_tcp.c    2.2 88/08/01 4.0 RPCSRC";
   35 #endif
   36 #include <sys/cdefs.h>
   37 __FBSDID("$FreeBSD: releng/6.4/sys/rpc/svc_vc.c 178459 2008-04-24 10:46:25Z dfr $");
   38 
   39 /*
   40  * svc_vc.c, Server side for connection-oriented RPC.
   41  *
   42  * Actually implements two flavors of transport -
   43  * a tcp rendezvouser (a listener and connection establisher)
   44  * and a record/tcp stream.
   45  */
   46 
   47 #include <sys/param.h>
   48 #include <sys/lock.h>
   49 #include <sys/kernel.h>
   50 #include <sys/malloc.h>
   51 #include <sys/mbuf.h>
   52 #include <sys/mutex.h>
   53 #include <sys/protosw.h>
   54 #include <sys/queue.h>
   55 #include <sys/socket.h>
   56 #include <sys/socketvar.h>
   57 #include <sys/systm.h>
   58 #include <sys/uio.h>
   59 #include <netinet/tcp.h>
   60 
   61 #include <rpc/rpc.h>
   62 
   63 #include <rpc/rpc_com.h>
   64 
   65 static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *);
   66 static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
   67 static void svc_vc_rendezvous_destroy(SVCXPRT *);
   68 static bool_t svc_vc_null(void);
   69 static void svc_vc_destroy(SVCXPRT *);
   70 static enum xprt_stat svc_vc_stat(SVCXPRT *);
   71 static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *);
   72 static bool_t svc_vc_getargs(SVCXPRT *, xdrproc_t, void *);
   73 static bool_t svc_vc_freeargs(SVCXPRT *, xdrproc_t, void *);
   74 static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *);
   75 static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
   76 static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
   77     void *in);
   78 static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
   79     struct sockaddr *raddr);
   80 static int svc_vc_accept(struct socket *head, struct socket **sop);
   81 static void svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
   82 
   83 static struct xp_ops svc_vc_rendezvous_ops = {
   84         .xp_recv =      svc_vc_rendezvous_recv,
   85         .xp_stat =      svc_vc_rendezvous_stat,
   86         .xp_getargs =   (bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
   87         .xp_reply =     (bool_t (*)(SVCXPRT *, struct rpc_msg *))svc_vc_null,
   88         .xp_freeargs =  (bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
   89         .xp_destroy =   svc_vc_rendezvous_destroy,
   90         .xp_control =   svc_vc_rendezvous_control
   91 };
   92 
   93 static struct xp_ops svc_vc_ops = {
   94         .xp_recv =      svc_vc_recv,
   95         .xp_stat =      svc_vc_stat,
   96         .xp_getargs =   svc_vc_getargs,
   97         .xp_reply =     svc_vc_reply,
   98         .xp_freeargs =  svc_vc_freeargs,
   99         .xp_destroy =   svc_vc_destroy,
  100         .xp_control =   svc_vc_control
  101 };
  102 
  103 struct cf_conn {  /* kept in xprt->xp_p1 for actual connection */
  104         enum xprt_stat strm_stat;
  105         struct mbuf *mpending;  /* unparsed data read from the socket */
  106         struct mbuf *mreq;      /* current record being built from mpending */
  107         uint32_t resid;         /* number of bytes needed for fragment */
  108         bool_t eor;             /* reading last fragment of current record */
  109 };
  110 
  111 /*
  112  * Usage:
  113  *      xprt = svc_vc_create(pool, sock, send_buf_size, recv_buf_size);
  114  *
  115  * Creates, registers, and returns an (RPC) TCP-based transport.
  116  * Once *xprt is initialized, it is registered as a transport;
  117  * see (svc.h, xprt_register).  This routine returns
  118  * NULL if a problem occurred.
  119  *
  120  * The socket passed in is expected to be bound, but
  121  * not yet connected.
  122  *
  123  * Since streams do buffered I/O similar to stdio, the caller can specify
  124  * how big the send and receive buffers are via the third and fourth parms;
  125  * 0 => use the system default.
  126  */
  127 SVCXPRT *
  128 svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
  129     size_t recvsize)
  130 {
  131         SVCXPRT *xprt;
  132         struct sockaddr* sa;
  133         int error;
  134 
  135         xprt = mem_alloc(sizeof(SVCXPRT));
  136         mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
  137         xprt->xp_pool = pool;
  138         xprt->xp_socket = so;
  139         xprt->xp_p1 = NULL;
  140         xprt->xp_p2 = NULL;
  141         xprt->xp_p3 = NULL;
  142         xprt->xp_verf = _null_auth;
  143         xprt->xp_ops = &svc_vc_rendezvous_ops;
  144 
  145         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  146         if (error)
  147                 goto cleanup_svc_vc_create;
  148 
  149         xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
  150         xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
  151         xprt->xp_ltaddr.len = sa->sa_len;
  152         memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
  153         free(sa, M_SONAME);
  154 
  155         xprt->xp_rtaddr.maxlen = 0;
  156 
  157         xprt_register(xprt);
  158 
  159         solisten(so, SOMAXCONN, curthread);
  160 
  161         SOCKBUF_LOCK(&so->so_rcv);
  162         so->so_upcallarg = xprt;
  163         so->so_upcall = svc_vc_soupcall;
  164         so->so_rcv.sb_flags |= SB_UPCALL;
  165         SOCKBUF_UNLOCK(&so->so_rcv);
  166 
  167         return (xprt);
  168 cleanup_svc_vc_create:
  169         if (xprt)
  170                 mem_free(xprt, sizeof(*xprt));
  171         return (NULL);
  172 }
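
The usage comment above assumes the caller already has an SVCPOOL and a bound TCP socket. As a rough sketch of how an in-kernel service might obtain one (not part of this file: the helper name and port are hypothetical, error handling is abbreviated, and struct sockaddr_in additionally needs <netinet/in.h>):

/*
 * Hypothetical helper: create and bind a TCP socket, then hand it to
 * svc_vc_create(), which performs solisten() and installs the receive
 * upcall itself.  Passing 0, 0 selects the default buffer sizes.
 */
static SVCXPRT *
example_make_tcp_transport(SVCPOOL *pool, struct thread *td)
{
        struct socket *so;
        struct sockaddr_in sin;
        int error;

        error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
            td->td_ucred, td);
        if (error)
                return (NULL);

        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        sin.sin_port = htons(2049);     /* illustrative port choice */
        error = sobind(so, (struct sockaddr *)&sin, td);
        if (error) {
                soclose(so);
                return (NULL);
        }

        return (svc_vc_create(pool, so, 0, 0));
}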
  173 
  174 /*
  175  * Create a new transport for a socket obtained via soaccept().
  176  */
  177 SVCXPRT *
  178 svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
  179 {
  180         SVCXPRT *xprt = NULL;
  181         struct cf_conn *cd = NULL;
  182         struct sockaddr* sa = NULL;
  183         int error;
  184 
  185         cd = mem_alloc(sizeof(*cd));
  186         cd->strm_stat = XPRT_IDLE;
  187 
  188         xprt = mem_alloc(sizeof(SVCXPRT));
  189         mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
  190         xprt->xp_pool = pool;
  191         xprt->xp_socket = so;
  192         xprt->xp_p1 = cd;
  193         xprt->xp_p2 = NULL;
  194         xprt->xp_p3 = NULL;
  195         xprt->xp_verf = _null_auth;
  196         xprt->xp_ops = &svc_vc_ops;
  197 
  198         xprt->xp_rtaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
  199         xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
  200         xprt->xp_rtaddr.len = raddr->sa_len;
  201         memcpy(xprt->xp_rtaddr.buf, raddr, raddr->sa_len);
  202 
  203         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  204         if (error)
  205                 goto cleanup_svc_vc_create;
  206 
  207         xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
  208         xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
  209         xprt->xp_ltaddr.len = sa->sa_len;
  210         memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
  211         free(sa, M_SONAME);
  212 
  213         xprt_register(xprt);
  214 
  215         SOCKBUF_LOCK(&so->so_rcv);
  216         so->so_upcallarg = xprt;
  217         so->so_upcall = svc_vc_soupcall;
  218         so->so_rcv.sb_flags |= SB_UPCALL;
  219         SOCKBUF_UNLOCK(&so->so_rcv);
  220 
  221         /*
  222          * Throw the transport into the active list in case it already
  223          * has some data buffered.
  224          */
  225         mtx_lock(&xprt->xp_lock);
  226         xprt_active(xprt);
  227         mtx_unlock(&xprt->xp_lock);
  228 
  229         return (xprt);
  230 cleanup_svc_vc_create:
  231         if (xprt) {
  232                 if (xprt->xp_ltaddr.buf)
  233                         mem_free(xprt->xp_ltaddr.buf,
  234                             sizeof(struct sockaddr_storage));
  235                 if (xprt->xp_rtaddr.buf)
  236                         mem_free(xprt->xp_rtaddr.buf,
  237                             sizeof(struct sockaddr_storage));
  238                 mem_free(xprt, sizeof(*xprt));
  239         }
  240         if (cd)
  241                 mem_free(cd, sizeof(*cd));
  242         return (NULL);
  243 }
  244 
  245 /*
  246  * This does all of the accept except the final call to soaccept. The
  247  * caller will call soaccept after dropping its locks (soaccept may
  248  * call malloc).
  249  */
  250 int
  251 svc_vc_accept(struct socket *head, struct socket **sop)
  252 {
  253         int error = 0;
  254         struct socket *so;
  255 
  256         if ((head->so_options & SO_ACCEPTCONN) == 0) {
  257                 error = EINVAL;
  258                 goto done;
  259         }
  260 #ifdef MAC
  261         SOCK_LOCK(head);
  262         error = mac_socket_check_accept(curthread->td_ucred, head);
  263         SOCK_UNLOCK(head);
  264         if (error != 0)
  265                 goto done;
  266 #endif
  267         ACCEPT_LOCK();
  268         if (TAILQ_EMPTY(&head->so_comp)) {
  269                 ACCEPT_UNLOCK();
  270                 error = EWOULDBLOCK;
  271                 goto done;
  272         }
  273         so = TAILQ_FIRST(&head->so_comp);
  274         KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
  275         KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));
  276 
  277         /*
  278          * Before changing the flags on the socket, we have to bump the
  279          * reference count.  Otherwise, if the protocol calls sofree(),
  280          * the socket will be released due to a zero refcount.
  281          * XXX might not need soref() since this is simpler than kern_accept.
  282          */
  283         SOCK_LOCK(so);                  /* soref() and so_state update */
  284         soref(so);                      /* file descriptor reference */
  285 
  286         TAILQ_REMOVE(&head->so_comp, so, so_list);
  287         head->so_qlen--;
  288         so->so_state |= (head->so_state & SS_NBIO);
  289         so->so_qstate &= ~SQ_COMP;
  290         so->so_head = NULL;
  291 
  292         SOCK_UNLOCK(so);
  293         ACCEPT_UNLOCK();
  294 
  295         *sop = so;
  296 
  297         /* connection has been removed from the listen queue */
  298         KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
  299 done:
  300         return (error);
  301 }
  302 
  303 /*ARGSUSED*/
  304 static bool_t
  305 svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg)
  306 {
  307         struct socket *so = NULL;
  308         struct sockaddr *sa = NULL;
  309         struct sockopt opt;
  310         int one = 1;
  311         int error;
  312 
  313         /*
  314          * The socket upcall calls xprt_active() which will eventually
  315          * cause the server to call us here. We attempt to accept a
  316          * connection from the socket and turn it into a new
  317          * transport. If the accept fails, we have drained all pending
  318          * connections so we call xprt_inactive().
  319          *
  320          * The lock protects us in the case where a new connection arrives
  321          * on the socket after our call to accept fails with
  322          * EWOULDBLOCK - the call to xprt_active() in the upcall will
  323          * happen only after our call to xprt_inactive() which ensures
  324          * that we will remain active. It might be possible to use
  325          * SOCKBUF_LOCK for this - it's not clear to me what locks are
  326          * held during the upcall.
  327          */
  328         mtx_lock(&xprt->xp_lock);
  329 
  330         error = svc_vc_accept(xprt->xp_socket, &so);
  331 
  332         if (error == EWOULDBLOCK) {
  333                 xprt_inactive(xprt);
  334                 mtx_unlock(&xprt->xp_lock);
  335                 return (FALSE);
  336         }
  337 
  338         if (error) {
  339                 SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  340                 xprt->xp_socket->so_upcallarg = NULL;
  341                 xprt->xp_socket->so_upcall = NULL;
  342                 xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
  343                 SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  344                 xprt_inactive(xprt);
  345                 mtx_unlock(&xprt->xp_lock);
  346                 return (FALSE);
  347         }
  348 
  349         mtx_unlock(&xprt->xp_lock);
  350 
  351         sa = NULL;
  352         error = soaccept(so, &sa);
  353 
  354         if (!error) {
  355                 bzero(&opt, sizeof(struct sockopt));
  356                 opt.sopt_dir = SOPT_SET;
  357                 opt.sopt_level = IPPROTO_TCP;
  358                 opt.sopt_name = TCP_NODELAY;
  359                 opt.sopt_val = &one;
  360                 opt.sopt_valsize = sizeof(one);
  361                 error = sosetopt(so, &opt);
  362         }
  363 
  364         if (error) {
  365                 /*
  366                  * XXX not sure if I need to call sofree or soclose here.
  367                  */
  368                 if (sa)
  369                         free(sa, M_SONAME);
  370                 return (FALSE);
  371         }
  372 
  373         /*
  374          * svc_vc_create_conn will call xprt_register - we don't need
  375          * to do anything with the new connection.
  376          */
  377         svc_vc_create_conn(xprt->xp_pool, so, sa);
  378         free(sa, M_SONAME);
  379 
  380         return (FALSE); /* there is never an rpc msg to be processed */
  381 }
  382 
  383 /*ARGSUSED*/
  384 static enum xprt_stat
  385 svc_vc_rendezvous_stat(SVCXPRT *xprt)
  386 {
  387 
  388         return (XPRT_IDLE);
  389 }
  390 
  391 static void
  392 svc_vc_destroy_common(SVCXPRT *xprt)
  393 {
  394         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  395         xprt->xp_socket->so_upcallarg = NULL;
  396         xprt->xp_socket->so_upcall = NULL;
  397         xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
  398         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  399 
  400         xprt_unregister(xprt);
  401 
  402         mtx_destroy(&xprt->xp_lock);
  403         if (xprt->xp_socket)
  404                 (void)soclose(xprt->xp_socket);
  405 
  406         if (xprt->xp_rtaddr.buf)
  407                 (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
  408         if (xprt->xp_ltaddr.buf)
  409                 (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
  410         (void) mem_free(xprt, sizeof (SVCXPRT));
  411         
  412 }
  413 
  414 static void
  415 svc_vc_rendezvous_destroy(SVCXPRT *xprt)
  416 {
  417 
  418         svc_vc_destroy_common(xprt);
  419 }
  420 
  421 static void
  422 svc_vc_destroy(SVCXPRT *xprt)
  423 {
  424         struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
  425 
  426         svc_vc_destroy_common(xprt);
  427 
  428         if (cd->mreq)
  429                 m_freem(cd->mreq);
  430         if (cd->mpending)
  431                 m_freem(cd->mpending);
  432         mem_free(cd, sizeof(*cd));
  433 }
  434 
  435 /*ARGSUSED*/
  436 static bool_t
  437 svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
  438 {
  439         return (FALSE);
  440 }
  441 
  442 static bool_t
  443 svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
  444 {
  445 
  446         return (FALSE);
  447 }
  448 
  449 static enum xprt_stat
  450 svc_vc_stat(SVCXPRT *xprt)
  451 {
  452         struct cf_conn *cd;
  453         struct mbuf *m;
  454         size_t n;
  455 
  456         cd = (struct cf_conn *)(xprt->xp_p1);
  457 
  458         if (cd->strm_stat == XPRT_DIED)
  459                 return (XPRT_DIED);
  460 
  461         /*
  462          * Return XPRT_MOREREQS if we have buffered data and we are
  463          * mid-record or if we have enough data for a record marker.
  464          */
  465         if (cd->mpending) {
  466                 if (cd->resid)
  467                         return (XPRT_MOREREQS);
  468                 n = 0;
  469                 m = cd->mpending;
  470                 while (m && n < sizeof(uint32_t)) {
  471                         n += m->m_len;
  472                         m = m->m_next;
  473                 }
  474                 if (n >= sizeof(uint32_t))
  475                         return (XPRT_MOREREQS);
  476         }
  477 
  478         return (XPRT_IDLE);
  479 }
  480 
  481 static bool_t
  482 svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
  483 {
  484         struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
  485         struct uio uio;
  486         struct mbuf *m;
  487         int error, rcvflag;
  488 
  489         for (;;) {
  490                 /*
  491                  * If we have an mbuf chain in cd->mpending, try to parse a
  492                  * record from it, leaving the result in cd->mreq. If we don't
  493                  * have a complete record, leave the partial result in
  494                  * cd->mreq and try to read more from the socket.
  495                  */
  496                 if (cd->mpending) {
  497                         /*
  498                          * If cd->resid is non-zero, we have part of the
  499                          * record already, otherwise we are expecting a record
  500                          * marker.
  501                          */
  502                         if (!cd->resid) {
  503                                 /*
  504                                  * See if there is enough data buffered to
  505                                  * make up a record marker. Make sure we can
  506                                  * handle the case where the record marker is
  507                                  * split across more than one mbuf.
  508                                  */
  509                                 size_t n = 0;
  510                                 uint32_t header;
  511 
  512                                 m = cd->mpending;
  513                                 while (n < sizeof(uint32_t) && m) {
  514                                         n += m->m_len;
  515                                         m = m->m_next;
  516                                 }
  517                                 if (n < sizeof(uint32_t))
  518                                         goto readmore;
  519                                 cd->mpending = m_pullup(cd->mpending, sizeof(uint32_t));
  520                                 memcpy(&header, mtod(cd->mpending, uint32_t *),
  521                                     sizeof(header));
  522                                 header = ntohl(header);
  523                                 cd->eor = (header & 0x80000000) != 0;
  524                                 cd->resid = header & 0x7fffffff;
  525                                 m_adj(cd->mpending, sizeof(uint32_t));
  526                         }
  527 
  528                         /*
  529                          * Start pulling off mbufs from cd->mpending
  530                          * until we either have a complete record or
  531                          * we run out of data. We use m_split to pull
  532                          * data - it will pull as much as possible and
  533                          * split the last mbuf if necessary.
  534                          */
  535                         while (cd->mpending && cd->resid) {
  536                                 m = cd->mpending;
  537                                 cd->mpending = m_split(cd->mpending, cd->resid,
  538                                     M_WAIT);
  539                                 if (cd->mreq)
  540                                         m_last(cd->mreq)->m_next = m;
  541                                 else
  542                                         cd->mreq = m;
  543                                 while (m) {
  544                                         cd->resid -= m->m_len;
  545                                         m = m->m_next;
  546                                 }
  547                         }
  548 
  549                         /*
  550                          * If cd->resid is zero now, we have managed to
  551                          * receive a record fragment from the stream. Check
  552                          * for the end-of-record mark to see if we need more.
  553                          */
  554                         if (cd->resid == 0) {
  555                                 if (!cd->eor)
  556                                         continue;
  557 
  558                                 /*
  559                                  * Success - we have a complete record in
  560                                  * cd->mreq.
  561                                  */
  562                                 xdrmbuf_create(&xprt->xp_xdrreq, cd->mreq, XDR_DECODE);
  563                                 cd->mreq = NULL;
  564                                 if (! xdr_callmsg(&xprt->xp_xdrreq, msg)) {
  565                                         XDR_DESTROY(&xprt->xp_xdrreq);
  566                                         return (FALSE);
  567                                 }
  568                                 xprt->xp_xid = msg->rm_xid;
  569 
  570                                 return (TRUE);
  571                         }
  572                 }
  573 
  574         readmore:
  575                 /*
  576                  * The socket upcall calls xprt_active() which will eventually
  577                  * cause the server to call us here. We attempt to
  578                  * read as much as possible from the socket and put
  579                  * the result in cd->mpending. If the read fails,
  580                  * we have drained both cd->mpending and the socket so
  581                  * we can call xprt_inactive().
  582                  *
  583                  * The lock protects us in the case where a new packet arrives
  584                  * on the socket after our call to soreceive fails with
  585                  * EWOULDBLOCK - the call to xprt_active() in the upcall will
  586                  * happen only after our call to xprt_inactive() which ensures
  587                  * that we will remain active. It might be possible to use
  588                  * SOCKBUF_LOCK for this - it's not clear to me what locks are
  589                  * held during the upcall.
  590                  */
  591                 mtx_lock(&xprt->xp_lock);
  592 
  593                 uio.uio_resid = 1000000000;
  594                 uio.uio_td = curthread;
  595                 m = NULL;
  596                 rcvflag = MSG_DONTWAIT;
  597                 error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
  598                     &rcvflag);
  599 
  600                 if (error == EWOULDBLOCK) {
  601                         xprt_inactive(xprt);
  602                         mtx_unlock(&xprt->xp_lock);
  603                         return (FALSE);
  604                 }
  605 
  606                 if (error) {
  607                         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  608                         xprt->xp_socket->so_upcallarg = NULL;
  609                         xprt->xp_socket->so_upcall = NULL;
  610                         xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
  611                         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  612                         xprt_inactive(xprt);
  613                         cd->strm_stat = XPRT_DIED;
  614                         mtx_unlock(&xprt->xp_lock);
  615                         return (FALSE);
  616                 }
  617 
  618                 if (!m) {
  619                         /*
  620                          * EOF - the other end has closed the socket.
  621                          */
  622                         cd->strm_stat = XPRT_DIED;
  623                         mtx_unlock(&xprt->xp_lock);
  624                         return (FALSE);
  625                 }
  626 
  627                 if (cd->mpending)
  628                         m_last(cd->mpending)->m_next = m;
  629                 else
  630                         cd->mpending = m;
  631 
  632                 mtx_unlock(&xprt->xp_lock);
  633         }
  634 }
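
The parsing above (and the matching encode path in svc_vc_reply() below) implements the standard RPC-over-TCP record marking: every fragment is preceded by a 4-byte mark whose high bit flags the last fragment of a record and whose remaining 31 bits carry the fragment length. A minimal, self-contained sketch of that framing, with hypothetical helper names not used by this file:

#include <stdint.h>
#include <arpa/inet.h>  /* htonl/ntohl; the kernel gets these elsewhere */

/* Build the 4-byte record mark for a fragment of fraglen bytes. */
static uint32_t
rm_encode(uint32_t fraglen, int last_fragment)
{
        return (htonl((last_fragment ? 0x80000000u : 0u) |
            (fraglen & 0x7fffffffu)));
}

/* Split a received record mark into the fields cd->resid and cd->eor hold. */
static void
rm_decode(uint32_t wire, uint32_t *fraglen, int *last_fragment)
{
        uint32_t h = ntohl(wire);

        *last_fragment = (h & 0x80000000u) != 0;
        *fraglen = h & 0x7fffffffu;
}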
  635 
  636 static bool_t
  637 svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
  638 {
  639 
  640         return (xdr_args(&xprt->xp_xdrreq, args_ptr));
  641 }
  642 
  643 static bool_t
  644 svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
  645 {
  646         XDR xdrs;
  647 
  648         /*
  649          * Free the request mbuf here - this allows us to handle
  650          * protocols where not all requests have replies
  651  * (e.g. NLM). Note that xdrmbuf_destroy handles being called
  652          * twice correctly - the mbuf will only be freed once.
  653          */
  654         XDR_DESTROY(&xprt->xp_xdrreq);
  655 
  656         xdrs.x_op = XDR_FREE;
  657         return (xdr_args(&xdrs, args_ptr));
  658 }
  659 
  660 static bool_t
  661 svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
  662 {
  663         struct mbuf *mrep;
  664         bool_t stat = FALSE;
  665         int error;
  666 
  667         /*
  668          * Leave space for record mark.
  669          */
  670         MGETHDR(mrep, M_WAIT, MT_DATA);
  671         MCLGET(mrep, M_WAIT);
  672         mrep->m_len = 0;
  673         mrep->m_data += sizeof(uint32_t);
  674 
  675         xdrmbuf_create(&xprt->xp_xdrrep, mrep, XDR_ENCODE);
  676         msg->rm_xid = xprt->xp_xid;
  677         if (xdr_replymsg(&xprt->xp_xdrrep, msg)) {
  678                 m_fixhdr(mrep);
  679 
  680                 /*
  681                  * Prepend a record marker containing the reply length.
  682                  */
  683                 M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
  684                 *mtod(mrep, uint32_t *) =
  685                         htonl(0x80000000 | (mrep->m_pkthdr.len
  686                                 - sizeof(uint32_t)));
  687                 error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
  688                     0, curthread);
  689                 if (!error) {
  690                         stat = TRUE;
  691                 }
  692         } else {
  693                 m_freem(mrep);
  694         }
  695 
  696         /*
  697          * This frees the request mbuf chain as well. The reply mbuf
  698          * chain was consumed by sosend.
  699          */
  700         XDR_DESTROY(&xprt->xp_xdrreq);
  701         XDR_DESTROY(&xprt->xp_xdrrep);
  702         xprt->xp_p2 = NULL;
  703 
  704         return (stat);
  705 }
  706 
  707 static bool_t
  708 svc_vc_null(void)
  709 {
  710 
  711         return (FALSE);
  712 }
  713 
  714 static void
  715 svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
  716 {
  717         SVCXPRT *xprt = (SVCXPRT *) arg;
  718 
  719         mtx_lock(&xprt->xp_lock);
  720         xprt_active(xprt);
  721         mtx_unlock(&xprt->xp_lock);
  722 }
  723 
  724 #if 0
  725 /*
  726  * Get the effective UID of the sending process. Used by rpcbind, keyserv
  727  * and rpc.yppasswdd on AF_LOCAL.
  728  */
  729 int
  730 __rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
  731         int sock, ret;
  732         gid_t egid;
  733         uid_t euid;
  734         struct sockaddr *sa;
  735 
  736         sock = transp->xp_fd;
  737         sa = (struct sockaddr *)transp->xp_rtaddr.buf;
  738         if (sa->sa_family == AF_LOCAL) {
  739                 ret = getpeereid(sock, &euid, &egid);
  740                 if (ret == 0)
  741                         *uid = euid;
  742                 return (ret);
  743         } else
  744                 return (-1);
  745 }
  746 #endif
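
For comparison, the check that the disabled __rpc_get_local_uid() performs is available to userland RPC services via getpeereid() on an AF_LOCAL socket; a minimal sketch (hypothetical function name, abbreviated error handling):

#include <sys/types.h>
#include <unistd.h>

/* Return 1 if the peer on an AF_LOCAL socket is effectively root. */
static int
peer_is_root(int sock)
{
        uid_t euid;
        gid_t egid;

        if (getpeereid(sock, &euid, &egid) != 0)
                return (0);     /* peer credentials unavailable */
        return (euid == 0);
}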
