FreeBSD/Linux Kernel Cross Reference
sys/rpc/svc_vc.c

    1 /*      $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $  */
    2 
    3 /*
    4  * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
    5  * unrestricted use provided that this legend is included on all tape
    6  * media and as a part of the software program in whole or part.  Users
    7  * may copy or modify Sun RPC without charge, but are not authorized
    8  * to license or distribute it to anyone else except as part of a product or
    9  * program developed by the user.
   10  * 
   11  * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
   12  * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
   13  * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
   14  * 
   15  * Sun RPC is provided with no support and without any obligation on the
   16  * part of Sun Microsystems, Inc. to assist in its use, correction,
   17  * modification or enhancement.
   18  * 
   19  * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
   20  * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
   21  * OR ANY PART THEREOF.
   22  * 
   23  * In no event will Sun Microsystems, Inc. be liable for any lost revenue
   24  * or profits or other special, indirect and consequential damages, even if
   25  * Sun has been advised of the possibility of such damages.
   26  * 
   27  * Sun Microsystems, Inc.
   28  * 2550 Garcia Avenue
   29  * Mountain View, California  94043
   30  */
   31 
   32 #if defined(LIBC_SCCS) && !defined(lint)
   33 static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
   34 static char *sccsid = "@(#)svc_tcp.c    2.2 88/08/01 4.0 RPCSRC";
   35 #endif
   36 #include <sys/cdefs.h>
   37 __FBSDID("$FreeBSD$");
   38 
   39 /*
    40  * svc_vc.c, Server side for connection-oriented RPC.
    41  *
    42  * Actually implements two flavors of transport -
    43  * a TCP rendezvouser (a listener and connection establisher)
    44  * and a record/TCP stream.
   45  */
   46 
   47 #include <sys/param.h>
   48 #include <sys/lock.h>
   49 #include <sys/kernel.h>
   50 #include <sys/malloc.h>
   51 #include <sys/mbuf.h>
   52 #include <sys/mutex.h>
   53 #include <sys/protosw.h>
   54 #include <sys/queue.h>
   55 #include <sys/socket.h>
   56 #include <sys/socketvar.h>
   57 #include <sys/systm.h>
   58 #include <sys/uio.h>
   59 #include <netinet/tcp.h>
   60 
   61 #include <rpc/rpc.h>
   62 
   63 #include <rpc/rpc_com.h>
   64 
   65 static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *);
   66 static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
   67 static void svc_vc_rendezvous_destroy(SVCXPRT *);
   68 static bool_t svc_vc_null(void);
   69 static void svc_vc_destroy(SVCXPRT *);
   70 static enum xprt_stat svc_vc_stat(SVCXPRT *);
   71 static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *);
   72 static bool_t svc_vc_getargs(SVCXPRT *, xdrproc_t, void *);
   73 static bool_t svc_vc_freeargs(SVCXPRT *, xdrproc_t, void *);
   74 static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *);
   75 static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
   76 static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
   77     void *in);
   78 static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
   79     struct sockaddr *raddr);
   80 static int svc_vc_accept(struct socket *head, struct socket **sop);
   81 static void svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
   82 
   83 static struct xp_ops svc_vc_rendezvous_ops = {
   84         .xp_recv =      svc_vc_rendezvous_recv,
   85         .xp_stat =      svc_vc_rendezvous_stat,
   86         .xp_getargs =   (bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
   87         .xp_reply =     (bool_t (*)(SVCXPRT *, struct rpc_msg *))svc_vc_null,
   88         .xp_freeargs =  (bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
   89         .xp_destroy =   svc_vc_rendezvous_destroy,
   90         .xp_control =   svc_vc_rendezvous_control
   91 };
   92 
   93 static struct xp_ops svc_vc_ops = {
   94         .xp_recv =      svc_vc_recv,
   95         .xp_stat =      svc_vc_stat,
   96         .xp_getargs =   svc_vc_getargs,
   97         .xp_reply =     svc_vc_reply,
   98         .xp_freeargs =  svc_vc_freeargs,
   99         .xp_destroy =   svc_vc_destroy,
  100         .xp_control =   svc_vc_control
  101 };
  102 
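/*
 * Illustrative sketch, not part of the original file: callers reach the
 * transport methods only through these ops vectors.  The real server
 * loop lives in sys/rpc/svc.c and typically dispatches through SVC_*
 * wrapper macros; the hypothetical helper below shows the same dispatch
 * by hand for a connection transport, assuming *msg has been prepared
 * the way the real loop prepares it (e.g. credential scratch buffers).
 */
static bool_t
example_recv_one(SVCXPRT *xprt, struct rpc_msg *msg)
{
        /* Only bother when the transport reports buffered request data. */
        if (xprt->xp_ops->xp_stat(xprt) != XPRT_MOREREQS)
                return (FALSE);

        /* svc_vc_recv() below parses one record from the TCP stream and
         * decodes the RPC call header into *msg. */
        return (xprt->xp_ops->xp_recv(xprt, msg));
}
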
  103 struct cf_conn {  /* kept in xprt->xp_p1 for actual connection */
  104         enum xprt_stat strm_stat;
  105         struct mbuf *mpending;  /* unparsed data read from the socket */
  106         struct mbuf *mreq;      /* current record being built from mpending */
  107         uint32_t resid;         /* number of bytes needed for fragment */
  108         bool_t eor;             /* reading last fragment of current record */
  109 };
  110 
  111 /*
  112  * Usage:
  113  *      xprt = svc_vc_create(pool, so, sendsize, recvsize);
  114  *
  115  * Creates, registers, and returns a TCP-based RPC transport.
  116  * Once *xprt is initialized, it is registered as a transport
  117  * (see xprt_register() in svc.h).  This routine returns
  118  * NULL if a problem occurred.
  119  *
  120  * The socket passed in is expected to be bound but not yet connected;
  121  * a socket that is already connected is wrapped by svc_vc_create_conn().
  122  *
  123  * Since streams do buffered I/O similar to stdio, the caller can specify
  124  * how big the send and receive buffers are via sendsize and recvsize;
  125  * 0 => use the system default.
  126  */
  127 SVCXPRT *
  128 svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
  129     size_t recvsize)
  130 {
  131         SVCXPRT *xprt;
  132         struct sockaddr* sa;
  133         int error;
  134 
  135         if (so->so_state & SS_ISCONNECTED) {
  136                 error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
  137                 if (error)
  138                         return (NULL);
  139                 xprt = svc_vc_create_conn(pool, so, sa);
  140                 free(sa, M_SONAME);
  141                 return (xprt);
  142         }
  143 
  144         xprt = mem_alloc(sizeof(SVCXPRT));
  145         mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
  146         xprt->xp_pool = pool;
  147         xprt->xp_socket = so;
  148         xprt->xp_p1 = NULL;
  149         xprt->xp_p2 = NULL;
  150         xprt->xp_p3 = NULL;
  151         xprt->xp_verf = _null_auth;
  152         xprt->xp_ops = &svc_vc_rendezvous_ops;
  153 
  154         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  155         if (error)
  156                 goto cleanup_svc_vc_create;
  157 
  158         xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
  159         xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
  160         xprt->xp_ltaddr.len = sa->sa_len;
  161         memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
  162         free(sa, M_SONAME);
  163 
  164         xprt->xp_rtaddr.maxlen = 0;
  165 
  166         xprt_register(xprt);
  167 
  168         solisten(so, SOMAXCONN, curthread);
  169 
  170         SOCKBUF_LOCK(&so->so_rcv);
  171         so->so_upcallarg = xprt;
  172         so->so_upcall = svc_vc_soupcall;
  173         so->so_rcv.sb_flags |= SB_UPCALL;
  174         SOCKBUF_UNLOCK(&so->so_rcv);
  175 
  176         return (xprt);
  177 cleanup_svc_vc_create:
  178         if (xprt)
  179                 mem_free(xprt, sizeof(*xprt));
  180         return (NULL);
  181 }
  182 
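/*
 * Illustrative sketch, not part of the original file: a rough example of
 * how a kernel RPC service might hand a bound TCP socket to
 * svc_vc_create() above.  The SVCPOOL, the sockaddr and the thread are
 * assumed to be supplied by the caller; error handling is minimal.
 */
static SVCXPRT *
example_make_listener(SVCPOOL *pool, struct sockaddr *addr, struct thread *td)
{
        struct socket *so;
        int error;

        error = socreate(addr->sa_family, &so, SOCK_STREAM, IPPROTO_TCP,
            td->td_ucred, td);
        if (error)
                return (NULL);
        error = sobind(so, addr, td);
        if (error) {
                soclose(so);
                return (NULL);
        }
        /* svc_vc_create() registers the transport, starts listening and
         * hooks up the receive upcall. */
        return (svc_vc_create(pool, so, 0, 0));
}
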
  183 /*
  184  * Create a new transport for a socket obtained via soaccept().
  185  */
  186 SVCXPRT *
  187 svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
  188 {
  189         SVCXPRT *xprt = NULL;
  190         struct cf_conn *cd = NULL;
  191         struct sockaddr* sa = NULL;
  192         struct sockopt opt;
  193         int one = 1;
  194         int error;
  195 
  196         bzero(&opt, sizeof(struct sockopt));
  197         opt.sopt_dir = SOPT_SET;
  198         opt.sopt_level = SOL_SOCKET;
  199         opt.sopt_name = SO_KEEPALIVE;
  200         opt.sopt_val = &one;
  201         opt.sopt_valsize = sizeof(one);
  202         error = sosetopt(so, &opt);
  203         if (error)
  204                 return (NULL);
  205 
  206         if (so->so_proto->pr_protocol == IPPROTO_TCP) {
  207                 bzero(&opt, sizeof(struct sockopt));
  208                 opt.sopt_dir = SOPT_SET;
  209                 opt.sopt_level = IPPROTO_TCP;
  210                 opt.sopt_name = TCP_NODELAY;
  211                 opt.sopt_val = &one;
  212                 opt.sopt_valsize = sizeof(one);
  213                 error = sosetopt(so, &opt);
  214                 if (error)
  215                         return (NULL);
  216         }
  217 
  218         cd = mem_alloc(sizeof(*cd));
  219         cd->strm_stat = XPRT_IDLE;
  220 
  221         xprt = mem_alloc(sizeof(SVCXPRT));
  222         mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
  223         xprt->xp_pool = pool;
  224         xprt->xp_socket = so;
  225         xprt->xp_p1 = cd;
  226         xprt->xp_p2 = NULL;
  227         xprt->xp_p3 = NULL;
  228         xprt->xp_verf = _null_auth;
  229         xprt->xp_ops = &svc_vc_ops;
  230 
  231         xprt->xp_rtaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
  232         xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
  233         xprt->xp_rtaddr.len = raddr->sa_len;
  234         memcpy(xprt->xp_rtaddr.buf, raddr, raddr->sa_len);
  235 
  236         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  237         if (error)
  238                 goto cleanup_svc_vc_create;
  239 
  240         xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
  241         xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
  242         xprt->xp_ltaddr.len = sa->sa_len;
  243         memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
  244         free(sa, M_SONAME);
  245 
  246         xprt_register(xprt);
  247 
  248         SOCKBUF_LOCK(&so->so_rcv);
  249         so->so_upcallarg = xprt;
  250         so->so_upcall = svc_vc_soupcall;
  251         so->so_rcv.sb_flags |= SB_UPCALL;
  252         SOCKBUF_UNLOCK(&so->so_rcv);
  253 
  254         /*
  255          * Throw the transport into the active list in case it already
  256          * has some data buffered.
  257          */
  258         mtx_lock(&xprt->xp_lock);
  259         xprt_active(xprt);
  260         mtx_unlock(&xprt->xp_lock);
  261 
  262         return (xprt);
  263 cleanup_svc_vc_create:
  264         if (xprt) {
  265                 if (xprt->xp_ltaddr.buf)
  266                         mem_free(xprt->xp_ltaddr.buf,
  267                             sizeof(struct sockaddr_storage));
  268                 if (xprt->xp_rtaddr.buf)
  269                         mem_free(xprt->xp_rtaddr.buf,
  270                             sizeof(struct sockaddr_storage));
  271                 mem_free(xprt, sizeof(*xprt));
  272         }
  273         if (cd)
  274                 mem_free(cd, sizeof(*cd));
  275         return (NULL);
  276 }
  277 
  278 /*
  279  * This does all of the accept except the final call to soaccept. The
  280  * caller will call soaccept after dropping its locks (soaccept may
  281  * call malloc).
  282  */
  283 int
  284 svc_vc_accept(struct socket *head, struct socket **sop)
  285 {
  286         int error = 0;
  287         struct socket *so;
  288 
  289         if ((head->so_options & SO_ACCEPTCONN) == 0) {
  290                 error = EINVAL;
  291                 goto done;
  292         }
  293 #ifdef MAC
  294         SOCK_LOCK(head);
  295         error = mac_socket_check_accept(curthread->td_ucred, head);
  296         SOCK_UNLOCK(head);
  297         if (error != 0)
  298                 goto done;
  299 #endif
  300         ACCEPT_LOCK();
  301         if (TAILQ_EMPTY(&head->so_comp)) {
  302                 ACCEPT_UNLOCK();
  303                 error = EWOULDBLOCK;
  304                 goto done;
  305         }
  306         so = TAILQ_FIRST(&head->so_comp);
  307         KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
  308         KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));
  309 
  310         /*
  311          * Before changing the flags on the socket, we have to bump the
  312          * reference count.  Otherwise, if the protocol calls sofree(),
  313          * the socket will be released due to a zero refcount.
  314          * XXX might not need soref() since this is simpler than kern_accept.
  315          */
  316         SOCK_LOCK(so);                  /* soref() and so_state update */
  317         soref(so);                      /* file descriptor reference */
  318 
  319         TAILQ_REMOVE(&head->so_comp, so, so_list);
  320         head->so_qlen--;
  321         so->so_state |= (head->so_state & SS_NBIO);
  322         so->so_qstate &= ~SQ_COMP;
  323         so->so_head = NULL;
  324 
  325         SOCK_UNLOCK(so);
  326         ACCEPT_UNLOCK();
  327 
  328         *sop = so;
  329 
  330         /* connection has been removed from the listen queue */
  331         KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
  332 done:
  333         return (error);
  334 }
  335 
  336 /*ARGSUSED*/
  337 static bool_t
  338 svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg)
  339 {
  340         struct socket *so = NULL;
  341         struct sockaddr *sa = NULL;
  342         int error;
  343 
  344         /*
  345          * The socket upcall calls xprt_active() which will eventually
  346          * cause the server to call us here. We attempt to accept a
  347          * connection from the socket and turn it into a new
  348          * transport. If the accept fails, we have drained all pending
  349          * connections so we call xprt_inactive().
  350          *
  351          * The lock protects us in the case where a new connection arrives
  352          * on the socket after our call to accept fails with
  353          * EWOULDBLOCK - the call to xprt_active() in the upcall will
  354          * happen only after our call to xprt_inactive() which ensures
  355          * that we will remain active. It might be possible to use
  356          * SOCKBUF_LOCK for this - it's not clear to me what locks are
  357          * held during the upcall.
  358          */
  359         mtx_lock(&xprt->xp_lock);
  360 
  361         error = svc_vc_accept(xprt->xp_socket, &so);
  362 
  363         if (error == EWOULDBLOCK) {
  364                 xprt_inactive(xprt);
  365                 mtx_unlock(&xprt->xp_lock);
  366                 return (FALSE);
  367         }
  368 
  369         if (error) {
  370                 SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  371                 xprt->xp_socket->so_upcallarg = NULL;
  372                 xprt->xp_socket->so_upcall = NULL;
  373                 xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
  374                 SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  375                 xprt_inactive(xprt);
  376                 mtx_unlock(&xprt->xp_lock);
  377                 return (FALSE);
  378         }
  379 
  380         mtx_unlock(&xprt->xp_lock);
  381 
  382         sa = NULL;
  383         error = soaccept(so, &sa);
  384 
  385         if (error) {
  386                 /*
  387                  * XXX not sure if I need to call sofree or soclose here.
  388                  */
  389                 if (sa)
  390                         free(sa, M_SONAME);
  391                 return (FALSE);
  392         }
  393 
  394         /*
  395          * svc_vc_create_conn will call xprt_register - we don't need
  396          * to do anything with the new connection.
  397          */
  398         if (!svc_vc_create_conn(xprt->xp_pool, so, sa))
  399                 soclose(so);
  400 
  401         free(sa, M_SONAME);
  402 
  403         return (FALSE); /* there is never an rpc msg to be processed */
  404 }
  405 
  406 /*ARGSUSED*/
  407 static enum xprt_stat
  408 svc_vc_rendezvous_stat(SVCXPRT *xprt)
  409 {
  410 
  411         return (XPRT_IDLE);
  412 }
  413 
  414 static void
  415 svc_vc_destroy_common(SVCXPRT *xprt)
  416 {
  417         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  418         xprt->xp_socket->so_upcallarg = NULL;
  419         xprt->xp_socket->so_upcall = NULL;
  420         xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
  421         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  422 
  423         xprt_unregister(xprt);
  424 
  425         mtx_destroy(&xprt->xp_lock);
  426         if (xprt->xp_socket)
  427                 (void)soclose(xprt->xp_socket);
  428 
  429         if (xprt->xp_rtaddr.buf)
  430                 (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
  431         if (xprt->xp_ltaddr.buf)
  432                 (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
  433         (void) mem_free(xprt, sizeof (SVCXPRT));
  434         
  435 }
  436 
  437 static void
  438 svc_vc_rendezvous_destroy(SVCXPRT *xprt)
  439 {
  440 
  441         svc_vc_destroy_common(xprt);
  442 }
  443 
  444 static void
  445 svc_vc_destroy(SVCXPRT *xprt)
  446 {
  447         struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
  448 
  449         svc_vc_destroy_common(xprt);
  450 
  451         if (cd->mreq)
  452                 m_freem(cd->mreq);
  453         if (cd->mpending)
  454                 m_freem(cd->mpending);
  455         mem_free(cd, sizeof(*cd));
  456 }
  457 
  458 /*ARGSUSED*/
  459 static bool_t
  460 svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
  461 {
  462         return (FALSE);
  463 }
  464 
  465 static bool_t
  466 svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
  467 {
  468 
  469         return (FALSE);
  470 }
  471 
  472 static enum xprt_stat
  473 svc_vc_stat(SVCXPRT *xprt)
  474 {
  475         struct cf_conn *cd;
  476         struct mbuf *m;
  477         size_t n;
  478 
  479         cd = (struct cf_conn *)(xprt->xp_p1);
  480 
  481         if (cd->strm_stat == XPRT_DIED)
  482                 return (XPRT_DIED);
  483 
  484         /*
  485          * Return XPRT_MOREREQS if we have buffered data and we are
  486          * mid-record or if we have enough data for a record marker.
  487          */
  488         if (cd->mpending) {
  489                 if (cd->resid)
  490                         return (XPRT_MOREREQS);
  491                 n = 0;
  492                 m = cd->mpending;
  493                 while (m && n < sizeof(uint32_t)) {
  494                         n += m->m_len;
  495                         m = m->m_next;
  496                 }
  497                 if (n >= sizeof(uint32_t))
  498                         return (XPRT_MOREREQS);
  499         }
  500 
  501         return (XPRT_IDLE);
  502 }
  503 
  504 static bool_t
  505 svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
  506 {
  507         struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
  508         struct uio uio;
  509         struct mbuf *m;
  510         int error, rcvflag;
  511 
  512         for (;;) {
  513                 /*
  514                  * If we have an mbuf chain in cd->mpending, try to parse a
  515                  * record from it, leaving the result in cd->mreq. If we don't
  516                  * have a complete record, leave the partial result in
  517                  * cd->mreq and try to read more from the socket.
  518                  */
  519                 if (cd->mpending) {
  520                         /*
  521                          * If cd->resid is non-zero, we have part of the
  522                          * record already, otherwise we are expecting a record
  523                          * marker.
  524                          */
  525                         if (!cd->resid) {
  526                                 /*
  527                                  * See if there is enough data buffered to
  528                                  * make up a record marker. Make sure we can
  529                                  * handle the case where the record marker is
  530                                  * split across more than one mbuf.
  531                                  */
  532                                 size_t n = 0;
  533                                 uint32_t header;
  534 
  535                                 m = cd->mpending;
  536                                 while (n < sizeof(uint32_t) && m) {
  537                                         n += m->m_len;
  538                                         m = m->m_next;
  539                                 }
  540                                 if (n < sizeof(uint32_t))
  541                                         goto readmore;
  542                                 cd->mpending = m_pullup(cd->mpending, sizeof(uint32_t));
  543                                 memcpy(&header, mtod(cd->mpending, uint32_t *),
  544                                     sizeof(header));
  545                                 header = ntohl(header);
  546                                 cd->eor = (header & 0x80000000) != 0;
  547                                 cd->resid = header & 0x7fffffff;
  548                                 m_adj(cd->mpending, sizeof(uint32_t));
  549                         }
  550 
  551                         /*
  552                          * Start pulling off mbufs from cd->mpending
  553                          * until we either have a complete record or
  554                          * we run out of data. We use m_split to pull
  555                          * data - it will pull as much as possible and
  556                          * split the last mbuf if necessary.
  557                          */
  558                         while (cd->mpending && cd->resid) {
  559                                 m = cd->mpending;
  560                                 cd->mpending = m_split(cd->mpending, cd->resid,
  561                                     M_WAIT);
  562                                 if (cd->mreq)
  563                                         m_last(cd->mreq)->m_next = m;
  564                                 else
  565                                         cd->mreq = m;
  566                                 while (m) {
  567                                         cd->resid -= m->m_len;
  568                                         m = m->m_next;
  569                                 }
  570                         }
  571 
  572                         /*
  573                          * If cd->resid is zero now, we have managed to
  574                          * receive a record fragment from the stream. Check
  575                          * for the end-of-record mark to see if we need more.
  576                          */
  577                         if (cd->resid == 0) {
  578                                 if (!cd->eor)
  579                                         continue;
  580 
  581                                 /*
  582                                  * Success - we have a complete record in
  583                                  * cd->mreq.
  584                                  */
  585                                 xdrmbuf_create(&xprt->xp_xdrreq, cd->mreq, XDR_DECODE);
  586                                 cd->mreq = NULL;
  587                                 if (! xdr_callmsg(&xprt->xp_xdrreq, msg)) {
  588                                         XDR_DESTROY(&xprt->xp_xdrreq);
  589                                         return (FALSE);
  590                                 }
  591                                 xprt->xp_xid = msg->rm_xid;
  592 
  593                                 return (TRUE);
  594                         }
  595                 }
  596 
  597         readmore:
  598                 /*
  599                  * The socket upcall calls xprt_active() which will eventually
  600                  * cause the server to call us here. We attempt to
  601                  * read as much as possible from the socket and put
  602                  * the result in cd->mpending. If the read fails,
  603                  * we have drained both cd->mpending and the socket so
  604                  * we can call xprt_inactive().
  605                  *
  606                  * The lock protects us in the case where a new packet arrives
  607                  * on the socket after our call to soreceive fails with
  608                  * EWOULDBLOCK - the call to xprt_active() in the upcall will
  609                  * happen only after our call to xprt_inactive() which ensures
  610                  * that we will remain active. It might be possible to use
  611          * SOCKBUF_LOCK for this - it's not clear to me what locks are
  612                  * held during the upcall.
  613                  */
  614                 mtx_lock(&xprt->xp_lock);
  615 
  616                 uio.uio_resid = 1000000000;
  617                 uio.uio_td = curthread;
  618                 m = NULL;
  619                 rcvflag = MSG_DONTWAIT;
  620                 error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
  621                     &rcvflag);
  622 
  623                 if (error == EWOULDBLOCK) {
  624                         xprt_inactive(xprt);
  625                         mtx_unlock(&xprt->xp_lock);
  626                         return (FALSE);
  627                 }
  628 
  629                 if (error) {
  630                         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  631                         xprt->xp_socket->so_upcallarg = NULL;
  632                         xprt->xp_socket->so_upcall = NULL;
  633                         xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
  634                         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  635                         xprt_inactive(xprt);
  636                         cd->strm_stat = XPRT_DIED;
  637                         mtx_unlock(&xprt->xp_lock);
  638                         return (FALSE);
  639                 }
  640 
  641                 if (!m) {
  642                         /*
  643                          * EOF - the other end has closed the socket.
  644                          */
  645                         cd->strm_stat = XPRT_DIED;
  646                         mtx_unlock(&xprt->xp_lock);
  647                         return (FALSE);
  648                 }
  649 
  650                 if (cd->mpending)
  651                         m_last(cd->mpending)->m_next = m;
  652                 else
  653                         cd->mpending = m;
  654 
  655                 mtx_unlock(&xprt->xp_lock);
  656         }
  657 }
  658 
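/*
 * Illustrative aside, not part of the original file: the record marker
 * handled above is a single big-endian 32-bit word - the top bit flags
 * the final fragment of a record and the low 31 bits give the fragment
 * length.  The hypothetical helpers below restate the arithmetic used
 * by svc_vc_recv() above and svc_vc_reply() below; for example, a
 * 124-byte final fragment carries the marker htonl(0x80000000 | 124),
 * i.e. the bytes 80 00 00 7c on the wire.
 */
static uint32_t
example_encode_record_mark(uint32_t fraglen, bool_t eor)
{

        return (htonl((eor ? 0x80000000 : 0) | (fraglen & 0x7fffffff)));
}

static void
example_decode_record_mark(uint32_t mark, uint32_t *fraglen, bool_t *eor)
{
        uint32_t header = ntohl(mark);

        *eor = (header & 0x80000000) != 0;
        *fraglen = header & 0x7fffffff;
}
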
  659 static bool_t
  660 svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
  661 {
  662 
  663         return (xdr_args(&xprt->xp_xdrreq, args_ptr));
  664 }
  665 
  666 static bool_t
  667 svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
  668 {
  669         XDR xdrs;
  670 
  671         /*
  672          * Free the request mbuf here - this allows us to handle
  673          * protocols where not all requests have replies
  674          * (i.e. NLM). Note that xdrmbuf_destroy handles being called
  675          * twice correctly - the mbuf will only be freed once.
  676          */
  677         XDR_DESTROY(&xprt->xp_xdrreq);
  678 
  679         xdrs.x_op = XDR_FREE;
  680         return (xdr_args(&xdrs, args_ptr));
  681 }
  682 
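/*
 * Illustrative sketch, not part of the original file: how a dispatcher
 * might use the argument routines above together with svc_vc_reply()
 * below, calling through the ops vector much as the code in
 * sys/rpc/svc.c does.  The service procedure itself is elided.
 */
static bool_t
example_handle_args(SVCXPRT *xprt, xdrproc_t xdr_argtype, void *argp)
{
        bool_t ok;

        /* Decode the call arguments out of the request mbuf chain that
         * svc_vc_recv() left behind in xprt->xp_xdrreq. */
        ok = xprt->xp_ops->xp_getargs(xprt, xdr_argtype, argp);
        if (ok) {
                /* ... the service procedure would run here and a reply
                 * would be sent via xp_reply (svc_vc_reply() below) ... */
        }

        /* Release the request mbuf and anything xdr_argtype allocated
         * while decoding (an XDR_FREE pass over the arguments). */
        (void)xprt->xp_ops->xp_freeargs(xprt, xdr_argtype, argp);
        return (ok);
}
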
  683 static bool_t
  684 svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
  685 {
  686         struct mbuf *mrep;
  687         bool_t stat = FALSE;
  688         int error;
  689 
  690         /*
  691          * Leave space for record mark.
  692          */
  693         MGETHDR(mrep, M_WAIT, MT_DATA);
  694         MCLGET(mrep, M_WAIT);
  695         mrep->m_len = 0;
  696         mrep->m_data += sizeof(uint32_t);
  697 
  698         xdrmbuf_create(&xprt->xp_xdrrep, mrep, XDR_ENCODE);
  699         msg->rm_xid = xprt->xp_xid;
  700         if (xdr_replymsg(&xprt->xp_xdrrep, msg)) {
  701                 m_fixhdr(mrep);
  702 
  703                 /*
  704                  * Prepend a record marker containing the reply length.
  705                  */
  706                 M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
  707                 *mtod(mrep, uint32_t *) =
  708                         htonl(0x80000000 | (mrep->m_pkthdr.len
  709                                 - sizeof(uint32_t)));
  710                 error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
  711                     0, curthread);
  712                 if (!error) {
  713                         stat = TRUE;
  714                 }
  715         } else {
  716                 m_freem(mrep);
  717         }
  718 
  719         /*
  720          * This frees the request mbuf chain as well. The reply mbuf
  721          * chain was consumed by sosend.
  722          */
  723         XDR_DESTROY(&xprt->xp_xdrreq);
  724         XDR_DESTROY(&xprt->xp_xdrrep);
  725         xprt->xp_p2 = NULL;
  726 
  727         return (stat);
  728 }
  729 
  730 static bool_t
  731 svc_vc_null(void)
  732 {
  733 
  734         return (FALSE);
  735 }
  736 
  737 static void
  738 svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
  739 {
  740         SVCXPRT *xprt = (SVCXPRT *) arg;
  741 
  742         mtx_lock(&xprt->xp_lock);
  743         xprt_active(xprt);
  744         mtx_unlock(&xprt->xp_lock);
  745 }
  746 
  747 #if 0
  748 /*
  749  * Get the effective UID of the sending process. Used by rpcbind, keyserv
  750  * and rpc.yppasswdd on AF_LOCAL.
  751  */
  752 int
  753 __rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
  754         int sock, ret;
  755         gid_t egid;
  756         uid_t euid;
  757         struct sockaddr *sa;
  758 
  759         sock = transp->xp_fd;
  760         sa = (struct sockaddr *)transp->xp_rtaddr.buf;
  761         if (sa->sa_family == AF_LOCAL) {
  762                 ret = getpeereid(sock, &euid, &egid);
  763                 if (ret == 0)
  764                         *uid = euid;
  765                 return (ret);
  766         } else
  767                 return (-1);
  768 }
  769 #endif
