FreeBSD/Linux Kernel Cross Reference
sys/rpc/svc_vc.c


/*	$NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc_tcp.c    2.2 88/08/01 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.4/sys/rpc/svc_vc.c 192142 2009-05-15 13:58:45Z dfr $");

/*
 * svc_vc.c, server side for connection-oriented RPC.
 *
 * Actually implements two flavors of transport:
 * a TCP rendezvouser (a listener and connection establisher)
 * and a record/TCP stream.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <netinet/tcp.h>

#include <rpc/rpc.h>

#include <rpc/rpc_com.h>

static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *);
static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
static void svc_vc_rendezvous_destroy(SVCXPRT *);
static bool_t svc_vc_null(void);
static void svc_vc_destroy(SVCXPRT *);
static enum xprt_stat svc_vc_stat(SVCXPRT *);
static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svc_vc_getargs(SVCXPRT *, xdrproc_t, void *);
static bool_t svc_vc_freeargs(SVCXPRT *, xdrproc_t, void *);
static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *);
static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
static bool_t svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq,
    void *in);
static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
    struct sockaddr *raddr);
static int svc_vc_accept(struct socket *head, struct socket **sop);
static void svc_vc_soupcall(struct socket *so, void *arg, int waitflag);

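/*
 * Method tables for the two transport flavors described above:
 * svc_vc_rendezvous_ops drives the listening socket (its only real
 * work is accepting new connections), while svc_vc_ops drives an
 * accepted connection and does the actual record parsing, argument
 * decoding and replies.
 */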
static struct xp_ops svc_vc_rendezvous_ops = {
	.xp_recv =	svc_vc_rendezvous_recv,
	.xp_stat =	svc_vc_rendezvous_stat,
	.xp_getargs =	(bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
	.xp_reply =	(bool_t (*)(SVCXPRT *, struct rpc_msg *))svc_vc_null,
	.xp_freeargs =	(bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
	.xp_destroy =	svc_vc_rendezvous_destroy,
	.xp_control =	svc_vc_rendezvous_control
};

static struct xp_ops svc_vc_ops = {
	.xp_recv =	svc_vc_recv,
	.xp_stat =	svc_vc_stat,
	.xp_getargs =	svc_vc_getargs,
	.xp_reply =	svc_vc_reply,
	.xp_freeargs =	svc_vc_freeargs,
	.xp_destroy =	svc_vc_destroy,
	.xp_control =	svc_vc_control
};

struct cf_conn {  /* kept in xprt->xp_p1 for actual connection */
	enum xprt_stat strm_stat;
	struct mbuf *mpending;	/* unparsed data read from the socket */
	struct mbuf *mreq;	/* current record being built from mpending */
	uint32_t resid;		/* number of bytes needed for fragment */
	bool_t eor;		/* reading last fragment of current record */
};
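
/*
 * RPC over TCP frames each message as one or more record fragments.
 * Each fragment begins with a 32-bit big-endian marker: the top bit
 * flags the last fragment of the record and the low 31 bits give the
 * fragment length.  For example, a marker of 0x8000001c means "last
 * fragment, 28 bytes follow"; svc_vc_recv() decodes this into the
 * eor and resid fields above.
 */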

/*
 * Usage:
 *	xprt = svc_vc_create(pool, sock, send_buf_size, recv_buf_size);
 *
 * Creates, registers, and returns an RPC transport over TCP.
 * Once *xprt is initialized, it is registered as a transport
 * (see svc.h, xprt_register).  This routine returns
 * NULL if a problem occurred.
 *
 * The socket passed in is expected to be bound, but
 * not yet connected.
 *
 * Since streams do buffered I/O similar to stdio, the caller can specify
 * how big the send and receive buffers are via the last two parameters;
 * 0 => use the system default.
 */
SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct sockaddr *sa;
	int error;

	if (so->so_state & SS_ISCONNECTED) {
		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}

	xprt = mem_alloc(sizeof(SVCXPRT));
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_p3 = NULL;
	xprt->xp_verf = _null_auth;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error)
		goto cleanup_svc_vc_create;

	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_ltaddr.len = sa->sa_len;
	memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt->xp_rtaddr.maxlen = 0;

	xprt_register(xprt);

	solisten(so, SOMAXCONN, curthread);

	SOCKBUF_LOCK(&so->so_rcv);
	so->so_upcallarg = xprt;
	so->so_upcall = svc_vc_soupcall;
	so->so_rcv.sb_flags |= SB_UPCALL;
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt)
		mem_free(xprt, sizeof(*xprt));
	return (NULL);
}

/*
 * Create a new transport for a socket obtained via soaccept().
 */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;
	struct sockaddr *sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

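	/*
	 * Enable SO_KEEPALIVE so that dead clients are eventually
	 * detected and the connection reaped and, for TCP, set
	 * TCP_NODELAY so that small replies are not delayed by the
	 * Nagle algorithm.
	 */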
	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error)
		return (NULL);

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error)
			return (NULL);
	}

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = mem_alloc(sizeof(SVCXPRT));
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_p3 = NULL;
	xprt->xp_verf = _null_auth;
	xprt->xp_ops = &svc_vc_ops;

	xprt->xp_rtaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_rtaddr.len = raddr->sa_len;
	memcpy(xprt->xp_rtaddr.buf, raddr, raddr->sa_len);

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error)
		goto cleanup_svc_vc_create;

	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_ltaddr.len = sa->sa_len;
	memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	so->so_upcallarg = xprt;
	so->so_upcall = svc_vc_soupcall;
	so->so_rcv.sb_flags |= SB_UPCALL;
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		if (xprt->xp_ltaddr.buf)
			mem_free(xprt->xp_ltaddr.buf,
			    sizeof(struct sockaddr_storage));
		if (xprt->xp_rtaddr.buf)
			mem_free(xprt->xp_rtaddr.buf,
			    sizeof(struct sockaddr_storage));
		mem_free(xprt, sizeof(*xprt));
	}
	if (cd)
		mem_free(cd, sizeof(*cd));
	return (NULL);
}

/*
 * This does all of the accept except the final call to soaccept. The
 * caller will call soaccept after dropping its locks (soaccept may
 * call malloc).
 */
int
svc_vc_accept(struct socket *head, struct socket **sop)
{
	int error = 0;
	struct socket *so;

	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto done;
	}
#ifdef MAC
	SOCK_LOCK(head);
	error = mac_socket_check_accept(curthread->td_ucred, head);
	SOCK_UNLOCK(head);
	if (error != 0)
		goto done;
#endif
	ACCEPT_LOCK();
	if (TAILQ_EMPTY(&head->so_comp)) {
		ACCEPT_UNLOCK();
		error = EWOULDBLOCK;
		goto done;
	}
	so = TAILQ_FIRST(&head->so_comp);
	KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
	KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));

	/*
	 * Before changing the flags on the socket, we have to bump the
	 * reference count.  Otherwise, if the protocol calls sofree(),
	 * the socket will be released due to a zero refcount.
	 * XXX might not need soref() since this is simpler than kern_accept.
	 */
	SOCK_LOCK(so);			/* soref() and so_state update */
	soref(so);			/* file descriptor reference */

	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_state |= (head->so_state & SS_NBIO);
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;

	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	*sop = so;

	/* connection has been removed from the listen queue */
	KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
done:
	return (error);
}

/*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK. The pool lock protects us from
		 * racing the upcall after our TAILQ_EMPTY() call
		 * returns false.
		 */
		ACCEPT_LOCK();
		mtx_lock(&xprt->xp_pool->sp_lock);
		if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
			xprt_inactive_locked(xprt);
		mtx_unlock(&xprt->xp_pool->sp_lock);
		ACCEPT_UNLOCK();
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		xprt->xp_socket->so_upcallarg = NULL;
		xprt->xp_socket->so_upcall = NULL;
		xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	sa = NULL;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection.
	 */
	if (!svc_vc_create_conn(xprt->xp_pool, so, sa))
		soclose(so);

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}

/*ARGSUSED*/
static enum xprt_stat
svc_vc_rendezvous_stat(SVCXPRT *xprt)
{

	return (XPRT_IDLE);
}

static void
svc_vc_destroy_common(SVCXPRT *xprt)
{
	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	xprt->xp_socket->so_upcallarg = NULL;
	xprt->xp_socket->so_upcall = NULL;
	xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);

	xprt_unregister(xprt);

	sx_destroy(&xprt->xp_lock);
	if (xprt->xp_socket)
		(void)soclose(xprt->xp_socket);

	if (xprt->xp_rtaddr.buf)
		(void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		(void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	(void) mem_free(xprt, sizeof (SVCXPRT));
}

static void
svc_vc_rendezvous_destroy(SVCXPRT *xprt)
{

	svc_vc_destroy_common(xprt);
}

static void
svc_vc_destroy(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;

	svc_vc_destroy_common(xprt);

	if (cd->mreq)
		m_freem(cd->mreq);
	if (cd->mpending)
		m_freem(cd->mpending);
	mem_free(cd, sizeof(*cd));
}

/*ARGSUSED*/
static bool_t
svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}

static bool_t
svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}

static enum xprt_stat
svc_vc_stat(SVCXPRT *xprt)
{
	struct cf_conn *cd;
	struct mbuf *m;
	size_t n;

	cd = (struct cf_conn *)(xprt->xp_p1);

	if (cd->strm_stat == XPRT_DIED)
		return (XPRT_DIED);

	/*
	 * Return XPRT_MOREREQS if we have buffered data and we are
	 * mid-record or if we have enough data for a record
	 * marker. Since this is only a hint, we read mpending and
	 * resid outside the lock. We do need to take the lock if we
	 * have to traverse the mbuf chain.
	 */
	if (cd->mpending) {
		if (cd->resid)
			return (XPRT_MOREREQS);
		n = 0;
		sx_xlock(&xprt->xp_lock);
		m = cd->mpending;
		while (m && n < sizeof(uint32_t)) {
			n += m->m_len;
			m = m->m_next;
		}
		sx_xunlock(&xprt->xp_lock);
		if (n >= sizeof(uint32_t))
			return (XPRT_MOREREQS);
	}

	if (soreadable(xprt->xp_socket))
		return (XPRT_MOREREQS);

	return (XPRT_IDLE);
}

static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m;
	int error, rcvflag;

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/*
		 * If we have an mbuf chain in cd->mpending, try to parse a
		 * record from it, leaving the result in cd->mreq. If we don't
		 * have a complete record, leave the partial result in
		 * cd->mreq and try to read more from the socket.
		 */
		if (cd->mpending) {
			/*
			 * If cd->resid is non-zero, we have part of the
			 * record already, otherwise we are expecting a record
			 * marker.
			 */
			if (!cd->resid) {
				/*
				 * See if there is enough data buffered to
				 * make up a record marker. Make sure we can
				 * handle the case where the record marker is
				 * split across more than one mbuf.
				 */
				size_t n = 0;
				uint32_t header;

				m = cd->mpending;
				while (n < sizeof(uint32_t) && m) {
					n += m->m_len;
					m = m->m_next;
				}
				if (n < sizeof(uint32_t))
					goto readmore;
				/*
				 * Copy the marker out with m_copydata
				 * rather than m_pullup: we have verified
				 * that four bytes are buffered, and a
				 * failed m_pullup would free the chain.
				 */
				m_copydata(cd->mpending, 0, sizeof(header),
				    (char *)&header);
				header = ntohl(header);
				cd->eor = (header & 0x80000000) != 0;
				cd->resid = header & 0x7fffffff;
				m_adj(cd->mpending, sizeof(uint32_t));
			}

			/*
			 * Start pulling off mbufs from cd->mpending
			 * until we either have a complete record or
			 * we run out of data. We use m_split to pull
			 * data - it will pull as much as possible and
			 * split the last mbuf if necessary.
			 */
			while (cd->mpending && cd->resid) {
				m = cd->mpending;
				cd->mpending = m_split(cd->mpending, cd->resid,
				    M_WAIT);
				if (cd->mreq)
					m_last(cd->mreq)->m_next = m;
				else
					cd->mreq = m;
				while (m) {
					cd->resid -= m->m_len;
					m = m->m_next;
				}
			}

			/*
			 * If cd->resid is zero now, we have managed to
			 * receive a record fragment from the stream. Check
			 * for the end-of-record mark to see if we need more.
			 */
			if (cd->resid == 0) {
				if (!cd->eor)
					continue;

				/*
				 * Success - we have a complete record in
				 * cd->mreq.
				 */
				xdrmbuf_create(&xprt->xp_xdrreq, cd->mreq,
				    XDR_DECODE);
				cd->mreq = NULL;
				sx_xunlock(&xprt->xp_lock);
				if (!xdr_callmsg(&xprt->xp_xdrreq, msg)) {
					XDR_DESTROY(&xprt->xp_xdrreq);
					return (FALSE);
				}
				xprt->xp_xid = msg->rm_xid;

				return (TRUE);
			}
		}

	readmore:
		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here. We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending. If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		m = NULL;
		rcvflag = MSG_DONTWAIT;
		error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
		    &rcvflag);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK. The pool lock protects us from
			 * racing the upcall after our soreadable()
			 * call returns false.
			 */
			mtx_lock(&xprt->xp_pool->sp_lock);
			if (!soreadable(xprt->xp_socket))
				xprt_inactive_locked(xprt);
			mtx_unlock(&xprt->xp_pool->sp_lock);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (error) {
			SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
			xprt->xp_socket->so_upcallarg = NULL;
			xprt->xp_socket->so_upcall = NULL;
			xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
			SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
			xprt_inactive(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}

static bool_t
svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
{

	return (xdr_args(&xprt->xp_xdrreq, args_ptr));
}

static bool_t
svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
{
	XDR xdrs;

	/*
	 * Free the request mbuf here - this allows us to handle
	 * protocols where not all requests have replies
	 * (i.e. NLM). Note that xdrmbuf_destroy handles being called
	 * twice correctly - the mbuf will only be freed once.
	 */
	XDR_DESTROY(&xprt->xp_xdrreq);

	xdrs.x_op = XDR_FREE;
	return (xdr_args(&xdrs, args_ptr));
}

static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct mbuf *mrep;
	bool_t stat = FALSE;
	int error;

	/*
	 * Leave space for record mark.
	 */
	MGETHDR(mrep, M_WAIT, MT_DATA);
	MCLGET(mrep, M_WAIT);
	mrep->m_len = 0;
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xprt->xp_xdrrep, mrep, XDR_ENCODE);
	msg->rm_xid = xprt->xp_xid;
	if (xdr_replymsg(&xprt->xp_xdrrep, msg)) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
		*mtod(mrep, uint32_t *) =
			htonl(0x80000000 | (mrep->m_pkthdr.len
				- sizeof(uint32_t)));
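		/*
		 * For example, a 124-byte encoded reply gives
		 * m_pkthdr.len == 128 after the prepend, so the marker
		 * is htonl(0x80000000 | 124): last fragment, 124 bytes.
		 */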
		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	/*
	 * This frees the request mbuf chain as well. The reply mbuf
	 * chain was consumed by sosend.
	 */
	XDR_DESTROY(&xprt->xp_xdrreq);
	XDR_DESTROY(&xprt->xp_xdrrep);
	xprt->xp_p2 = NULL;

	return (stat);
}

static bool_t
svc_vc_null(void)
{

	return (FALSE);
}

static void
svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	xprt_active(xprt);
}

#if 0
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr.buf;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif
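
#if 0
/*
 * Sketch (editorial, not part of the original file): how a kernel RPC
 * service might hand a bound TCP socket to svc_vc_create().  The
 * SVCPOOL is assumed to come from the generic server code in svc.c;
 * the socket calls are standard kernel KPIs.
 */
static SVCXPRT *
example_create_tcp_transport(SVCPOOL *pool, struct sockaddr *bindaddr)
{
	struct thread *td = curthread;
	struct socket *so;
	int error;

	/* Create a kernel TCP socket. */
	error = socreate(bindaddr->sa_family, &so, SOCK_STREAM,
	    IPPROTO_TCP, td->td_ucred, td);
	if (error)
		return (NULL);

	/* Bind the service address; svc_vc_create() calls solisten(). */
	error = sobind(so, bindaddr, td);
	if (error) {
		soclose(so);
		return (NULL);
	}

	/* 0, 0 => use the system default buffer sizes. */
	return (svc_vc_create(pool, so, 0, 0));
}
#endif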
