
FreeBSD/Linux Kernel Cross Reference
sys/rpc/svc_vc.c


    1 /*      $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $  */
    2 
    3 /*
    4  * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
    5  * unrestricted use provided that this legend is included on all tape
    6  * media and as a part of the software program in whole or part.  Users
    7  * may copy or modify Sun RPC without charge, but are not authorized
    8  * to license or distribute it to anyone else except as part of a product or
    9  * program developed by the user.
   10  * 
   11  * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
   12  * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
   13  * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
   14  * 
   15  * Sun RPC is provided with no support and without any obligation on the
   16  * part of Sun Microsystems, Inc. to assist in its use, correction,
   17  * modification or enhancement.
   18  * 
   19  * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
   20  * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
   21  * OR ANY PART THEREOF.
   22  * 
   23  * In no event will Sun Microsystems, Inc. be liable for any lost revenue
   24  * or profits or other special, indirect and consequential damages, even if
   25  * Sun has been advised of the possibility of such damages.
   26  * 
   27  * Sun Microsystems, Inc.
   28  * 2550 Garcia Avenue
   29  * Mountain View, California  94043
   30  */
   31 
   32 #if defined(LIBC_SCCS) && !defined(lint)
   33 static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
   34 static char *sccsid = "@(#)svc_tcp.c    2.2 88/08/01 4.0 RPCSRC";
   35 #endif
   36 #include <sys/cdefs.h>
   37 __FBSDID("$FreeBSD$");
   38 
   39 /*
    40  * svc_vc.c, Server side for Connection-Oriented RPC.
   41  *
    42  * Actually implements two flavors of transport -
    43  * a tcp rendezvouser (a listener and connection establisher)
    44  * and a record/tcp stream.
   45  */
   46 
   47 #include <sys/param.h>
   48 #include <sys/lock.h>
   49 #include <sys/kernel.h>
   50 #include <sys/malloc.h>
   51 #include <sys/mbuf.h>
   52 #include <sys/mutex.h>
   53 #include <sys/proc.h>
   54 #include <sys/protosw.h>
   55 #include <sys/queue.h>
   56 #include <sys/socket.h>
   57 #include <sys/socketvar.h>
   58 #include <sys/sx.h>
   59 #include <sys/systm.h>
   60 #include <sys/uio.h>
   61 
   62 #include <net/vnet.h>
   63 
   64 #include <netinet/tcp.h>
   65 
   66 #include <rpc/rpc.h>
   67 
   68 #include <rpc/rpc_com.h>
   69 
   70 #include <security/mac/mac_framework.h>
   71 
   72 static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
   73     struct sockaddr **, struct mbuf **);
   74 static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
   75 static void svc_vc_rendezvous_destroy(SVCXPRT *);
   76 static bool_t svc_vc_null(void);
   77 static void svc_vc_destroy(SVCXPRT *);
   78 static enum xprt_stat svc_vc_stat(SVCXPRT *);
   79 static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
   80     struct sockaddr **, struct mbuf **);
   81 static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
   82     struct sockaddr *, struct mbuf *);
   83 static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
   84 static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
   85     void *in);
   86 static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
   87     struct sockaddr *raddr);
   88 static int svc_vc_accept(struct socket *head, struct socket **sop);
   89 static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
   90 
   91 static struct xp_ops svc_vc_rendezvous_ops = {
   92         .xp_recv =      svc_vc_rendezvous_recv,
   93         .xp_stat =      svc_vc_rendezvous_stat,
   94         .xp_reply =     (bool_t (*)(SVCXPRT *, struct rpc_msg *,
   95                 struct sockaddr *, struct mbuf *))svc_vc_null,
   96         .xp_destroy =   svc_vc_rendezvous_destroy,
   97         .xp_control =   svc_vc_rendezvous_control
   98 };
   99 
  100 static struct xp_ops svc_vc_ops = {
  101         .xp_recv =      svc_vc_recv,
  102         .xp_stat =      svc_vc_stat,
  103         .xp_reply =     svc_vc_reply,
  104         .xp_destroy =   svc_vc_destroy,
  105         .xp_control =   svc_vc_control
  106 };
  107 
  108 struct cf_conn {  /* kept in xprt->xp_p1 for actual connection */
  109         enum xprt_stat strm_stat;
  110         struct mbuf *mpending;  /* unparsed data read from the socket */
  111         struct mbuf *mreq;      /* current record being built from mpending */
  112         uint32_t resid;         /* number of bytes needed for fragment */
  113         bool_t eor;             /* reading last fragment of current record */
  114 };
  115 
   116 /*
   117  * Usage:
   118  *      xprt = svc_vc_create(pool, so, send_buf_size, recv_buf_size);
   119  *
   120  * Creates, registers, and returns an (RPC) TCP-based transport.
   121  * Once *xprt is initialized, it is registered as a transport
   122  * (see svc.h, xprt_register).  This routine returns
   123  * NULL if a problem occurred.
   124  *
   125  * The socket passed in is expected to be bound, but not yet
   126  * connected.
   127  *
   128  * Since streams do buffered io similar to stdio, the caller can specify
   129  * how big the send and receive buffers are via the sendsize and
   130  * recvsize parameters; 0 => use the system default.
   131  */
  132 SVCXPRT *
  133 svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
  134     size_t recvsize)
  135 {
  136         SVCXPRT *xprt;
  137         struct sockaddr* sa;
  138         int error;
  139 
  140         SOCK_LOCK(so);
  141         if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
  142                 SOCK_UNLOCK(so);
  143                 error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
  144                 if (error)
  145                         return (NULL);
  146                 xprt = svc_vc_create_conn(pool, so, sa);
  147                 free(sa, M_SONAME);
  148                 return (xprt);
  149         }
  150         SOCK_UNLOCK(so);
  151 
  152         xprt = svc_xprt_alloc();
  153         sx_init(&xprt->xp_lock, "xprt->xp_lock");
  154         xprt->xp_pool = pool;
  155         xprt->xp_socket = so;
  156         xprt->xp_p1 = NULL;
  157         xprt->xp_p2 = NULL;
  158         xprt->xp_ops = &svc_vc_rendezvous_ops;
  159 
  160         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  161         if (error) {
  162                 goto cleanup_svc_vc_create;
  163         }
  164 
  165         memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
  166         free(sa, M_SONAME);
  167 
  168         xprt_register(xprt);
  169 
  170         solisten(so, SOMAXCONN, curthread);
  171 
  172         SOCKBUF_LOCK(&so->so_rcv);
  173         xprt->xp_upcallset = 1;
  174         soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
  175         SOCKBUF_UNLOCK(&so->so_rcv);
  176 
  177         return (xprt);
  178 cleanup_svc_vc_create:
  179         if (xprt)
  180                 svc_xprt_free(xprt);
  181         return (NULL);
  182 }
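
/*
 * Editor's sketch (illustrative only, not part of svc_vc.c): one way a
 * kernel RPC service might hand a freshly created, bound-but-unconnected
 * TCP socket to svc_vc_create() above.  It assumes the svcpool_create(),
 * socreate() and sobind() interfaces as they exist in this era of the
 * tree, that a NULL sysctl base is acceptable to svcpool_create(), and
 * it omits error unwinding.  svc_vc_create() itself calls solisten().
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <netinet/in.h>
#include <rpc/rpc.h>

static SVCXPRT *
example_tcp_rendezvous(struct thread *td, u_short port)
{
        SVCPOOL *pool;
        struct socket *so;
        struct sockaddr_in sin;

        /* Pool that will own the transport; the name is arbitrary here. */
        pool = svcpool_create("example", NULL);

        /* Bound, not yet connected socket, as the comment above requires. */
        if (socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
            td->td_ucred, td) != 0)
                return (NULL);
        bzero(&sin, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_len = sizeof(sin);
        sin.sin_port = htons(port);
        if (sobind(so, (struct sockaddr *)&sin, td) != 0)
                return (NULL);

        /* 0/0 => use the system default buffer sizes. */
        return (svc_vc_create(pool, so, 0, 0));
}
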
  183 
  184 /*
   185  * Create a new transport for a socket obtained via soaccept().
  186  */
  187 SVCXPRT *
  188 svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
  189 {
  190         SVCXPRT *xprt = NULL;
  191         struct cf_conn *cd = NULL;
  192         struct sockaddr* sa = NULL;
  193         struct sockopt opt;
  194         int one = 1;
  195         int error;
  196 
  197         bzero(&opt, sizeof(struct sockopt));
  198         opt.sopt_dir = SOPT_SET;
  199         opt.sopt_level = SOL_SOCKET;
  200         opt.sopt_name = SO_KEEPALIVE;
  201         opt.sopt_val = &one;
  202         opt.sopt_valsize = sizeof(one);
  203         error = sosetopt(so, &opt);
  204         if (error) {
  205                 return (NULL);
  206         }
  207 
  208         if (so->so_proto->pr_protocol == IPPROTO_TCP) {
  209                 bzero(&opt, sizeof(struct sockopt));
  210                 opt.sopt_dir = SOPT_SET;
  211                 opt.sopt_level = IPPROTO_TCP;
  212                 opt.sopt_name = TCP_NODELAY;
  213                 opt.sopt_val = &one;
  214                 opt.sopt_valsize = sizeof(one);
  215                 error = sosetopt(so, &opt);
  216                 if (error) {
  217                         return (NULL);
  218                 }
  219         }
  220 
  221         cd = mem_alloc(sizeof(*cd));
  222         cd->strm_stat = XPRT_IDLE;
  223 
  224         xprt = svc_xprt_alloc();
  225         sx_init(&xprt->xp_lock, "xprt->xp_lock");
  226         xprt->xp_pool = pool;
  227         xprt->xp_socket = so;
  228         xprt->xp_p1 = cd;
  229         xprt->xp_p2 = NULL;
  230         xprt->xp_ops = &svc_vc_ops;
  231 
  232         /*
  233          * See http://www.connectathon.org/talks96/nfstcp.pdf - client
  234          * has a 5 minute timer, server has a 6 minute timer.
  235          */
  236         xprt->xp_idletimeout = 6 * 60;
  237 
  238         memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);
  239 
  240         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  241         if (error)
  242                 goto cleanup_svc_vc_create;
  243 
  244         memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
  245         free(sa, M_SONAME);
  246 
  247         xprt_register(xprt);
  248 
  249         SOCKBUF_LOCK(&so->so_rcv);
  250         xprt->xp_upcallset = 1;
  251         soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
  252         SOCKBUF_UNLOCK(&so->so_rcv);
  253 
  254         /*
  255          * Throw the transport into the active list in case it already
  256          * has some data buffered.
  257          */
  258         sx_xlock(&xprt->xp_lock);
  259         xprt_active(xprt);
  260         sx_xunlock(&xprt->xp_lock);
  261 
  262         return (xprt);
  263 cleanup_svc_vc_create:
  264         if (xprt) {
  265                 mem_free(xprt, sizeof(*xprt));
  266         }
  267         if (cd)
  268                 mem_free(cd, sizeof(*cd));
  269         return (NULL);
  270 }
  271 
  272 /*
  273  * This does all of the accept except the final call to soaccept. The
  274  * caller will call soaccept after dropping its locks (soaccept may
  275  * call malloc).
  276  */
  277 int
  278 svc_vc_accept(struct socket *head, struct socket **sop)
  279 {
  280         int error = 0;
  281         struct socket *so;
  282 
  283         if ((head->so_options & SO_ACCEPTCONN) == 0) {
  284                 error = EINVAL;
  285                 goto done;
  286         }
  287 #ifdef MAC
  288         error = mac_socket_check_accept(curthread->td_ucred, head);
  289         if (error != 0)
  290                 goto done;
  291 #endif
  292         ACCEPT_LOCK();
  293         if (TAILQ_EMPTY(&head->so_comp)) {
  294                 ACCEPT_UNLOCK();
  295                 error = EWOULDBLOCK;
  296                 goto done;
  297         }
  298         so = TAILQ_FIRST(&head->so_comp);
  299         KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
  300         KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));
  301 
  302         /*
  303          * Before changing the flags on the socket, we have to bump the
  304          * reference count.  Otherwise, if the protocol calls sofree(),
  305          * the socket will be released due to a zero refcount.
  306          * XXX might not need soref() since this is simpler than kern_accept.
  307          */
  308         SOCK_LOCK(so);                  /* soref() and so_state update */
  309         soref(so);                      /* file descriptor reference */
  310 
  311         TAILQ_REMOVE(&head->so_comp, so, so_list);
  312         head->so_qlen--;
  313         so->so_state |= (head->so_state & SS_NBIO);
  314         so->so_qstate &= ~SQ_COMP;
  315         so->so_head = NULL;
  316 
  317         SOCK_UNLOCK(so);
  318         ACCEPT_UNLOCK();
  319 
  320         *sop = so;
  321 
  322         /* connection has been removed from the listen queue */
  323         KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
  324 done:
  325         return (error);
  326 }
  327 
  328 /*ARGSUSED*/
  329 static bool_t
  330 svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
  331     struct sockaddr **addrp, struct mbuf **mp)
  332 {
  333         struct socket *so = NULL;
  334         struct sockaddr *sa = NULL;
  335         int error;
  336         SVCXPRT *new_xprt;
  337 
  338         /*
  339          * The socket upcall calls xprt_active() which will eventually
  340          * cause the server to call us here. We attempt to accept a
  341          * connection from the socket and turn it into a new
  342          * transport. If the accept fails, we have drained all pending
  343          * connections so we call xprt_inactive().
  344          */
  345         sx_xlock(&xprt->xp_lock);
  346 
  347         error = svc_vc_accept(xprt->xp_socket, &so);
  348 
  349         if (error == EWOULDBLOCK) {
  350                 /*
  351                  * We must re-test for new connections after taking
  352                  * the lock to protect us in the case where a new
  353                  * connection arrives after our call to accept fails
  354                  * with EWOULDBLOCK. The pool lock protects us from
  355                  * racing the upcall after our TAILQ_EMPTY() call
  356                  * returns false.
  357                  */
  358                 ACCEPT_LOCK();
  359                 mtx_lock(&xprt->xp_pool->sp_lock);
  360                 if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
  361                         xprt_inactive_locked(xprt);
  362                 mtx_unlock(&xprt->xp_pool->sp_lock);
  363                 ACCEPT_UNLOCK();
  364                 sx_xunlock(&xprt->xp_lock);
  365                 return (FALSE);
  366         }
  367 
  368         if (error) {
  369                 SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  370                 if (xprt->xp_upcallset) {
  371                         xprt->xp_upcallset = 0;
  372                         soupcall_clear(xprt->xp_socket, SO_RCV);
  373                 }
  374                 SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  375                 xprt_inactive(xprt);
  376                 sx_xunlock(&xprt->xp_lock);
  377                 return (FALSE);
  378         }
  379 
  380         sx_xunlock(&xprt->xp_lock);
  381 
  382         sa = 0;
  383         error = soaccept(so, &sa);
  384 
  385         if (error) {
  386                 /*
  387                  * XXX not sure if I need to call sofree or soclose here.
  388                  */
  389                 if (sa)
  390                         free(sa, M_SONAME);
  391                 return (FALSE);
  392         }
  393 
  394         /*
  395          * svc_vc_create_conn will call xprt_register - we don't need
   396  * to do anything with the new connection except release it.
  397          */
  398         new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
  399         if (!new_xprt) {
  400                 soclose(so);
  401         } else {
  402                 SVC_RELEASE(new_xprt);
  403         }
  404 
  405         free(sa, M_SONAME);
  406 
  407         return (FALSE); /* there is never an rpc msg to be processed */
  408 }
  409 
  410 /*ARGSUSED*/
  411 static enum xprt_stat
  412 svc_vc_rendezvous_stat(SVCXPRT *xprt)
  413 {
  414 
  415         return (XPRT_IDLE);
  416 }
  417 
  418 static void
  419 svc_vc_destroy_common(SVCXPRT *xprt)
  420 {
  421         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  422         if (xprt->xp_upcallset) {
  423                 xprt->xp_upcallset = 0;
  424                 soupcall_clear(xprt->xp_socket, SO_RCV);
  425         }
  426         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  427 
  428         sx_destroy(&xprt->xp_lock);
  429         if (xprt->xp_socket)
  430                 (void)soclose(xprt->xp_socket);
  431 
  432         if (xprt->xp_netid)
  433                 (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
  434         svc_xprt_free(xprt);
  435 }
  436 
  437 static void
  438 svc_vc_rendezvous_destroy(SVCXPRT *xprt)
  439 {
  440 
  441         svc_vc_destroy_common(xprt);
  442 }
  443 
  444 static void
  445 svc_vc_destroy(SVCXPRT *xprt)
  446 {
  447         struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
  448 
  449         svc_vc_destroy_common(xprt);
  450 
  451         if (cd->mreq)
  452                 m_freem(cd->mreq);
  453         if (cd->mpending)
  454                 m_freem(cd->mpending);
  455         mem_free(cd, sizeof(*cd));
  456 }
  457 
  458 /*ARGSUSED*/
  459 static bool_t
  460 svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
  461 {
  462         return (FALSE);
  463 }
  464 
  465 static bool_t
  466 svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
  467 {
  468 
  469         return (FALSE);
  470 }
  471 
  472 static enum xprt_stat
  473 svc_vc_stat(SVCXPRT *xprt)
  474 {
  475         struct cf_conn *cd;
  476         struct mbuf *m;
  477         size_t n;
  478 
  479         cd = (struct cf_conn *)(xprt->xp_p1);
  480 
  481         if (cd->strm_stat == XPRT_DIED)
  482                 return (XPRT_DIED);
  483 
  484         /*
  485          * Return XPRT_MOREREQS if we have buffered data and we are
  486          * mid-record or if we have enough data for a record
  487          * marker. Since this is only a hint, we read mpending and
  488          * resid outside the lock. We do need to take the lock if we
  489          * have to traverse the mbuf chain.
  490          */
  491         if (cd->mpending) {
  492                 if (cd->resid)
  493                         return (XPRT_MOREREQS);
  494                 n = 0;
  495                 sx_xlock(&xprt->xp_lock);
  496                 m = cd->mpending;
  497                 while (m && n < sizeof(uint32_t)) {
  498                         n += m->m_len;
  499                         m = m->m_next;
  500                 }
  501                 sx_xunlock(&xprt->xp_lock);
  502                 if (n >= sizeof(uint32_t))
  503                         return (XPRT_MOREREQS);
  504         }
  505 
  506         if (soreadable(xprt->xp_socket))
  507                 return (XPRT_MOREREQS);
  508 
  509         return (XPRT_IDLE);
  510 }
  511 
  512 static bool_t
  513 svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
  514     struct sockaddr **addrp, struct mbuf **mp)
  515 {
  516         struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
  517         struct uio uio;
  518         struct mbuf *m;
  519         XDR xdrs;
  520         int error, rcvflag;
  521 
  522         /*
  523          * Serialise access to the socket and our own record parsing
  524          * state.
  525          */
  526         sx_xlock(&xprt->xp_lock);
  527 
  528         for (;;) {
  529                 /*
  530                  * If we have an mbuf chain in cd->mpending, try to parse a
  531                  * record from it, leaving the result in cd->mreq. If we don't
  532                  * have a complete record, leave the partial result in
  533                  * cd->mreq and try to read more from the socket.
  534                  */
  535                 if (cd->mpending) {
  536                         /*
  537                          * If cd->resid is non-zero, we have part of the
  538                          * record already, otherwise we are expecting a record
  539                          * marker.
  540                          */
  541                         if (!cd->resid) {
  542                                 /*
  543                                  * See if there is enough data buffered to
  544                                  * make up a record marker. Make sure we can
  545                                  * handle the case where the record marker is
  546                                  * split across more than one mbuf.
  547                                  */
  548                                 size_t n = 0;
  549                                 uint32_t header;
  550 
  551                                 m = cd->mpending;
  552                                 while (n < sizeof(uint32_t) && m) {
  553                                         n += m->m_len;
  554                                         m = m->m_next;
  555                                 }
  556                                 if (n < sizeof(uint32_t))
  557                                         goto readmore;
  558                                 m_copydata(cd->mpending, 0, sizeof(header),
  559                                     (char *)&header);
  560                                 header = ntohl(header);
  561                                 cd->eor = (header & 0x80000000) != 0;
  562                                 cd->resid = header & 0x7fffffff;
  563                                 m_adj(cd->mpending, sizeof(uint32_t));
  564                         }
  565 
  566                         /*
  567                          * Start pulling off mbufs from cd->mpending
  568                          * until we either have a complete record or
  569                          * we run out of data. We use m_split to pull
  570                          * data - it will pull as much as possible and
  571                          * split the last mbuf if necessary.
  572                          */
  573                         while (cd->mpending && cd->resid) {
  574                                 m = cd->mpending;
  575                                 if (cd->mpending->m_next
  576                                     || cd->mpending->m_len > cd->resid)
  577                                         cd->mpending = m_split(cd->mpending,
  578                                             cd->resid, M_WAIT);
  579                                 else
  580                                         cd->mpending = NULL;
  581                                 if (cd->mreq)
  582                                         m_last(cd->mreq)->m_next = m;
  583                                 else
  584                                         cd->mreq = m;
  585                                 while (m) {
  586                                         cd->resid -= m->m_len;
  587                                         m = m->m_next;
  588                                 }
  589                         }
  590 
  591                         /*
  592                          * If cd->resid is zero now, we have managed to
  593                          * receive a record fragment from the stream. Check
  594                          * for the end-of-record mark to see if we need more.
  595                          */
  596                         if (cd->resid == 0) {
  597                                 if (!cd->eor)
  598                                         continue;
  599 
  600                                 /*
  601                                  * Success - we have a complete record in
  602                                  * cd->mreq.
  603                                  */
  604                                 xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
  605                                 cd->mreq = NULL;
  606                                 sx_xunlock(&xprt->xp_lock);
  607 
  608                                 if (! xdr_callmsg(&xdrs, msg)) {
  609                                         XDR_DESTROY(&xdrs);
  610                                         return (FALSE);
  611                                 }
  612 
  613                                 *addrp = NULL;
  614                                 *mp = xdrmbuf_getall(&xdrs);
  615                                 XDR_DESTROY(&xdrs);
  616 
  617                                 return (TRUE);
  618                         }
  619                 }
  620 
  621         readmore:
  622                 /*
  623                  * The socket upcall calls xprt_active() which will eventually
  624                  * cause the server to call us here. We attempt to
  625                  * read as much as possible from the socket and put
  626                  * the result in cd->mpending. If the read fails,
  627                  * we have drained both cd->mpending and the socket so
  628                  * we can call xprt_inactive().
  629                  */
  630                 uio.uio_resid = 1000000000;
  631                 uio.uio_td = curthread;
  632                 m = NULL;
  633                 rcvflag = MSG_DONTWAIT;
  634                 error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
  635                     &rcvflag);
  636 
  637                 if (error == EWOULDBLOCK) {
  638                         /*
  639                          * We must re-test for readability after
  640                          * taking the lock to protect us in the case
  641                          * where a new packet arrives on the socket
  642                          * after our call to soreceive fails with
  643                          * EWOULDBLOCK. The pool lock protects us from
  644                          * racing the upcall after our soreadable()
  645                          * call returns false.
  646                          */
  647                         mtx_lock(&xprt->xp_pool->sp_lock);
  648                         if (!soreadable(xprt->xp_socket))
  649                                 xprt_inactive_locked(xprt);
  650                         mtx_unlock(&xprt->xp_pool->sp_lock);
  651                         sx_xunlock(&xprt->xp_lock);
  652                         return (FALSE);
  653                 }
  654 
  655                 if (error) {
  656                         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  657                         if (xprt->xp_upcallset) {
  658                                 xprt->xp_upcallset = 0;
  659                                 soupcall_clear(xprt->xp_socket, SO_RCV);
  660                         }
  661                         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  662                         xprt_inactive(xprt);
  663                         cd->strm_stat = XPRT_DIED;
  664                         sx_xunlock(&xprt->xp_lock);
  665                         return (FALSE);
  666                 }
  667 
  668                 if (!m) {
  669                         /*
  670                          * EOF - the other end has closed the socket.
  671                          */
  672                         xprt_inactive(xprt);
  673                         cd->strm_stat = XPRT_DIED;
  674                         sx_xunlock(&xprt->xp_lock);
  675                         return (FALSE);
  676                 }
  677 
  678                 if (cd->mpending)
  679                         m_last(cd->mpending)->m_next = m;
  680                 else
  681                         cd->mpending = m;
  682         }
  683 }
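
/*
 * Editor's sketch (illustrative only): a standalone decoder for the RPC
 * record mark that svc_vc_recv() parses above (RFC 5531 record marking).
 * The mark is a 4-byte big-endian word whose top bit is the last-fragment
 * flag and whose low 31 bits give the fragment length; in the kernel code
 * the header may span mbufs, hence the m_copydata() there.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct rec_mark {
        int      last_frag;     /* corresponds to cd->eor */
        uint32_t frag_len;      /* corresponds to cd->resid */
};

static void
decode_rec_mark(const void *buf, struct rec_mark *rm)
{
        uint32_t header;

        memcpy(&header, buf, sizeof(header));
        header = ntohl(header);
        rm->last_frag = (header & 0x80000000) != 0;
        rm->frag_len = header & 0x7fffffff;
}
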
  684 
  685 static bool_t
  686 svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
  687     struct sockaddr *addr, struct mbuf *m)
  688 {
  689         XDR xdrs;
  690         struct mbuf *mrep;
  691         bool_t stat = TRUE;
  692         int error;
  693 
  694         /*
  695          * Leave space for record mark.
  696          */
  697         MGETHDR(mrep, M_WAIT, MT_DATA);
  698         mrep->m_len = 0;
  699         mrep->m_data += sizeof(uint32_t);
  700 
  701         xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);
  702 
  703         if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
  704             msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
  705                 if (!xdr_replymsg(&xdrs, msg))
  706                         stat = FALSE;
  707                 else
  708                         xdrmbuf_append(&xdrs, m);
  709         } else {
  710                 stat = xdr_replymsg(&xdrs, msg);
  711         }
  712 
  713         if (stat) {
  714                 m_fixhdr(mrep);
  715 
  716                 /*
  717                  * Prepend a record marker containing the reply length.
  718                  */
  719                 M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
  720                 *mtod(mrep, uint32_t *) =
  721                         htonl(0x80000000 | (mrep->m_pkthdr.len
  722                                 - sizeof(uint32_t)));
  723                 error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
  724                     0, curthread);
  725                 if (!error) {
  726                         stat = TRUE;
  727                 }
  728         } else {
  729                 m_freem(mrep);
  730         }
  731 
  732         XDR_DESTROY(&xdrs);
  733         xprt->xp_p2 = NULL;
  734 
  735         return (stat);
  736 }
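
/*
 * Editor's sketch (illustrative only): the encode side of the record mark
 * that svc_vc_reply() prepends above.  A fragment of "len" bytes is
 * preceded by htonl(0x80000000 | len); svc_vc_reply() always sends the
 * whole reply as a single, last fragment.
 */
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t
encode_rec_mark(uint32_t len, int last_frag)
{
        return (htonl((last_frag ? 0x80000000 : 0) | (len & 0x7fffffff)));
}
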
  737 
  738 static bool_t
  739 svc_vc_null()
  740 {
  741 
  742         return (FALSE);
  743 }
  744 
  745 static int
  746 svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
  747 {
  748         SVCXPRT *xprt = (SVCXPRT *) arg;
  749 
  750         xprt_active(xprt);
  751         return (SU_OK);
  752 }
  753 
  754 #if 0
  755 /*
  756  * Get the effective UID of the sending process. Used by rpcbind, keyserv
  757  * and rpc.yppasswdd on AF_LOCAL.
  758  */
  759 int
  760 __rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
  761         int sock, ret;
  762         gid_t egid;
  763         uid_t euid;
  764         struct sockaddr *sa;
  765 
  766         sock = transp->xp_fd;
  767         sa = (struct sockaddr *)transp->xp_rtaddr;
  768         if (sa->sa_family == AF_LOCAL) {
  769                 ret = getpeereid(sock, &euid, &egid);
  770                 if (ret == 0)
  771                         *uid = euid;
  772                 return (ret);
  773         } else
  774                 return (-1);
  775 }
  776 #endif
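
/*
 * Editor's sketch (illustrative only): a userland version of the technique
 * in the disabled __rpc_get_local_uid() above.  getpeereid(2) reports the
 * effective UID/GID of the peer of a connected AF_LOCAL (unix-domain)
 * socket; it is assumed here that the caller already knows the socket is
 * AF_LOCAL.
 */
#include <sys/types.h>
#include <unistd.h>

static int
example_local_uid(int sock, uid_t *uid)
{
        uid_t euid;
        gid_t egid;

        if (getpeereid(sock, &euid, &egid) != 0)
                return (-1);
        *uid = euid;
        return (0);
}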
