The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/rpc/svc_vc.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $  */
    2 
    3 /*-
    4  * Copyright (c) 2009, Sun Microsystems, Inc.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without 
    8  * modification, are permitted provided that the following conditions are met:
    9  * - Redistributions of source code must retain the above copyright notice, 
   10  *   this list of conditions and the following disclaimer.
   11  * - Redistributions in binary form must reproduce the above copyright notice, 
   12  *   this list of conditions and the following disclaimer in the documentation 
   13  *   and/or other materials provided with the distribution.
   14  * - Neither the name of Sun Microsystems, Inc. nor the names of its 
   15  *   contributors may be used to endorse or promote products derived 
   16  *   from this software without specific prior written permission.
   17  * 
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
   19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
   21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 
   22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
   23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
   24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
   25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
   26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
   27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
   28  * POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 
   31 #if defined(LIBC_SCCS) && !defined(lint)
   32 static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
   33 static char *sccsid = "@(#)svc_tcp.c    2.2 88/08/01 4.0 RPCSRC";
   34 #endif
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 /*
   39  * svc_vc.c, Server side for Connection Oriented based RPC. 
   40  *
   41  * Actually implements two flavors of transporter -
    42  * a tcp rendezvouser (a listener and connection establisher)
   43  * and a record/tcp stream.
   44  */
   45 
   46 #include <sys/param.h>
   47 #include <sys/limits.h>
   48 #include <sys/lock.h>
   49 #include <sys/kernel.h>
   50 #include <sys/malloc.h>
   51 #include <sys/mbuf.h>
   52 #include <sys/mutex.h>
   53 #include <sys/proc.h>
   54 #include <sys/protosw.h>
   55 #include <sys/queue.h>
   56 #include <sys/socket.h>
   57 #include <sys/socketvar.h>
   58 #include <sys/sx.h>
   59 #include <sys/systm.h>
   60 #include <sys/uio.h>
   61 
   62 #include <net/vnet.h>
   63 
   64 #include <netinet/tcp.h>
   65 
   66 #include <rpc/rpc.h>
   67 
   68 #include <rpc/krpc.h>
   69 #include <rpc/rpc_com.h>
   70 
   71 #include <security/mac/mac_framework.h>
   72 
   73 static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
   74     struct sockaddr **, struct mbuf **);
   75 static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
   76 static void svc_vc_rendezvous_destroy(SVCXPRT *);
   77 static bool_t svc_vc_null(void);
   78 static void svc_vc_destroy(SVCXPRT *);
   79 static enum xprt_stat svc_vc_stat(SVCXPRT *);
   80 static bool_t svc_vc_ack(SVCXPRT *, uint32_t *);
   81 static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
   82     struct sockaddr **, struct mbuf **);
   83 static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
   84     struct sockaddr *, struct mbuf *, uint32_t *seq);
   85 static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
   86 static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
   87     void *in);
   88 static void svc_vc_backchannel_destroy(SVCXPRT *);
   89 static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *);
   90 static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *,
   91     struct sockaddr **, struct mbuf **);
   92 static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *,
   93     struct sockaddr *, struct mbuf *, uint32_t *);
   94 static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq,
   95     void *in);
   96 static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
   97     struct sockaddr *raddr);
   98 static int svc_vc_accept(struct socket *head, struct socket **sop);
   99 static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
  100 
/*
 * Method table for rendezvous (listening) transports.  A rendezvous
 * transport never sends replies itself, so xp_reply is wired to the
 * svc_vc_null() stub through a cast that adapts its empty prototype
 * to the xp_reply signature.
 * NOTE(review): calling through a function pointer cast to a different
 * prototype is technically undefined behavior in C — works on the
 * supported ABIs, but worth confirming if ever ported.
 */
static struct xp_ops svc_vc_rendezvous_ops = {
	.xp_recv =	svc_vc_rendezvous_recv,
	.xp_stat =	svc_vc_rendezvous_stat,
	.xp_reply =	(bool_t (*)(SVCXPRT *, struct rpc_msg *,
		struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null,
	.xp_destroy =	svc_vc_rendezvous_destroy,
	.xp_control =	svc_vc_rendezvous_control
};
  109 
/* Method table for connected (record-stream) transports. */
static struct xp_ops svc_vc_ops = {
	.xp_recv =	svc_vc_recv,
	.xp_stat =	svc_vc_stat,
	.xp_ack =	svc_vc_ack,
	.xp_reply =	svc_vc_reply,
	.xp_destroy =	svc_vc_destroy,
	.xp_control =	svc_vc_control
};
  118 
/*
 * Method table for backchannel transports (server-side requests
 * arriving on a client connection; no xp_ack since there is no
 * socket of their own).
 */
static struct xp_ops svc_vc_backchannel_ops = {
	.xp_recv =	svc_vc_backchannel_recv,
	.xp_stat =	svc_vc_backchannel_stat,
	.xp_reply =	svc_vc_backchannel_reply,
	.xp_destroy =	svc_vc_backchannel_destroy,
	.xp_control =	svc_vc_backchannel_control
};
  126 
  127 /*
  128  * Usage:
  129  *      xprt = svc_vc_create(sock, send_buf_size, recv_buf_size);
  130  *
  131  * Creates, registers, and returns a (rpc) tcp based transporter.
  132  * Once *xprt is initialized, it is registered as a transporter
  133  * see (svc.h, xprt_register).  This routine returns
  134  * a NULL if a problem occurred.
  135  *
   136  * The file descriptor passed in is expected to refer to a bound, but
  137  * not yet connected socket.
  138  *
  139  * Since streams do buffered io similar to stdio, the caller can specify
  140  * how big the send and receive buffers are via the second and third parms;
  141  * 0 => use the system default.
  142  */
SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct sockaddr* sa;
	int error;

	/*
	 * If the socket is already connected (or has disconnected),
	 * it is an established connection, not a listener: build a
	 * connection transport around it directly.
	 */
	SOCK_LOCK(so);
	if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
		SOCK_UNLOCK(so);
		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}
	SOCK_UNLOCK(so);

	/* Otherwise set up a rendezvous (listening) transport. */
	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	/* Record the local (bound) address of the listening socket. */
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error) {
		goto cleanup_svc_vc_create;
	}

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	/* NOTE(review): solisten() can fail; its return value is
	 * ignored here — presumably the caller's socket is always
	 * listenable, but worth confirming. */
	solisten(so, -1, curthread);

	/*
	 * Arrange for the receive upcall to poke this transport
	 * whenever a new connection completes.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);

cleanup_svc_vc_create:
	sx_destroy(&xprt->xp_lock);
	svc_xprt_free(xprt);

	return (NULL);
}
  196 
  197 /*
   198  * Create a new transport for a socket obtained via soaccept().
  199  */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt;
	struct cf_conn *cd;
	struct sockaddr* sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	/* Enable keep-alives so dead clients are eventually noticed. */
	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error) {
		return (NULL);
	}

	/* For TCP, disable Nagle so small replies are not delayed. */
	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			return (NULL);
		}
	}

	/* Per-connection record-parsing state, initially idle. */
	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	/* Remember both endpoint addresses for this connection. */
	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	/* Wake the transport whenever data arrives on the socket. */
	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	sx_destroy(&xprt->xp_lock);
	svc_xprt_free(xprt);
	mem_free(cd, sizeof(*cd));

	return (NULL);
}
  283 
  284 /*
  285  * Create a new transport for a backchannel on a clnt_vc socket.
  286  */
  287 SVCXPRT *
  288 svc_vc_create_backchannel(SVCPOOL *pool)
  289 {
  290         SVCXPRT *xprt = NULL;
  291         struct cf_conn *cd = NULL;
  292 
  293         cd = mem_alloc(sizeof(*cd));
  294         cd->strm_stat = XPRT_IDLE;
  295 
  296         xprt = svc_xprt_alloc();
  297         sx_init(&xprt->xp_lock, "xprt->xp_lock");
  298         xprt->xp_pool = pool;
  299         xprt->xp_socket = NULL;
  300         xprt->xp_p1 = cd;
  301         xprt->xp_p2 = NULL;
  302         xprt->xp_ops = &svc_vc_backchannel_ops;
  303         return (xprt);
  304 }
  305 
  306 /*
  307  * This does all of the accept except the final call to soaccept. The
  308  * caller will call soaccept after dropping its locks (soaccept may
  309  * call malloc).
  310  */
int
svc_vc_accept(struct socket *head, struct socket **sop)
{
	int error = 0;
	struct socket *so;

	/* 'head' must actually be a listening socket. */
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto done;
	}
#ifdef MAC
	/* Let the MAC framework veto the accept. */
	error = mac_socket_check_accept(curthread->td_ucred, head);
	if (error != 0)
		goto done;
#endif
	ACCEPT_LOCK();
	if (TAILQ_EMPTY(&head->so_comp)) {
		/* No completed connections queued right now. */
		ACCEPT_UNLOCK();
		error = EWOULDBLOCK;
		goto done;
	}
	so = TAILQ_FIRST(&head->so_comp);
	KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
	KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));

	/*
	 * Before changing the flags on the socket, we have to bump the
	 * reference count.  Otherwise, if the protocol calls sofree(),
	 * the socket will be released due to a zero refcount.
	 * XXX might not need soref() since this is simpler than kern_accept.
	 */
	SOCK_LOCK(so);			/* soref() and so_state update */
	soref(so);			/* file descriptor reference */

	/* Detach the connection from the listen queue. */
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_state |= (head->so_state & SS_NBIO);
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;

	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	*sop = so;

	/* connection has been removed from the listen queue */
	KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
done:
	return (error);
}
  361 
/*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;
	SVCXPRT *new_xprt;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK.
		 */
		ACCEPT_LOCK();
		if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
			xprt_inactive_self(xprt);
		ACCEPT_UNLOCK();
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		/* Hard accept error: detach the upcall and stop
		 * polling this listener. */
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		if (xprt->xp_upcallset) {
			xprt->xp_upcallset = 0;
			soupcall_clear(xprt->xp_socket, SO_RCV);
		}
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive_self(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	/* Complete the accept outside our lock (soaccept may sleep). */
	sa = 0;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection except dereference it.
	 */
	new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
	if (!new_xprt) {
		soclose(so);
	} else {
		SVC_RELEASE(new_xprt);
	}

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}
  439 
/*ARGSUSED*/
static enum xprt_stat
svc_vc_rendezvous_stat(SVCXPRT *xprt)
{

	/*
	 * A rendezvous transport carries no request stream of its own;
	 * it is always reported idle.
	 */
	return (XPRT_IDLE);
}
  447 
  448 static void
  449 svc_vc_destroy_common(SVCXPRT *xprt)
  450 {
  451         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  452         if (xprt->xp_upcallset) {
  453                 xprt->xp_upcallset = 0;
  454                 soupcall_clear(xprt->xp_socket, SO_RCV);
  455         }
  456         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  457 
  458         if (xprt->xp_socket)
  459                 (void)soclose(xprt->xp_socket);
  460 
  461         if (xprt->xp_netid)
  462                 (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
  463         svc_xprt_free(xprt);
  464 }
  465 
static void
svc_vc_rendezvous_destroy(SVCXPRT *xprt)
{

	/* A rendezvous transport has no per-connection state to free. */
	svc_vc_destroy_common(xprt);
}
  472 
  473 static void
  474 svc_vc_destroy(SVCXPRT *xprt)
  475 {
  476         struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
  477 
  478         svc_vc_destroy_common(xprt);
  479 
  480         if (cd->mreq)
  481                 m_freem(cd->mreq);
  482         if (cd->mpending)
  483                 m_freem(cd->mpending);
  484         mem_free(cd, sizeof(*cd));
  485 }
  486 
  487 static void
  488 svc_vc_backchannel_destroy(SVCXPRT *xprt)
  489 {
  490         struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
  491         struct mbuf *m, *m2;
  492 
  493         svc_xprt_free(xprt);
  494         m = cd->mreq;
  495         while (m != NULL) {
  496                 m2 = m;
  497                 m = m->m_nextpkt;
  498                 m_freem(m2);
  499         }
  500         mem_free(cd, sizeof(*cd));
  501 }
  502 
/*ARGSUSED*/
/* No control operations are supported on connection transports. */
static bool_t
svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}
  509 
/* No control operations are supported on rendezvous transports. */
static bool_t
svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}
  516 
/* No control operations are supported on backchannel transports. */
static bool_t
svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}
  523 
  524 static enum xprt_stat
  525 svc_vc_stat(SVCXPRT *xprt)
  526 {
  527         struct cf_conn *cd;
  528 
  529         cd = (struct cf_conn *)(xprt->xp_p1);
  530 
  531         if (cd->strm_stat == XPRT_DIED)
  532                 return (XPRT_DIED);
  533 
  534         if (cd->mreq != NULL && cd->resid == 0 && cd->eor)
  535                 return (XPRT_MOREREQS);
  536 
  537         if (soreadable(xprt->xp_socket))
  538                 return (XPRT_MOREREQS);
  539 
  540         return (XPRT_IDLE);
  541 }
  542 
static bool_t
svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
{

	/*
	 * Report reply-stream progress: bytes counted as sent
	 * (xp_snt_cnt, read with acquire ordering to pair with the
	 * release store in svc_vc_reply()) minus whatever still sits
	 * in the socket's send buffer (so_snd.sb_cc).
	 */
	*ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
	*ack -= xprt->xp_socket->so_snd.sb_cc;
	return (TRUE);
}
  551 
  552 static enum xprt_stat
  553 svc_vc_backchannel_stat(SVCXPRT *xprt)
  554 {
  555         struct cf_conn *cd;
  556 
  557         cd = (struct cf_conn *)(xprt->xp_p1);
  558 
  559         if (cd->mreq != NULL)
  560                 return (XPRT_MOREREQS);
  561 
  562         return (XPRT_IDLE);
  563 }
  564 
  565 /*
  566  * If we have an mbuf chain in cd->mpending, try to parse a record from it,
  567  * leaving the result in cd->mreq. If we don't have a complete record, leave
  568  * the partial result in cd->mreq and try to read more from the socket.
  569  */
static int
svc_vc_process_pending(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct socket *so = xprt->xp_socket;
	struct mbuf *m;

	/*
	 * If cd->resid is non-zero, we have part of the
	 * record already, otherwise we are expecting a record
	 * marker.
	 */
	if (!cd->resid && cd->mpending) {
		/*
		 * See if there is enough data buffered to
		 * make up a record marker. Make sure we can
		 * handle the case where the record marker is
		 * split across more than one mbuf.
		 */
		size_t n = 0;
		uint32_t header;

		m = cd->mpending;
		while (n < sizeof(uint32_t) && m) {
			n += m->m_len;
			m = m->m_next;
		}
		if (n < sizeof(uint32_t)) {
			/* Wake us only once the rest of the marker
			 * has arrived. */
			so->so_rcv.sb_lowat = sizeof(uint32_t) - n;
			return (FALSE);
		}
		m_copydata(cd->mpending, 0, sizeof(header),
		    (char *)&header);
		header = ntohl(header);
		/* High bit flags the last fragment of the record;
		 * the low 31 bits are the fragment length. */
		cd->eor = (header & 0x80000000) != 0;
		cd->resid = header & 0x7fffffff;
		m_adj(cd->mpending, sizeof(uint32_t));
	}

	/*
	 * Start pulling off mbufs from cd->mpending
	 * until we either have a complete record or
	 * we run out of data. We use m_split to pull
	 * data - it will pull as much as possible and
	 * split the last mbuf if necessary.
	 */
	while (cd->mpending && cd->resid) {
		m = cd->mpending;
		if (cd->mpending->m_next
		    || cd->mpending->m_len > cd->resid)
			cd->mpending = m_split(cd->mpending,
			    cd->resid, M_WAITOK);
		else
			cd->mpending = NULL;
		/* Append the pulled chain to the record under
		 * construction and account for the bytes consumed. */
		if (cd->mreq)
			m_last(cd->mreq)->m_next = m;
		else
			cd->mreq = m;
		while (m) {
			cd->resid -= m->m_len;
			m = m->m_next;
		}
	}

	/*
	 * Block receive upcalls if we have more data pending,
	 * otherwise report our need.
	 */
	if (cd->mpending)
		so->so_rcv.sb_lowat = INT_MAX;
	else
		so->so_rcv.sb_lowat =
		    imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2));
	return (TRUE);
}
  645 
static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m;
	struct socket* so = xprt->xp_socket;
	XDR xdrs;
	int error, rcvflag;

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/* If we have no request ready, check pending queue. */
		while (cd->mpending &&
		    (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) {
			if (!svc_vc_process_pending(xprt))
				break;
		}

		/* Process and return complete request in cd->mreq. */
		if (cd->mreq != NULL && cd->resid == 0 && cd->eor) {

			xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
			cd->mreq = NULL;

			/* Check for next request in a pending queue. */
			svc_vc_process_pending(xprt);
			if (cd->mreq == NULL || cd->resid != 0) {
				/*
				 * No further complete request buffered;
				 * go quiet unless the socket still has
				 * readable data.
				 */
				SOCKBUF_LOCK(&so->so_rcv);
				if (!soreadable(so))
					xprt_inactive_self(xprt);
				SOCKBUF_UNLOCK(&so->so_rcv);
			}

			sx_xunlock(&xprt->xp_lock);

			/* Decode the RPC call header from the record. */
			if (! xdr_callmsg(&xdrs, msg)) {
				XDR_DESTROY(&xdrs);
				return (FALSE);
			}

			/* Hand back the not-yet-decoded argument bytes. */
			*addrp = NULL;
			*mp = xdrmbuf_getall(&xdrs);
			XDR_DESTROY(&xdrs);

			return (TRUE);
		}

		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here. We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending. If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;	/* i.e. "everything available" */
		uio.uio_td = curthread;
		m = NULL;
		rcvflag = MSG_DONTWAIT;
		error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.
			 */
			SOCKBUF_LOCK(&so->so_rcv);
			if (!soreadable(so))
				xprt_inactive_self(xprt);
			SOCKBUF_UNLOCK(&so->so_rcv);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (error) {
			/* Hard error: detach the upcall and mark the
			 * stream dead so svc_vc_stat() reports it. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (xprt->xp_upcallset) {
				xprt->xp_upcallset = 0;
				soupcall_clear(so, SO_RCV);
			}
			SOCKBUF_UNLOCK(&so->so_rcv);
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		/* Append the new data and loop to re-parse records. */
		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}
  759 
static bool_t
svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct ct_data *ct;
	struct mbuf *m;
	XDR xdrs;

	sx_xlock(&xprt->xp_lock);
	/* xp_p2 points at the client-connection state this
	 * backchannel rides on; presumably cleared on detach. */
	ct = (struct ct_data *)xprt->xp_p2;
	if (ct == NULL) {
		/* Not (or no longer) attached to a client connection. */
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	/* The request queue (cd->mreq, packets linked by m_nextpkt)
	 * is accessed under the client's ct_lock. */
	mtx_lock(&ct->ct_lock);
	m = cd->mreq;
	if (m == NULL) {
		/* Queue empty; go quiet until more requests arrive. */
		xprt_inactive_self(xprt);
		mtx_unlock(&ct->ct_lock);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	cd->mreq = m->m_nextpkt;
	mtx_unlock(&ct->ct_lock);
	sx_xunlock(&xprt->xp_lock);

	/* Decode the call header; hand back the remaining bytes. */
	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	if (! xdr_callmsg(&xdrs, msg)) {
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}
	*addrp = NULL;
	*mp = xdrmbuf_getall(&xdrs);
	XDR_DESTROY(&xdrs);
	return (TRUE);
}
  797 
  798 static bool_t
  799 svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
  800     struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
  801 {
  802         XDR xdrs;
  803         struct mbuf *mrep;
  804         bool_t stat = TRUE;
  805         int error, len;
  806 
  807         /*
  808          * Leave space for record mark.
  809          */
  810         MGETHDR(mrep, M_WAIT, MT_DATA);
  811         mrep->m_len = 0;
  812         mrep->m_data += sizeof(uint32_t);
  813 
  814         xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);
  815 
  816         if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
  817             msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
  818                 if (!xdr_replymsg(&xdrs, msg))
  819                         stat = FALSE;
  820                 else
  821                         xdrmbuf_append(&xdrs, m);
  822         } else {
  823                 stat = xdr_replymsg(&xdrs, msg);
  824         }
  825 
  826         if (stat) {
  827                 m_fixhdr(mrep);
  828 
  829                 /*
  830                  * Prepend a record marker containing the reply length.
  831                  */
  832                 M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
  833                 len = mrep->m_pkthdr.len;
  834                 *mtod(mrep, uint32_t *) =
  835                         htonl(0x80000000 | (len - sizeof(uint32_t)));
  836                 atomic_add_acq_32(&xprt->xp_snd_cnt, len);
  837                 error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
  838                     0, curthread);
  839                 if (!error) {
  840                         atomic_add_rel_32(&xprt->xp_snt_cnt, len);
  841                         if (seq)
  842                                 *seq = xprt->xp_snd_cnt;
  843                         stat = TRUE;
  844                 } else
  845                         atomic_subtract_32(&xprt->xp_snd_cnt, len);
  846         } else {
  847                 m_freem(mrep);
  848         }
  849 
  850         XDR_DESTROY(&xdrs);
  851         xprt->xp_p2 = NULL;
  852 
  853         return (stat);
  854 }
  855 
/*
 * Send a reply on a backchannel transport (server-initiated calls
 * carried over an existing client connection).  The reply is
 * XDR-encoded, prefixed with an RFC 5531 record mark, and sent on the
 * socket held by the ct_data hanging off xp_p2, if the backchannel is
 * still attached.  'addr' and 'seq' are unused here.
 */
static bool_t
svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	struct ct_data *ct;
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error;

	/*
	 * Leave space for record mark.
	 */
	MGETHDR(mrep, M_WAITOK, MT_DATA);
	mrep->m_len = 0;
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		/*
		 * Successful reply: encode the header, then append the
		 * caller's pre-encoded results mbuf chain directly.
		 */
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		*mtod(mrep, uint32_t *) =
			htonl(0x80000000 | (mrep->m_pkthdr.len
				- sizeof(uint32_t)));
		/*
		 * xp_lock guards xp_p2; the backchannel may already
		 * have been torn down, in which case there is no
		 * socket to send on.
		 */
		sx_xlock(&xprt->xp_lock);
		ct = (struct ct_data *)xprt->xp_p2;
		if (ct != NULL)
			error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL,
			    0, curthread);
		else
			error = EPIPE;
		sx_xunlock(&xprt->xp_lock);
		/*
		 * NOTE(review): stat is already TRUE here and is not
		 * cleared when sosend() fails (or when the channel is
		 * gone, error == EPIPE), so send failures are never
		 * reported to the caller -- confirm this is intended.
		 */
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}
  914 
  915 static bool_t
  916 svc_vc_null()
  917 {
  918 
  919         return (FALSE);
  920 }
  921 
  922 static int
  923 svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
  924 {
  925         SVCXPRT *xprt = (SVCXPRT *) arg;
  926 
  927         if (soreadable(xprt->xp_socket))
  928                 xprt_active(xprt);
  929         return (SU_OK);
  930 }
  931 
#if 0
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
/*
 * NOTE(review): compiled out with #if 0 -- getpeereid() and xp_fd are
 * userland RPC interfaces, so this helper presumably has no kernel
 * counterpart; kept for reference only.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	/* Peer credentials exist only on AF_LOCAL (unix-domain) sockets. */
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		/* Not a local socket: no peer uid available. */
		return (-1);
}
#endif

Cache object: 4af03e95f02610b3a79d73dd20d7e9bd


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.