FreeBSD/Linux Kernel Cross Reference
sys/rpc/svc_vc.c


    1 /*      $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $  */
    2 
    3 /*-
    4  * SPDX-License-Identifier: BSD-3-Clause
    5  *
    6  * Copyright (c) 2009, Sun Microsystems, Inc.
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without 
   10  * modification, are permitted provided that the following conditions are met:
   11  * - Redistributions of source code must retain the above copyright notice, 
   12  *   this list of conditions and the following disclaimer.
   13  * - Redistributions in binary form must reproduce the above copyright notice, 
   14  *   this list of conditions and the following disclaimer in the documentation 
   15  *   and/or other materials provided with the distribution.
   16  * - Neither the name of Sun Microsystems, Inc. nor the names of its 
   17  *   contributors may be used to endorse or promote products derived 
   18  *   from this software without specific prior written permission.
   19  * 
   20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
   21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
   23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 
   24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
   30  * POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #if defined(LIBC_SCCS) && !defined(lint)
   34 static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
   35 static char *sccsid = "@(#)svc_tcp.c    2.2 88/08/01 4.0 RPCSRC";
   36 #endif
   37 #include <sys/cdefs.h>
   38 __FBSDID("$FreeBSD$");
   39 
   40 /*
   41  * svc_vc.c, Server side for connection-oriented RPC.
   42  *
   43  * Actually implements two flavors of transport:
   44  * a TCP rendezvouser (a listener and connection establisher)
   45  * and a record/TCP stream.
   46  */
   47 
   48 #include <sys/param.h>
   49 #include <sys/limits.h>
   50 #include <sys/lock.h>
   51 #include <sys/kernel.h>
   52 #include <sys/malloc.h>
   53 #include <sys/mbuf.h>
   54 #include <sys/mutex.h>
   55 #include <sys/proc.h>
   56 #include <sys/protosw.h>
   57 #include <sys/queue.h>
   58 #include <sys/socket.h>
   59 #include <sys/socketvar.h>
   60 #include <sys/sx.h>
   61 #include <sys/systm.h>
   62 #include <sys/uio.h>
   63 
   64 #include <net/vnet.h>
   65 
   66 #include <netinet/tcp.h>
   67 
   68 #include <rpc/rpc.h>
   69 
   70 #include <rpc/krpc.h>
   71 #include <rpc/rpc_com.h>
   72 
   73 #include <security/mac/mac_framework.h>
   74 
   75 static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
   76     struct sockaddr **, struct mbuf **);
   77 static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
   78 static void svc_vc_rendezvous_destroy(SVCXPRT *);
   79 static bool_t svc_vc_null(void);
   80 static void svc_vc_destroy(SVCXPRT *);
   81 static enum xprt_stat svc_vc_stat(SVCXPRT *);
   82 static bool_t svc_vc_ack(SVCXPRT *, uint32_t *);
   83 static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
   84     struct sockaddr **, struct mbuf **);
   85 static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
   86     struct sockaddr *, struct mbuf *, uint32_t *seq);
   87 static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
   88 static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
   89     void *in);
   90 static void svc_vc_backchannel_destroy(SVCXPRT *);
   91 static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *);
   92 static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *,
   93     struct sockaddr **, struct mbuf **);
   94 static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *,
   95     struct sockaddr *, struct mbuf *, uint32_t *);
   96 static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq,
   97     void *in);
   98 static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
   99     struct sockaddr *raddr);
  100 static int svc_vc_accept(struct socket *head, struct socket **sop);
  101 static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
  102 static int svc_vc_rendezvous_soupcall(struct socket *, void *, int);
  103 
  104 static struct xp_ops svc_vc_rendezvous_ops = {
  105         .xp_recv =      svc_vc_rendezvous_recv,
  106         .xp_stat =      svc_vc_rendezvous_stat,
  107         .xp_reply =     (bool_t (*)(SVCXPRT *, struct rpc_msg *,
  108                 struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null,
  109         .xp_destroy =   svc_vc_rendezvous_destroy,
  110         .xp_control =   svc_vc_rendezvous_control
  111 };
  112 
  113 static struct xp_ops svc_vc_ops = {
  114         .xp_recv =      svc_vc_recv,
  115         .xp_stat =      svc_vc_stat,
  116         .xp_ack =       svc_vc_ack,
  117         .xp_reply =     svc_vc_reply,
  118         .xp_destroy =   svc_vc_destroy,
  119         .xp_control =   svc_vc_control
  120 };
  121 
  122 static struct xp_ops svc_vc_backchannel_ops = {
  123         .xp_recv =      svc_vc_backchannel_recv,
  124         .xp_stat =      svc_vc_backchannel_stat,
  125         .xp_reply =     svc_vc_backchannel_reply,
  126         .xp_destroy =   svc_vc_backchannel_destroy,
  127         .xp_control =   svc_vc_backchannel_control
  128 };
  129 
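/*
 * Editorial sketch, not part of the original file: the server code never
 * calls the functions above directly.  svc_run() and its helpers dispatch
 * through the xp_ops tables via the SVC_*() macros in rpc/svc.h, roughly
 * as illustrated below.  The helper name is made up and the block is not
 * compiled.
 */
#if 0
static void
example_dispatch_one_request(SVCXPRT *xprt, struct rpc_msg *msg)
{
        struct sockaddr *addr;
        struct mbuf *args;

        /* Roughly (*xprt->xp_ops->xp_recv)(xprt, msg, &addr, &args). */
        if (SVC_RECV(xprt, msg, &addr, &args)) {
                /* Decode the arguments and run the service routine here. */
        }
}
#endif
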
  130 /*
  131  * Usage:
  132  *      xprt = svc_vc_create(pool, so, send_buf_size, recv_buf_size);
  133  *
  134  * Creates, registers, and returns an RPC/TCP based transport.
  135  * Once *xprt is initialized, it is registered as a transport
  136  * (see svc.h, xprt_register).  This routine returns
  137  * NULL if a problem occurred.
  138  *
  139  * The socket passed in is expected to be bound, but
  140  * not yet connected.
  141  *
  142  * Since streams do buffered I/O similar to stdio, the caller can specify
  143  * how big the send and receive buffers are via the last two parameters;
  144  * 0 => use the system default.
  145  */
  146 SVCXPRT *
  147 svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
  148     size_t recvsize)
  149 {
  150         SVCXPRT *xprt;
  151         struct sockaddr* sa;
  152         int error;
  153 
  154         SOCK_LOCK(so);
  155         if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
  156                 SOCK_UNLOCK(so);
  157                 CURVNET_SET(so->so_vnet);
  158                 error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
  159                 CURVNET_RESTORE();
  160                 if (error)
  161                         return (NULL);
  162                 xprt = svc_vc_create_conn(pool, so, sa);
  163                 free(sa, M_SONAME);
  164                 return (xprt);
  165         }
  166         SOCK_UNLOCK(so);
  167 
  168         xprt = svc_xprt_alloc();
  169         sx_init(&xprt->xp_lock, "xprt->xp_lock");
  170         xprt->xp_pool = pool;
  171         xprt->xp_socket = so;
  172         xprt->xp_p1 = NULL;
  173         xprt->xp_p2 = NULL;
  174         xprt->xp_ops = &svc_vc_rendezvous_ops;
  175 
  176         CURVNET_SET(so->so_vnet);
  177         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  178         CURVNET_RESTORE();
  179         if (error) {
  180                 goto cleanup_svc_vc_create;
  181         }
  182 
  183         memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
  184         free(sa, M_SONAME);
  185 
  186         xprt_register(xprt);
  187 
  188         solisten(so, -1, curthread);
  189 
  190         SOLISTEN_LOCK(so);
  191         xprt->xp_upcallset = 1;
  192         solisten_upcall_set(so, svc_vc_rendezvous_soupcall, xprt);
  193         SOLISTEN_UNLOCK(so);
  194 
  195         return (xprt);
  196 
  197 cleanup_svc_vc_create:
  198         sx_destroy(&xprt->xp_lock);
  199         svc_xprt_free(xprt);
  200 
  201         return (NULL);
  202 }
  203 
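/*
 * Editorial sketch, not part of the original file: one way a kernel caller
 * might hand a bound (but not yet listening) TCP socket to svc_vc_create().
 * The helper name, port handling and error handling are assumptions for
 * illustration only; the block is not compiled.
 */
#if 0
static SVCXPRT *
example_create_tcp_transport(SVCPOOL *pool, u_short port)
{
        struct sockaddr_in sin;
        struct socket *so;
        int error;

        error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
            curthread->td_ucred, curthread);
        if (error)
                return (NULL);
        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        sin.sin_port = htons(port);
        error = sobind(so, (struct sockaddr *)&sin, curthread);
        if (error) {
                soclose(so);
                return (NULL);
        }
        /* svc_vc_create() registers the transport and starts listening. */
        return (svc_vc_create(pool, so, 0, 0));
}
#endif
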
  204 /*
  205  * Create a new transport for a socket obtained via soaccept().
  206  */
  207 SVCXPRT *
  208 svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
  209 {
  210         SVCXPRT *xprt;
  211         struct cf_conn *cd;
  212         struct sockaddr* sa = NULL;
  213         struct sockopt opt;
  214         int one = 1;
  215         int error;
  216 
  217         bzero(&opt, sizeof(struct sockopt));
  218         opt.sopt_dir = SOPT_SET;
  219         opt.sopt_level = SOL_SOCKET;
  220         opt.sopt_name = SO_KEEPALIVE;
  221         opt.sopt_val = &one;
  222         opt.sopt_valsize = sizeof(one);
  223         error = sosetopt(so, &opt);
  224         if (error) {
  225                 return (NULL);
  226         }
  227 
  228         if (so->so_proto->pr_protocol == IPPROTO_TCP) {
  229                 bzero(&opt, sizeof(struct sockopt));
  230                 opt.sopt_dir = SOPT_SET;
  231                 opt.sopt_level = IPPROTO_TCP;
  232                 opt.sopt_name = TCP_NODELAY;
  233                 opt.sopt_val = &one;
  234                 opt.sopt_valsize = sizeof(one);
  235                 error = sosetopt(so, &opt);
  236                 if (error) {
  237                         return (NULL);
  238                 }
  239         }
  240 
  241         cd = mem_alloc(sizeof(*cd));
  242         cd->strm_stat = XPRT_IDLE;
  243 
  244         xprt = svc_xprt_alloc();
  245         sx_init(&xprt->xp_lock, "xprt->xp_lock");
  246         xprt->xp_pool = pool;
  247         xprt->xp_socket = so;
  248         xprt->xp_p1 = cd;
  249         xprt->xp_p2 = NULL;
  250         xprt->xp_ops = &svc_vc_ops;
  251 
  252         /*
  253          * See http://www.connectathon.org/talks96/nfstcp.pdf - client
  254          * has a 5 minute timer, server has a 6 minute timer.
  255          */
  256         xprt->xp_idletimeout = 6 * 60;
  257 
  258         memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);
  259 
  260         CURVNET_SET(so->so_vnet);
  261         error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
  262         CURVNET_RESTORE();
  263         if (error)
  264                 goto cleanup_svc_vc_create;
  265 
  266         memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
  267         free(sa, M_SONAME);
  268 
  269         xprt_register(xprt);
  270 
  271         SOCKBUF_LOCK(&so->so_rcv);
  272         xprt->xp_upcallset = 1;
  273         soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
  274         SOCKBUF_UNLOCK(&so->so_rcv);
  275 
  276         /*
  277          * Throw the transport into the active list in case it already
  278          * has some data buffered.
  279          */
  280         sx_xlock(&xprt->xp_lock);
  281         xprt_active(xprt);
  282         sx_xunlock(&xprt->xp_lock);
  283 
  284         return (xprt);
  285 cleanup_svc_vc_create:
  286         sx_destroy(&xprt->xp_lock);
  287         svc_xprt_free(xprt);
  288         mem_free(cd, sizeof(*cd));
  289 
  290         return (NULL);
  291 }
  292 
  293 /*
  294  * Create a new transport for a backchannel on a clnt_vc socket.
  295  */
  296 SVCXPRT *
  297 svc_vc_create_backchannel(SVCPOOL *pool)
  298 {
  299         SVCXPRT *xprt = NULL;
  300         struct cf_conn *cd = NULL;
  301 
  302         cd = mem_alloc(sizeof(*cd));
  303         cd->strm_stat = XPRT_IDLE;
  304 
  305         xprt = svc_xprt_alloc();
  306         sx_init(&xprt->xp_lock, "xprt->xp_lock");
  307         xprt->xp_pool = pool;
  308         xprt->xp_socket = NULL;
  309         xprt->xp_p1 = cd;
  310         xprt->xp_p2 = NULL;
  311         xprt->xp_ops = &svc_vc_backchannel_ops;
  312         return (xprt);
  313 }
  314 
  315 /*
  316  * This does all of the accept except the final call to soaccept. The
  317  * caller will call soaccept after dropping its locks (soaccept may
  318  * call malloc).
  319  */
  320 int
  321 svc_vc_accept(struct socket *head, struct socket **sop)
  322 {
  323         struct socket *so;
  324         int error = 0;
  325         short nbio;
  326 
  327         /* XXXGL: shouldn't that be an assertion? */
  328         if ((head->so_options & SO_ACCEPTCONN) == 0) {
  329                 error = EINVAL;
  330                 goto done;
  331         }
  332 #ifdef MAC
  333         error = mac_socket_check_accept(curthread->td_ucred, head);
  334         if (error != 0)
  335                 goto done;
  336 #endif
  337         /*
  338          * XXXGL: we want non-blocking semantics.  The socket could be a
  339  * socket created by the kernel as well as a socket shared with userland,
  340  * so we can't be sure about the presence of SS_NBIO.  We also shall not
  341          * toggle it on the socket, since that may surprise userland.  So we
  342          * set SS_NBIO only temporarily.
  343          */
  344         SOLISTEN_LOCK(head);
  345         nbio = head->so_state & SS_NBIO;
  346         head->so_state |= SS_NBIO;
  347         error = solisten_dequeue(head, &so, 0);
  348         head->so_state &= (nbio & ~SS_NBIO);
  349         if (error)
  350                 goto done;
  351 
  352         so->so_state |= nbio;
  353         *sop = so;
  354 
  355         /* connection has been removed from the listen queue */
  356         KNOTE_UNLOCKED(&head->so_rdsel.si_note, 0);
  357 done:
  358         return (error);
  359 }
  360 
  361 /*ARGSUSED*/
  362 static bool_t
  363 svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
  364     struct sockaddr **addrp, struct mbuf **mp)
  365 {
  366         struct socket *so = NULL;
  367         struct sockaddr *sa = NULL;
  368         int error;
  369         SVCXPRT *new_xprt;
  370 
  371         /*
  372          * The socket upcall calls xprt_active() which will eventually
  373          * cause the server to call us here. We attempt to accept a
  374          * connection from the socket and turn it into a new
  375          * transport. If the accept fails, we have drained all pending
  376          * connections so we call xprt_inactive().
  377          */
  378         sx_xlock(&xprt->xp_lock);
  379 
  380         error = svc_vc_accept(xprt->xp_socket, &so);
  381 
  382         if (error == EWOULDBLOCK) {
  383                 /*
  384                  * We must re-test for new connections after taking
  385                  * the lock to protect us in the case where a new
  386                  * connection arrives after our call to accept fails
  387                  * with EWOULDBLOCK.
  388                  */
  389                 SOLISTEN_LOCK(xprt->xp_socket);
  390                 if (TAILQ_EMPTY(&xprt->xp_socket->sol_comp))
  391                         xprt_inactive_self(xprt);
  392                 SOLISTEN_UNLOCK(xprt->xp_socket);
  393                 sx_xunlock(&xprt->xp_lock);
  394                 return (FALSE);
  395         }
  396 
  397         if (error) {
  398                 SOLISTEN_LOCK(xprt->xp_socket);
  399                 if (xprt->xp_upcallset) {
  400                         xprt->xp_upcallset = 0;
  401                         soupcall_clear(xprt->xp_socket, SO_RCV);
  402                 }
  403                 SOLISTEN_UNLOCK(xprt->xp_socket);
  404                 xprt_inactive_self(xprt);
  405                 sx_xunlock(&xprt->xp_lock);
  406                 return (FALSE);
  407         }
  408 
  409         sx_xunlock(&xprt->xp_lock);
  410 
  411         sa = NULL;
  412         error = soaccept(so, &sa);
  413 
  414         if (error) {
  415                 /*
  416                  * XXX not sure if I need to call sofree or soclose here.
  417                  */
  418                 if (sa)
  419                         free(sa, M_SONAME);
  420                 return (FALSE);
  421         }
  422 
  423         /*
  424  * svc_vc_create_conn will call xprt_register - we don't need to do
  425  * anything with the new connection except release our reference to it.
  426          */
  427         new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
  428         if (!new_xprt) {
  429                 soclose(so);
  430         } else {
  431                 SVC_RELEASE(new_xprt);
  432         }
  433 
  434         free(sa, M_SONAME);
  435 
  436         return (FALSE); /* there is never an rpc msg to be processed */
  437 }
  438 
  439 /*ARGSUSED*/
  440 static enum xprt_stat
  441 svc_vc_rendezvous_stat(SVCXPRT *xprt)
  442 {
  443 
  444         return (XPRT_IDLE);
  445 }
  446 
  447 static void
  448 svc_vc_destroy_common(SVCXPRT *xprt)
  449 {
  450 
  451         if (xprt->xp_socket)
  452                 (void)soclose(xprt->xp_socket);
  453 
  454         if (xprt->xp_netid)
  455                 (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
  456         svc_xprt_free(xprt);
  457 }
  458 
  459 static void
  460 svc_vc_rendezvous_destroy(SVCXPRT *xprt)
  461 {
  462 
  463         SOLISTEN_LOCK(xprt->xp_socket);
  464         if (xprt->xp_upcallset) {
  465                 xprt->xp_upcallset = 0;
  466                 solisten_upcall_set(xprt->xp_socket, NULL, NULL);
  467         }
  468         SOLISTEN_UNLOCK(xprt->xp_socket);
  469 
  470         svc_vc_destroy_common(xprt);
  471 }
  472 
  473 static void
  474 svc_vc_destroy(SVCXPRT *xprt)
  475 {
  476         struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
  477         CLIENT *cl = (CLIENT *)xprt->xp_p2;
  478 
  479         SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
  480         if (xprt->xp_upcallset) {
  481                 xprt->xp_upcallset = 0;
  482                 soupcall_clear(xprt->xp_socket, SO_RCV);
  483         }
  484         SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
  485 
  486         if (cl != NULL)
  487                 CLNT_RELEASE(cl);
  488 
  489         svc_vc_destroy_common(xprt);
  490 
  491         if (cd->mreq)
  492                 m_freem(cd->mreq);
  493         if (cd->mpending)
  494                 m_freem(cd->mpending);
  495         mem_free(cd, sizeof(*cd));
  496 }
  497 
  498 static void
  499 svc_vc_backchannel_destroy(SVCXPRT *xprt)
  500 {
  501         struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
  502         struct mbuf *m, *m2;
  503 
  504         svc_xprt_free(xprt);
  505         m = cd->mreq;
  506         while (m != NULL) {
  507                 m2 = m;
  508                 m = m->m_nextpkt;
  509                 m_freem(m2);
  510         }
  511         mem_free(cd, sizeof(*cd));
  512 }
  513 
  514 /*ARGSUSED*/
  515 static bool_t
  516 svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
  517 {
  518         return (FALSE);
  519 }
  520 
  521 static bool_t
  522 svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
  523 {
  524 
  525         return (FALSE);
  526 }
  527 
  528 static bool_t
  529 svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in)
  530 {
  531 
  532         return (FALSE);
  533 }
  534 
  535 static enum xprt_stat
  536 svc_vc_stat(SVCXPRT *xprt)
  537 {
  538         struct cf_conn *cd;
  539 
  540         cd = (struct cf_conn *)(xprt->xp_p1);
  541 
  542         if (cd->strm_stat == XPRT_DIED)
  543                 return (XPRT_DIED);
  544 
  545         if (cd->mreq != NULL && cd->resid == 0 && cd->eor)
  546                 return (XPRT_MOREREQS);
  547 
  548         if (soreadable(xprt->xp_socket))
  549                 return (XPRT_MOREREQS);
  550 
  551         return (XPRT_IDLE);
  552 }
  553 
  554 static bool_t
  555 svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
  556 {
  557 
  558         *ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
  559         *ack -= sbused(&xprt->xp_socket->so_snd);
  560         return (TRUE);
  561 }
  562 
  563 static enum xprt_stat
  564 svc_vc_backchannel_stat(SVCXPRT *xprt)
  565 {
  566         struct cf_conn *cd;
  567 
  568         cd = (struct cf_conn *)(xprt->xp_p1);
  569 
  570         if (cd->mreq != NULL)
  571                 return (XPRT_MOREREQS);
  572 
  573         return (XPRT_IDLE);
  574 }
  575 
  576 /*
  577  * If we have an mbuf chain in cd->mpending, try to parse a record from it,
  578  * leaving the result in cd->mreq. If we don't have a complete record, leave
  579  * the partial result in cd->mreq and try to read more from the socket.
  580  */
  581 static int
  582 svc_vc_process_pending(SVCXPRT *xprt)
  583 {
  584         struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
  585         struct socket *so = xprt->xp_socket;
  586         struct mbuf *m;
  587 
  588         /*
  589          * If cd->resid is non-zero, we have part of the
  590          * record already, otherwise we are expecting a record
  591          * marker.
  592          */
  593         if (!cd->resid && cd->mpending) {
  594                 /*
  595                  * See if there is enough data buffered to
  596                  * make up a record marker. Make sure we can
  597                  * handle the case where the record marker is
  598                  * split across more than one mbuf.
  599                  */
  600                 size_t n = 0;
  601                 uint32_t header;
  602 
  603                 m = cd->mpending;
  604                 while (n < sizeof(uint32_t) && m) {
  605                         n += m->m_len;
  606                         m = m->m_next;
  607                 }
  608                 if (n < sizeof(uint32_t)) {
  609                         so->so_rcv.sb_lowat = sizeof(uint32_t) - n;
  610                         return (FALSE);
  611                 }
  612                 m_copydata(cd->mpending, 0, sizeof(header),
  613                     (char *)&header);
  614                 header = ntohl(header);
  615                 cd->eor = (header & 0x80000000) != 0;
  616                 cd->resid = header & 0x7fffffff;
  617                 m_adj(cd->mpending, sizeof(uint32_t));
  618         }
  619 
  620         /*
  621          * Start pulling off mbufs from cd->mpending
  622          * until we either have a complete record or
  623          * we run out of data. We use m_split to pull
  624          * data - it will pull as much as possible and
  625          * split the last mbuf if necessary.
  626          */
  627         while (cd->mpending && cd->resid) {
  628                 m = cd->mpending;
  629                 if (cd->mpending->m_next
  630                     || cd->mpending->m_len > cd->resid)
  631                         cd->mpending = m_split(cd->mpending,
  632                             cd->resid, M_WAITOK);
  633                 else
  634                         cd->mpending = NULL;
  635                 if (cd->mreq)
  636                         m_last(cd->mreq)->m_next = m;
  637                 else
  638                         cd->mreq = m;
  639                 while (m) {
  640                         cd->resid -= m->m_len;
  641                         m = m->m_next;
  642                 }
  643         }
  644 
  645         /*
  646          * Block receive upcalls if we have more data pending,
  647          * otherwise report our need.
  648          */
  649         if (cd->mpending)
  650                 so->so_rcv.sb_lowat = INT_MAX;
  651         else
  652                 so->so_rcv.sb_lowat =
  653                     imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2));
  654         return (TRUE);
  655 }
  656 
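/*
 * Editorial sketch, not part of the original file: RPC over a stream socket
 * frames each message with the 4-byte record mark parsed above.  Bit 31 of
 * the big-endian mark flags the last fragment of a record and bits 0-30
 * carry the fragment length.  Not compiled.
 */
#if 0
static void
example_decode_record_mark(uint32_t rawmark, bool_t *eor, uint32_t *fraglen)
{
        uint32_t header;

        header = ntohl(rawmark);                /* mark is sent big-endian */
        *eor = (header & 0x80000000) != 0;      /* last-fragment flag */
        *fraglen = header & 0x7fffffff;         /* fragment length in bytes */
}
#endif
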
  657 static bool_t
  658 svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
  659     struct sockaddr **addrp, struct mbuf **mp)
  660 {
  661         struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
  662         struct uio uio;
  663         struct mbuf *m;
  664         struct socket* so = xprt->xp_socket;
  665         XDR xdrs;
  666         int error, rcvflag;
  667         uint32_t xid_plus_direction[2];
  668 
  669         /*
  670          * Serialise access to the socket and our own record parsing
  671          * state.
  672          */
  673         sx_xlock(&xprt->xp_lock);
  674 
  675         for (;;) {
  676                 /* If we have no request ready, check pending queue. */
  677                 while (cd->mpending &&
  678                     (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) {
  679                         if (!svc_vc_process_pending(xprt))
  680                                 break;
  681                 }
  682 
  683                 /* Process and return complete request in cd->mreq. */
  684                 if (cd->mreq != NULL && cd->resid == 0 && cd->eor) {
  685 
  686                         /*
  687                          * Now, check for a backchannel reply.
  688                          * The XID is in the first uint32_t of the reply
  689                          * and the message direction is the second one.
  690                          */
  691                         if ((cd->mreq->m_len >= sizeof(xid_plus_direction) ||
  692                             m_length(cd->mreq, NULL) >=
  693                             sizeof(xid_plus_direction)) &&
  694                             xprt->xp_p2 != NULL) {
  695                                 m_copydata(cd->mreq, 0,
  696                                     sizeof(xid_plus_direction),
  697                                     (char *)xid_plus_direction);
  698                                 xid_plus_direction[0] =
  699                                     ntohl(xid_plus_direction[0]);
  700                                 xid_plus_direction[1] =
  701                                     ntohl(xid_plus_direction[1]);
  702                                 /* Check message direction. */
  703                                 if (xid_plus_direction[1] == REPLY) {
  704                                         clnt_bck_svccall(xprt->xp_p2,
  705                                             cd->mreq,
  706                                             xid_plus_direction[0]);
  707                                         cd->mreq = NULL;
  708                                         continue;
  709                                 }
  710                         }
  711 
  712                         xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
  713                         cd->mreq = NULL;
  714 
  715                         /* Check for next request in a pending queue. */
  716                         svc_vc_process_pending(xprt);
  717                         if (cd->mreq == NULL || cd->resid != 0) {
  718                                 SOCKBUF_LOCK(&so->so_rcv);
  719                                 if (!soreadable(so))
  720                                         xprt_inactive_self(xprt);
  721                                 SOCKBUF_UNLOCK(&so->so_rcv);
  722                         }
  723 
  724                         sx_xunlock(&xprt->xp_lock);
  725 
  726                         if (! xdr_callmsg(&xdrs, msg)) {
  727                                 XDR_DESTROY(&xdrs);
  728                                 return (FALSE);
  729                         }
  730 
  731                         *addrp = NULL;
  732                         *mp = xdrmbuf_getall(&xdrs);
  733                         XDR_DESTROY(&xdrs);
  734 
  735                         return (TRUE);
  736                 }
  737 
  738                 /*
  739                  * The socket upcall calls xprt_active() which will eventually
  740                  * cause the server to call us here. We attempt to
  741                  * read as much as possible from the socket and put
  742                  * the result in cd->mpending. If the read fails,
  743                  * we have drained both cd->mpending and the socket so
  744                  * we can call xprt_inactive().
  745                  */
  746                 uio.uio_resid = 1000000000;
  747                 uio.uio_td = curthread;
  748                 m = NULL;
  749                 rcvflag = MSG_DONTWAIT;
  750                 error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
  751 
  752                 if (error == EWOULDBLOCK) {
  753                         /*
  754                          * We must re-test for readability after
  755                          * taking the lock to protect us in the case
  756                          * where a new packet arrives on the socket
  757                          * after our call to soreceive fails with
  758                          * EWOULDBLOCK.
  759                          */
  760                         SOCKBUF_LOCK(&so->so_rcv);
  761                         if (!soreadable(so))
  762                                 xprt_inactive_self(xprt);
  763                         SOCKBUF_UNLOCK(&so->so_rcv);
  764                         sx_xunlock(&xprt->xp_lock);
  765                         return (FALSE);
  766                 }
  767 
  768                 if (error) {
  769                         SOCKBUF_LOCK(&so->so_rcv);
  770                         if (xprt->xp_upcallset) {
  771                                 xprt->xp_upcallset = 0;
  772                                 soupcall_clear(so, SO_RCV);
  773                         }
  774                         SOCKBUF_UNLOCK(&so->so_rcv);
  775                         xprt_inactive_self(xprt);
  776                         cd->strm_stat = XPRT_DIED;
  777                         sx_xunlock(&xprt->xp_lock);
  778                         return (FALSE);
  779                 }
  780 
  781                 if (!m) {
  782                         /*
  783                          * EOF - the other end has closed the socket.
  784                          */
  785                         xprt_inactive_self(xprt);
  786                         cd->strm_stat = XPRT_DIED;
  787                         sx_xunlock(&xprt->xp_lock);
  788                         return (FALSE);
  789                 }
  790 
  791                 if (cd->mpending)
  792                         m_last(cd->mpending)->m_next = m;
  793                 else
  794                         cd->mpending = m;
  795         }
  796 }
  797 
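/*
 * Editorial sketch, not part of the original file: the backchannel check in
 * svc_vc_recv() above relies on the RPC message layout, where the XID is the
 * first 32-bit word and the message direction (CALL = 0, REPLY = 1) is the
 * second.  The helper name is made up and the block is not compiled.
 */
#if 0
static bool_t
example_is_backchannel_reply(struct mbuf *mreq)
{
        uint32_t words[2];

        if (m_length(mreq, NULL) < sizeof(words))
                return (FALSE);
        m_copydata(mreq, 0, sizeof(words), (char *)words);
        return (ntohl(words[1]) == REPLY);
}
#endif
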
  798 static bool_t
  799 svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg,
  800     struct sockaddr **addrp, struct mbuf **mp)
  801 {
  802         struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
  803         struct ct_data *ct;
  804         struct mbuf *m;
  805         XDR xdrs;
  806 
  807         sx_xlock(&xprt->xp_lock);
  808         ct = (struct ct_data *)xprt->xp_p2;
  809         if (ct == NULL) {
  810                 sx_xunlock(&xprt->xp_lock);
  811                 return (FALSE);
  812         }
  813         mtx_lock(&ct->ct_lock);
  814         m = cd->mreq;
  815         if (m == NULL) {
  816                 xprt_inactive_self(xprt);
  817                 mtx_unlock(&ct->ct_lock);
  818                 sx_xunlock(&xprt->xp_lock);
  819                 return (FALSE);
  820         }
  821         cd->mreq = m->m_nextpkt;
  822         mtx_unlock(&ct->ct_lock);
  823         sx_xunlock(&xprt->xp_lock);
  824 
  825         xdrmbuf_create(&xdrs, m, XDR_DECODE);
  826         if (! xdr_callmsg(&xdrs, msg)) {
  827                 XDR_DESTROY(&xdrs);
  828                 return (FALSE);
  829         }
  830         *addrp = NULL;
  831         *mp = xdrmbuf_getall(&xdrs);
  832         XDR_DESTROY(&xdrs);
  833         return (TRUE);
  834 }
  835 
  836 static bool_t
  837 svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
  838     struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
  839 {
  840         XDR xdrs;
  841         struct mbuf *mrep;
  842         bool_t stat = TRUE;
  843         int error, len;
  844 
  845         /*
  846          * Leave space for record mark.
  847          */
  848         mrep = m_gethdr(M_WAITOK, MT_DATA);
  849         mrep->m_data += sizeof(uint32_t);
  850 
  851         xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);
  852 
  853         if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
  854             msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
  855                 if (!xdr_replymsg(&xdrs, msg))
  856                         stat = FALSE;
  857                 else
  858                         xdrmbuf_append(&xdrs, m);
  859         } else {
  860                 stat = xdr_replymsg(&xdrs, msg);
  861         }
  862 
  863         if (stat) {
  864                 m_fixhdr(mrep);
  865 
  866                 /*
  867                  * Prepend a record marker containing the reply length.
  868                  */
  869                 M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
  870                 len = mrep->m_pkthdr.len;
  871                 *mtod(mrep, uint32_t *) =
  872                         htonl(0x80000000 | (len - sizeof(uint32_t)));
  873                 atomic_add_32(&xprt->xp_snd_cnt, len);
  874                 error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
  875                     0, curthread);
  876                 if (!error) {
  877                         atomic_add_rel_32(&xprt->xp_snt_cnt, len);
  878                         if (seq)
  879                                 *seq = xprt->xp_snd_cnt;
  880                         stat = TRUE;
  881                 } else
  882                         atomic_subtract_32(&xprt->xp_snd_cnt, len);
  883         } else {
  884                 m_freem(mrep);
  885         }
  886 
  887         XDR_DESTROY(&xdrs);
  888 
  889         return (stat);
  890 }
  891 
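/*
 * Editorial sketch, not part of the original file: the mirror image of the
 * record-mark parsing in svc_vc_process_pending().  svc_vc_reply() above
 * always sends a reply as a single final fragment, so the mark is the
 * fragment length (excluding the mark itself) with the high bit set.
 * Not compiled.
 */
#if 0
static uint32_t
example_encode_record_mark(uint32_t fraglen)
{

        return (htonl(0x80000000 | (fraglen & 0x7fffffff)));
}
#endif
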
  892 static bool_t
  893 svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg,
  894     struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
  895 {
  896         struct ct_data *ct;
  897         XDR xdrs;
  898         struct mbuf *mrep;
  899         bool_t stat = TRUE;
  900         int error;
  901 
  902         /*
  903          * Leave space for record mark.
  904          */
  905         mrep = m_gethdr(M_WAITOK, MT_DATA);
  906         mrep->m_data += sizeof(uint32_t);
  907 
  908         xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);
  909 
  910         if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
  911             msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
  912                 if (!xdr_replymsg(&xdrs, msg))
  913                         stat = FALSE;
  914                 else
  915                         xdrmbuf_append(&xdrs, m);
  916         } else {
  917                 stat = xdr_replymsg(&xdrs, msg);
  918         }
  919 
  920         if (stat) {
  921                 m_fixhdr(mrep);
  922 
  923                 /*
  924                  * Prepend a record marker containing the reply length.
  925                  */
  926                 M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
  927                 *mtod(mrep, uint32_t *) =
  928                         htonl(0x80000000 | (mrep->m_pkthdr.len
  929                                 - sizeof(uint32_t)));
  930                 sx_xlock(&xprt->xp_lock);
  931                 ct = (struct ct_data *)xprt->xp_p2;
  932                 if (ct != NULL)
  933                         error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL,
  934                             0, curthread);
  935                 else
  936                         error = EPIPE;
  937                 sx_xunlock(&xprt->xp_lock);
  938                 if (!error) {
  939                         stat = TRUE;
  940                 }
  941         } else {
  942                 m_freem(mrep);
  943         }
  944 
  945         XDR_DESTROY(&xdrs);
  946 
  947         return (stat);
  948 }
  949 
  950 static bool_t
  951 svc_vc_null(void)
  952 {
  953 
  954         return (FALSE);
  955 }
  956 
  957 static int
  958 svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
  959 {
  960         SVCXPRT *xprt = (SVCXPRT *) arg;
  961 
  962         if (soreadable(xprt->xp_socket))
  963                 xprt_active(xprt);
  964         return (SU_OK);
  965 }
  966 
  967 static int
  968 svc_vc_rendezvous_soupcall(struct socket *head, void *arg, int waitflag)
  969 {
  970         SVCXPRT *xprt = (SVCXPRT *) arg;
  971 
  972         if (!TAILQ_EMPTY(&head->sol_comp))
  973                 xprt_active(xprt);
  974         return (SU_OK);
  975 }
  976 
  977 #if 0
  978 /*
  979  * Get the effective UID of the sending process. Used by rpcbind, keyserv
  980  * and rpc.yppasswdd on AF_LOCAL.
  981  */
  982 int
  983 __rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
  984         int sock, ret;
  985         gid_t egid;
  986         uid_t euid;
  987         struct sockaddr *sa;
  988 
  989         sock = transp->xp_fd;
  990         sa = (struct sockaddr *)transp->xp_rtaddr;
  991         if (sa->sa_family == AF_LOCAL) {
  992                 ret = getpeereid(sock, &euid, &egid);
  993                 if (ret == 0)
  994                         *uid = euid;
  995                 return (ret);
  996         } else
  997                 return (-1);
  998 }
  999 #endif
