FreeBSD/Linux Kernel Cross Reference
sys/netinet/tcp_reass.c


/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

static int tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

static int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

SYSCTL_PROC(_net_inet_tcp_reass, OID_AUTO, cursegments,
    (CTLTYPE_INT | CTLFLAG_RD), NULL, 0, &tcp_reass_sysctl_qsize, "I",
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows,
    CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");
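
The counters declared above appear under net.inet.tcp.reass. As an
illustrative userland sketch (not part of this kernel file; only the OID name
is taken from the declarations above), the current queue depth could be read
with the standard sysctlbyname(3) interface:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
        int cur;
        size_t len = sizeof(cur);

        /* Query the value served by tcp_reass_sysctl_qsize() further below. */
        if (sysctlbyname("net.inet.tcp.reass.cursegments", &cur, &len,
            NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("TCP segments currently in reassembly queues: %d\n", cur);
        return (0);
}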

static uma_zone_t tcp_reass_zone;

/* Adjust the reassembly zone limit whenever nmbclusters changes. */
static void
tcp_reass_zone_change(void *tag)
{

        /* Set the zone limit and read back the effective value. */
        tcp_reass_maxseg = nmbclusters / 16;
        tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
            tcp_reass_maxseg);
}

void
tcp_reass_global_init(void)
{

        tcp_reass_maxseg = nmbclusters / 16;
        TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
            &tcp_reass_maxseg);
        tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        /* Set the zone limit and read back the effective value. */
        tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
            tcp_reass_maxseg);
        EVENTHANDLER_REGISTER(nmbclusters_change,
            tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}
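
Because maxsegments is declared with CTLFLAG_RDTUN and fetched via
TUNABLE_INT_FETCH above, the nmbclusters / 16 default can only be overridden
from the kernel environment at boot, typically in /boot/loader.conf; for
example (the value below is arbitrary and purely illustrative):

net.inet.tcp.reass.maxsegments="16384"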

void
tcp_reass_flush(struct tcpcb *tp)
{
        struct tseg_qent *qe;

        INP_WLOCK_ASSERT(tp->t_inpcb);

        while ((qe = LIST_FIRST(&tp->t_segq)) != NULL) {
                LIST_REMOVE(qe, tqe_q);
                m_freem(qe->tqe_m);
                uma_zfree(tcp_reass_zone, qe);
                tp->t_segqlen--;
        }

        KASSERT((tp->t_segqlen == 0),
            ("TCP reass queue %p segment count is %d instead of 0 after flush.",
            tp, tp->t_segqlen));
}

static int
tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS)
{
        int qsize;

        qsize = uma_zone_get_cur(tcp_reass_zone);
        return (sysctl_handle_int(oidp, &qsize, 0, req));
}

int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
        struct tseg_qent *q;
        struct tseg_qent *p = NULL;
        struct tseg_qent *nq;
        struct tseg_qent *te = NULL;
        struct socket *so = tp->t_inpcb->inp_socket;
        char *s = NULL;
        int flags;
        struct tseg_qent tqs;

        INP_WLOCK_ASSERT(tp->t_inpcb);

        /*
         * XXX: tcp_reass() is rather inefficient with its data structures
         * and should be rewritten (see NetBSD for optimizations).
         */

        /*
         * Call with th==NULL after the connection becomes established to
         * force pre-ESTABLISHED data up to the user socket.
         */
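        /*
         * For example (a sketch, assuming the usual caller in tcp_input.c),
         * a call of the form
         *
         *      tcp_reass(tp, NULL, NULL, NULL);
         *
         * made once the three-way handshake completes delivers any data
         * that was queued before the connection reached ESTABLISHED.
         */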
        if (th == NULL)
                goto present;

        /*
         * Limit the number of segments that can be queued to reduce the
         * potential for mbuf exhaustion. For best performance, we want to be
         * able to queue a full window's worth of segments. The size of the
         * socket receive buffer determines our advertised window and grows
         * automatically when socket buffer autotuning is enabled. Use it as the
         * basis for our queue limit.
         * Always let through the missing segment that caused this queue to
         * form.
         * NB: Access to the socket buffer is left intentionally unlocked as we
         * can tolerate stale information here.
         *
         * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
         * should work but causes packets to be dropped when they shouldn't.
         * Investigate why and re-evaluate the below limit after the behaviour
         * is understood.
         */
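        /*
         * Worked example with hypothetical values: for a 65536-byte receive
         * buffer (sb_hiwat) and a 1460-byte t_maxseg, the limit below is
         * 65536 / 1460 + 1 = 44 + 1 = 45 queued segments.
         */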
        if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
            tp->t_segqlen >= (so->so_rcv.sb_hiwat / tp->t_maxseg) + 1) {
                tcp_reass_overflows++;
                TCPSTAT_INC(tcps_rcvmemdrop);
                m_freem(m);
                *tlenp = 0;
                if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
                        log(LOG_DEBUG, "%s; %s: queue limit reached, "
                            "segment dropped\n", s, __func__);
                        free(s, M_TCPLOG);
                }
                return (0);
        }

        /*
         * Allocate a new queue entry. If we can't, or we hit the zone limit,
         * just drop the packet.
         *
         * Use a temporary structure on the stack for the missing segment
         * when the zone is exhausted. Otherwise we may get stuck.
         */
        te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
        if (te == NULL) {
                if (th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) {
                        TCPSTAT_INC(tcps_rcvmemdrop);
                        m_freem(m);
                        *tlenp = 0;
                        if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
                            NULL))) {
                                log(LOG_DEBUG, "%s; %s: global zone limit "
                                    "reached, segment dropped\n", s, __func__);
                                free(s, M_TCPLOG);
                        }
                        return (0);
                } else {
                        bzero(&tqs, sizeof(struct tseg_qent));
                        te = &tqs;
                        if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
                            NULL))) {
                                log(LOG_DEBUG,
                                    "%s; %s: global zone limit reached, using "
                                    "stack for missing segment\n", s, __func__);
                                free(s, M_TCPLOG);
                        }
                }
        }
        tp->t_segqlen++;

        /*
         * Find a segment which begins after this one does.
         */
        LIST_FOREACH(q, &tp->t_segq, tqe_q) {
                if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
                        break;
                p = q;
        }

        /*
         * If there is a preceding segment, it may provide some of
         * our data already.  If so, drop the data from the incoming
         * segment.  If it provides all of our data, drop us.
         */
        if (p != NULL) {
                int i;
                /* conversion to int (in i) handles seq wraparound */
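                /*
                 * Example with made-up numbers: if p covers sequence numbers
                 * 1000-1499 (tqe_len 500) and the incoming th_seq is 1200,
                 * then i = 1000 + 500 - 1200 = 300 bytes already queued; they
                 * are trimmed from the front of the new segment below, or the
                 * whole segment is dropped if i >= *tlenp.
                 */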
                i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
                if (i > 0) {
                        if (i >= *tlenp) {
                                TCPSTAT_INC(tcps_rcvduppack);
                                TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
                                m_freem(m);
                                if (te != &tqs)
                                        uma_zfree(tcp_reass_zone, te);
                                tp->t_segqlen--;
                                /*
                                 * Try to present any queued data
                                 * at the left window edge to the user.
                                 * This is needed after the 3-WHS
                                 * completes.
                                 */
                                goto present;   /* ??? */
                        }
                        m_adj(m, i);
                        *tlenp -= i;
                        th->th_seq += i;
                }
        }
        tp->t_rcvoopack++;
        TCPSTAT_INC(tcps_rcvoopack);
        TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);

        /*
         * While we overlap succeeding segments, trim them or,
         * if they are completely covered, dequeue them.
         */
        while (q) {
                int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
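                /*
                 * Example with made-up numbers: if the incoming segment now
                 * spans 1500-2299 (*tlenp 800) and q starts at 2100 with
                 * tqe_len 400, then i = 1500 + 800 - 2100 = 200, so q's first
                 * 200 bytes are trimmed; a q lying entirely within 1500-2299
                 * would instead be dequeued and freed.
                 */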
                if (i <= 0)
                        break;
                if (i < q->tqe_len) {
                        q->tqe_th->th_seq += i;
                        q->tqe_len -= i;
                        m_adj(q->tqe_m, i);
                        break;
                }

                nq = LIST_NEXT(q, tqe_q);
                LIST_REMOVE(q, tqe_q);
                m_freem(q->tqe_m);
                uma_zfree(tcp_reass_zone, q);
                tp->t_segqlen--;
                q = nq;
        }

        /* Insert the new segment queue entry into place. */
        te->tqe_m = m;
        te->tqe_th = th;
        te->tqe_len = *tlenp;

        if (p == NULL) {
                LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
        } else {
                KASSERT(te != &tqs, ("%s: temporary stack based entry not "
                    "first element in queue", __func__));
                LIST_INSERT_AFTER(p, te, tqe_q);
        }

present:
        /*
         * Present data to user, advancing rcv_nxt through
         * completed sequence space.
         */
        if (!TCPS_HAVEESTABLISHED(tp->t_state))
                return (0);
        q = LIST_FIRST(&tp->t_segq);
        if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
                return (0);
        SOCKBUF_LOCK(&so->so_rcv);
        do {
                tp->rcv_nxt += q->tqe_len;
                flags = q->tqe_th->th_flags & TH_FIN;
                nq = LIST_NEXT(q, tqe_q);
                LIST_REMOVE(q, tqe_q);
                if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
                        m_freem(q->tqe_m);
                else
                        sbappendstream_locked(&so->so_rcv, q->tqe_m);
                if (q != &tqs)
                        uma_zfree(tcp_reass_zone, q);
                tp->t_segqlen--;
                q = nq;
        } while (q && q->tqe_th->th_seq == tp->rcv_nxt);
        ND6_HINT(tp);
        sorwakeup_locked(so);
        return (flags);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.