
FreeBSD/Linux Kernel Cross Reference
sys/netinet/tcp_timer.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
   30  * $FreeBSD: releng/6.1/sys/netinet/tcp_timer.c 156185 2006-03-01 21:08:53Z andre $
   31  */
   32 
   33 #include "opt_inet6.h"
   34 #include "opt_tcpdebug.h"
   35 #include "opt_tcp_sack.h"
   36 
   37 #include <sys/param.h>
   38 #include <sys/kernel.h>
   39 #include <sys/lock.h>
   40 #include <sys/mbuf.h>
   41 #include <sys/mutex.h>
   42 #include <sys/protosw.h>
   43 #include <sys/socket.h>
   44 #include <sys/socketvar.h>
   45 #include <sys/sysctl.h>
   46 #include <sys/systm.h>
   47 
   48 #include <net/route.h>
   49 
   50 #include <netinet/in.h>
   51 #include <netinet/in_pcb.h>
   52 #include <netinet/in_systm.h>
   53 #ifdef INET6
   54 #include <netinet6/in6_pcb.h>
   55 #endif
   56 #include <netinet/ip_var.h>
   57 #include <netinet/tcp.h>
   58 #include <netinet/tcp_fsm.h>
   59 #include <netinet/tcp_timer.h>
   60 #include <netinet/tcp_var.h>
   61 #include <netinet/tcpip.h>
   62 #ifdef TCPDEBUG
   63 #include <netinet/tcp_debug.h>
   64 #endif
   65 
   66 int     tcp_keepinit;
   67 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
   68     &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
   69 
   70 int     tcp_keepidle;
   71 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
   72     &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
   73 
   74 int     tcp_keepintvl;
   75 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
   76     &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
   77 
   78 int     tcp_delacktime;
   79 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
   80     CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
   81     "Time before a delayed ACK is sent");
   82 
   83 int     tcp_msl;
   84 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
   85     &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
   86 
   87 int     tcp_rexmit_min;
   88 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
   89     &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I", "Minimum Retransmission Timeout");
   90 
   91 int     tcp_rexmit_slop;
   92 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
   93     &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I", "Retransmission Timer Slop");
   94 
   95 static int      always_keepalive = 1;
   96 SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
   97     &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");
   98 
   99 static int      tcp_keepcnt = TCPTV_KEEPCNT;
  100         /* max idle probes */
  101 int     tcp_maxpersistidle;
  102         /* max idle time in persist */
  103 int     tcp_maxidle;
  104 
  105 /*
  106  * Tcp protocol timeout routine called every 500 ms.
   107  * Updates timestamps used for TCP and
   108  * causes finite state machine actions if timers expire.
  109  */
  110 void
  111 tcp_slowtimo()
  112 {
  113 
  114         tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
  115         INP_INFO_WLOCK(&tcbinfo);
  116         (void) tcp_timer_2msl_tw(0);
  117         INP_INFO_WUNLOCK(&tcbinfo);
  118 }
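
/*
 * Worked example (illustrative, not original commentary; the defaults
 * quoted are believed to come from <netinet/tcp_timer.h> and may differ
 * per build): with tcp_keepcnt = TCPTV_KEEPCNT (8 probes) and
 * tcp_keepintvl left at its 75-second default, tcp_maxidle works out to
 * 8 * 75 = 600 seconds.  tcp_timer_keep() below drops a
 * keepalive-enabled connection once it has been idle for
 * tcp_keepidle + tcp_maxidle, i.e. roughly two hours plus ten minutes
 * with the stock settings.
 */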
  119 
  120 int     tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
  121     { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
  122 
  123 int     tcp_backoff[TCP_MAXRXTSHIFT + 1] =
  124     { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };
  125 
  126 static int tcp_totbackoff = 2559;       /* sum of tcp_backoff[] */
  127 
  128 /*
  129  * TCP timer processing.
  130  */
  131 
  132 void
  133 tcp_timer_delack(xtp)
  134         void *xtp;
  135 {
  136         struct tcpcb *tp = xtp;
  137         struct inpcb *inp;
  138 
  139         INP_INFO_RLOCK(&tcbinfo);
  140         inp = tp->t_inpcb;
  141         if (inp == NULL) {
  142                 INP_INFO_RUNLOCK(&tcbinfo);
  143                 return;
  144         }
  145         INP_LOCK(inp);
  146         INP_INFO_RUNLOCK(&tcbinfo);
  147         if (callout_pending(tp->tt_delack) || !callout_active(tp->tt_delack)) {
  148                 INP_UNLOCK(inp);
  149                 return;
  150         }
  151         callout_deactivate(tp->tt_delack);
  152 
  153         tp->t_flags |= TF_ACKNOW;
  154         tcpstat.tcps_delack++;
  155         (void) tcp_output(tp);
  156         INP_UNLOCK(inp);
  157 }
  158 
  159 void
  160 tcp_timer_2msl(xtp)
  161         void *xtp;
  162 {
  163         struct tcpcb *tp = xtp;
  164         struct inpcb *inp;
  165 #ifdef TCPDEBUG
  166         int ostate;
  167 
  168         ostate = tp->t_state;
  169 #endif
  170         INP_INFO_WLOCK(&tcbinfo);
  171         inp = tp->t_inpcb;
  172         if (inp == NULL) {
  173                 INP_INFO_WUNLOCK(&tcbinfo);
  174                 return;
  175         }
  176         INP_LOCK(inp);
  177         tcp_free_sackholes(tp);
  178         if (callout_pending(tp->tt_2msl) || !callout_active(tp->tt_2msl)) {
  179                 INP_UNLOCK(tp->t_inpcb);
  180                 INP_INFO_WUNLOCK(&tcbinfo);
  181                 return;
  182         }
  183         callout_deactivate(tp->tt_2msl);
  184         /*
  185          * 2 MSL timeout in shutdown went off.  If we're closed but
  186          * still waiting for peer to close and connection has been idle
  187          * too long, or if 2MSL time is up from TIME_WAIT, delete connection
  188          * control block.  Otherwise, check again in a bit.
  189          */
  190         if (tp->t_state != TCPS_TIME_WAIT &&
  191             (ticks - tp->t_rcvtime) <= tcp_maxidle)
  192                 callout_reset(tp->tt_2msl, tcp_keepintvl,
  193                               tcp_timer_2msl, tp);
  194         else
  195                 tp = tcp_close(tp);
  196 
  197 #ifdef TCPDEBUG
  198         if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
  199                 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
  200                           PRU_SLOWTIMO);
  201 #endif
  202         if (tp)
  203                 INP_UNLOCK(inp);
  204         INP_INFO_WUNLOCK(&tcbinfo);
  205 }
  206 
  207 /*
  208  * The timed wait lists contain references to each of the TCP sessions
   209  * currently in the TIME_WAIT state.  The list pointers, including the list pointers
  210  * in each tcptw structure, are protected using the global tcbinfo lock,
  211  * which must be held over list iteration and modification.
  212  */
  213 struct twlist {
  214         LIST_HEAD(, tcptw)      tw_list;
  215         struct tcptw    tw_tail;
  216 };
  217 #define TWLIST_NLISTS   2
  218 static struct twlist twl_2msl[TWLIST_NLISTS];
  219 static struct twlist *tw_2msl_list[] = { &twl_2msl[0], &twl_2msl[1], NULL };
  220 
  221 void
  222 tcp_timer_init(void)
  223 {
  224         int i;
  225         struct twlist *twl;
  226 
  227         for (i = 0; i < TWLIST_NLISTS; i++) {
  228                 twl = &twl_2msl[i];
  229                 LIST_INIT(&twl->tw_list);
  230                 LIST_INSERT_HEAD(&twl->tw_list, &twl->tw_tail, tw_2msl);
  231         }
  232 }
  233 
  234 void
  235 tcp_timer_2msl_reset(struct tcptw *tw, int timeo)
  236 {
  237         int i;
  238         struct tcptw *tw_tail;
  239 
  240         INP_INFO_WLOCK_ASSERT(&tcbinfo);
  241         INP_LOCK_ASSERT(tw->tw_inpcb);
  242         if (tw->tw_time != 0)
  243                 LIST_REMOVE(tw, tw_2msl);
  244         tw->tw_time = timeo + ticks;
  245         i = timeo > tcp_msl ? 1 : 0;
  246         tw_tail = &twl_2msl[i].tw_tail;
  247         LIST_INSERT_BEFORE(tw_tail, tw, tw_2msl);
  248 }
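
/*
 * Design note (illustrative, not original commentary): splitting the
 * TIME_WAIT entries into a short-timeout list (timeo <= tcp_msl) and a
 * long-timeout list, and always inserting new entries just before the
 * tail sentinel, keeps each list approximately ordered by expiry time as
 * long as callers use a fixed timeout per class.  That ordering is what
 * lets tcp_timer_2msl_tw() below stop scanning a list at the first entry
 * whose tw_time is still in the future.
 */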
  249 
  250 void
  251 tcp_timer_2msl_stop(struct tcptw *tw)
  252 {
  253 
  254         INP_INFO_WLOCK_ASSERT(&tcbinfo);
  255         if (tw->tw_time != 0)
  256                 LIST_REMOVE(tw, tw_2msl);
  257 }
  258 
  259 struct tcptw *
  260 tcp_timer_2msl_tw(int reuse)
  261 {
  262         struct tcptw *tw, *tw_tail;
  263         struct twlist *twl;
  264         int i;
  265 
  266         INP_INFO_WLOCK_ASSERT(&tcbinfo);
  267         for (i = 0; i < 2; i++) {
  268                 twl = tw_2msl_list[i];
  269                 tw_tail = &twl->tw_tail;
  270                 for (;;) {
  271                         tw = LIST_FIRST(&twl->tw_list);
  272                         if (tw == tw_tail || (!reuse && tw->tw_time > ticks))
  273                                 break;
  274                         INP_LOCK(tw->tw_inpcb);
  275                         if (tcp_twclose(tw, reuse) != NULL)
  276                                 return (tw);
  277                 }
  278         }
  279         return (NULL);
  280 }
  281 
  282 void
  283 tcp_timer_keep(xtp)
  284         void *xtp;
  285 {
  286         struct tcpcb *tp = xtp;
  287         struct tcptemp *t_template;
  288         struct inpcb *inp;
  289 #ifdef TCPDEBUG
  290         int ostate;
  291 
  292         ostate = tp->t_state;
  293 #endif
  294         INP_INFO_WLOCK(&tcbinfo);
  295         inp = tp->t_inpcb;
  296         if (!inp) {
  297                 INP_INFO_WUNLOCK(&tcbinfo);
  298                 return;
  299         }
  300         INP_LOCK(inp);
  301         if (callout_pending(tp->tt_keep) || !callout_active(tp->tt_keep)) {
  302                 INP_UNLOCK(inp);
  303                 INP_INFO_WUNLOCK(&tcbinfo);
  304                 return;
  305         }
  306         callout_deactivate(tp->tt_keep);
  307         /*
  308          * Keep-alive timer went off; send something
  309          * or drop connection if idle for too long.
  310          */
  311         tcpstat.tcps_keeptimeo++;
  312         if (tp->t_state < TCPS_ESTABLISHED)
  313                 goto dropit;
  314         if ((always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
  315             tp->t_state <= TCPS_CLOSING) {
  316                 if ((ticks - tp->t_rcvtime) >= tcp_keepidle + tcp_maxidle)
  317                         goto dropit;
  318                 /*
  319                  * Send a packet designed to force a response
  320                  * if the peer is up and reachable:
  321                  * either an ACK if the connection is still alive,
  322                  * or an RST if the peer has closed the connection
  323                  * due to timeout or reboot.
  324                  * Using sequence number tp->snd_una-1
  325                  * causes the transmitted zero-length segment
  326                  * to lie outside the receive window;
  327                  * by the protocol spec, this requires the
  328                  * correspondent TCP to respond.
  329                  */
  330                 tcpstat.tcps_keepprobe++;
  331                 t_template = tcpip_maketemplate(inp);
  332                 if (t_template) {
  333                         tcp_respond(tp, t_template->tt_ipgen,
  334                                     &t_template->tt_t, (struct mbuf *)NULL,
  335                                     tp->rcv_nxt, tp->snd_una - 1, 0);
  336                         (void) m_free(dtom(t_template));
  337                 }
  338                 callout_reset(tp->tt_keep, tcp_keepintvl, tcp_timer_keep, tp);
  339         } else
  340                 callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
  341 
  342 #ifdef TCPDEBUG
  343         if (inp->inp_socket->so_options & SO_DEBUG)
  344                 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
  345                           PRU_SLOWTIMO);
  346 #endif
  347         INP_UNLOCK(inp);
  348         INP_INFO_WUNLOCK(&tcbinfo);
  349         return;
  350 
  351 dropit:
  352         tcpstat.tcps_keepdrops++;
  353         tp = tcp_drop(tp, ETIMEDOUT);
  354 
  355 #ifdef TCPDEBUG
  356         if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
  357                 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
  358                           PRU_SLOWTIMO);
  359 #endif
  360         if (tp)
  361                 INP_UNLOCK(tp->t_inpcb);
  362         INP_INFO_WUNLOCK(&tcbinfo);
  363 }
  364 
  365 void
  366 tcp_timer_persist(xtp)
  367         void *xtp;
  368 {
  369         struct tcpcb *tp = xtp;
  370         struct inpcb *inp;
  371 #ifdef TCPDEBUG
  372         int ostate;
  373 
  374         ostate = tp->t_state;
  375 #endif
  376         INP_INFO_WLOCK(&tcbinfo);
  377         inp = tp->t_inpcb;
  378         if (!inp) {
  379                 INP_INFO_WUNLOCK(&tcbinfo);
  380                 return;
  381         }
  382         INP_LOCK(inp);
  383         if (callout_pending(tp->tt_persist) || !callout_active(tp->tt_persist)){
  384                 INP_UNLOCK(inp);
  385                 INP_INFO_WUNLOCK(&tcbinfo);
  386                 return;
  387         }
  388         callout_deactivate(tp->tt_persist);
  389         /*
   390          * Persistence timer into zero window.
  391          * Force a byte to be output, if possible.
  392          */
  393         tcpstat.tcps_persisttimeo++;
  394         /*
  395          * Hack: if the peer is dead/unreachable, we do not
  396          * time out if the window is closed.  After a full
  397          * backoff, drop the connection if the idle time
  398          * (no responses to probes) reaches the maximum
  399          * backoff that we would use if retransmitting.
  400          */
  401         if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
  402             ((ticks - tp->t_rcvtime) >= tcp_maxpersistidle ||
  403              (ticks - tp->t_rcvtime) >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
  404                 tcpstat.tcps_persistdrop++;
  405                 tp = tcp_drop(tp, ETIMEDOUT);
  406                 goto out;
  407         }
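        /*
         * Worked example (illustrative, not original commentary): assuming
         * a base TCP_REXMTVAL(tp) of roughly one second -- the actual
         * figure depends on the measured RTT -- the second cutoff above is
         * about tcp_totbackoff = 2559 seconds, i.e. ~42.7 minutes of
         * unanswered zero-window probes before the connection is dropped.
         * 2559 is simply the sum of the tcp_backoff[] multipliers declared
         * near the top of this file (ignoring the TCPTV_REXMTMAX clamp).
         */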
  408         tcp_setpersist(tp);
  409         tp->t_flags |= TF_FORCEDATA;
  410         (void) tcp_output(tp);
  411         tp->t_flags &= ~TF_FORCEDATA;
  412 
  413 out:
  414 #ifdef TCPDEBUG
  415         if (tp && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
  416                 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
  417                           PRU_SLOWTIMO);
  418 #endif
  419         if (tp)
  420                 INP_UNLOCK(inp);
  421         INP_INFO_WUNLOCK(&tcbinfo);
  422 }
  423 
  424 void
  425 tcp_timer_rexmt(xtp)
  426         void *xtp;
  427 {
  428         struct tcpcb *tp = xtp;
  429         int rexmt;
  430         int headlocked;
  431         struct inpcb *inp;
  432 #ifdef TCPDEBUG
  433         int ostate;
  434 
  435         ostate = tp->t_state;
  436 #endif
  437         INP_INFO_WLOCK(&tcbinfo);
  438         headlocked = 1;
  439         inp = tp->t_inpcb;
  440         if (!inp) {
  441                 INP_INFO_WUNLOCK(&tcbinfo);
  442                 return;
  443         }
  444         INP_LOCK(inp);
  445         if (callout_pending(tp->tt_rexmt) || !callout_active(tp->tt_rexmt)) {
  446                 INP_UNLOCK(inp);
  447                 INP_INFO_WUNLOCK(&tcbinfo);
  448                 return;
  449         }
  450         callout_deactivate(tp->tt_rexmt);
  451         tcp_free_sackholes(tp);
  452         /*
  453          * Retransmission timer went off.  Message has not
  454          * been acked within retransmit interval.  Back off
  455          * to a longer retransmit interval and retransmit one segment.
  456          */
  457         if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
  458                 tp->t_rxtshift = TCP_MAXRXTSHIFT;
  459                 tcpstat.tcps_timeoutdrop++;
  460                 tp = tcp_drop(tp, tp->t_softerror ?
  461                               tp->t_softerror : ETIMEDOUT);
  462                 goto out;
  463         }
  464         INP_INFO_WUNLOCK(&tcbinfo);
  465         headlocked = 0;
  466         if (tp->t_rxtshift == 1) {
  467                 /*
  468                  * first retransmit; record ssthresh and cwnd so they can
  469                  * be recovered if this turns out to be a "bad" retransmit.
  470                  * A retransmit is considered "bad" if an ACK for this
  471                  * segment is received within RTT/2 interval; the assumption
  472                  * here is that the ACK was already in flight.  See
  473                  * "On Estimating End-to-End Network Path Properties" by
  474                  * Allman and Paxson for more details.
  475                  */
  476                 tp->snd_cwnd_prev = tp->snd_cwnd;
  477                 tp->snd_ssthresh_prev = tp->snd_ssthresh;
  478                 tp->snd_recover_prev = tp->snd_recover;
  479                 if (IN_FASTRECOVERY(tp))
  480                   tp->t_flags |= TF_WASFRECOVERY;
  481                 else
  482                   tp->t_flags &= ~TF_WASFRECOVERY;
  483                 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
  484         }
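        /*
         * Illustrative note (not original commentary): t_srtt is a
         * fixed-point value scaled by 2^TCP_RTT_SHIFT, so shifting it by
         * TCP_RTT_SHIFT + 1 yields roughly half the smoothed RTT in ticks.
         * t_badrxtwin therefore expires about RTT/2 after the retransmit,
         * matching the "RTT/2 interval" described in the comment above.
         */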
  485         tcpstat.tcps_rexmttimeo++;
  486         if (tp->t_state == TCPS_SYN_SENT)
  487                 rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
  488         else
  489                 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
  490         TCPT_RANGESET(tp->t_rxtcur, rexmt,
  491                       tp->t_rttmin, TCPTV_REXMTMAX);
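        /*
         * Illustrative sketch, not original code (hence the #if 0; it uses
         * only symbols already visible in this file): the loop below would
         * tabulate the full clamped schedule.  With a base TCP_REXMTVAL(tp)
         * of one second the successive timeouts double -- 1s, 2s, 4s, ... --
         * until tcp_backoff[] saturates at 512 and TCPT_RANGESET() pins the
         * value at TCPTV_REXMTMAX.
         */
#if 0   /* example only; never compiled */
        {
                int i, sample;

                for (i = 0; i <= TCP_MAXRXTSHIFT; i++) {
                        sample = TCP_REXMTVAL(tp) * tcp_backoff[i];
                        TCPT_RANGESET(sample, sample, tp->t_rttmin,
                            TCPTV_REXMTMAX);
                        /* sample is the timeout that would be used at shift i */
                }
        }
#endif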
  492         /*
   493          * Disable rfc1323 if we haven't received any response to
   494          * our third SYN, to work around some broken terminal servers
  495          * (most of which have hopefully been retired) that have bad VJ
  496          * header compression code which trashes TCP segments containing
  497          * unknown-to-them TCP options.
  498          */
  499         if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
  500                 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP);
  501         /*
  502          * If we backed off this far, our srtt estimate is probably bogus.
  503          * Clobber it so we'll take the next rtt measurement as our srtt;
  504          * move the current srtt into rttvar to keep the current
  505          * retransmit times until then.
  506          */
  507         if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
  508 #ifdef INET6
  509                 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
  510                         in6_losing(tp->t_inpcb);
  511                 else
  512 #endif
  513                 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
  514                 tp->t_srtt = 0;
  515         }
  516         tp->snd_nxt = tp->snd_una;
  517         tp->snd_recover = tp->snd_max;
  518         /*
  519          * Force a segment to be sent.
  520          */
  521         tp->t_flags |= TF_ACKNOW;
  522         /*
  523          * If timing a segment in this window, stop the timer.
  524          */
  525         tp->t_rtttime = 0;
  526         /*
  527          * Close the congestion window down to one segment
  528          * (we'll open it by one segment for each ack we get).
  529          * Since we probably have a window's worth of unacked
  530          * data accumulated, this "slow start" keeps us from
  531          * dumping all that data as back-to-back packets (which
  532          * might overwhelm an intermediate gateway).
  533          *
  534          * There are two phases to the opening: Initially we
  535          * open by one mss on each ack.  This makes the window
  536          * size increase exponentially with time.  If the
  537          * window is larger than the path can handle, this
  538          * exponential growth results in dropped packet(s)
  539          * almost immediately.  To get more time between
  540          * drops but still "push" the network to take advantage
  541          * of improving conditions, we switch from exponential
   542          * to linear window opening at some threshold size.
   543          * For a threshold, we use half the current window
  544          * size, truncated to a multiple of the mss.
  545          *
  546          * (the minimum cwnd that will give us exponential
   547          * growth is 2 mss.  We don't allow the threshold
  548          * to go below this.)
  549          */
  550         {
  551                 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
  552                 if (win < 2)
  553                         win = 2;
  554                 tp->snd_cwnd = tp->t_maxseg;
  555                 tp->snd_ssthresh = win * tp->t_maxseg;
  556                 tp->t_dupacks = 0;
  557         }
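        /*
         * Worked example (illustrative, not original commentary): with
         * snd_wnd = 65535, snd_cwnd = 32768 and t_maxseg = 1460, win
         * becomes min(65535, 32768) / 2 / 1460 = 11, so snd_ssthresh is
         * reset to 11 * 1460 = 16060 bytes while snd_cwnd restarts at a
         * single 1460-byte segment.
         */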
  558         EXIT_FASTRECOVERY(tp);
  559         (void) tcp_output(tp);
  560 
  561 out:
  562 #ifdef TCPDEBUG
  563         if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
  564                 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
  565                           PRU_SLOWTIMO);
  566 #endif
  567         if (tp)
  568                 INP_UNLOCK(inp);
  569         if (headlocked)
  570                 INP_INFO_WUNLOCK(&tcbinfo);
  571 }
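
The SYSCTL_PROC declarations near the top of the file export these timer values under net.inet.tcp.*; their sysctl_msec_to_ticks handler converts between the kernel-internal tick counts stored in tcp_keepinit, tcp_keepidle and friends and the millisecond units shown to userland. The sketch below is not part of tcp_timer.c; it assumes a FreeBSD userland with the standard sysctlbyname(3) interface and shows how the keepalive tunables might be read as plain integers in milliseconds. Writing a new value through the same interface (passing newp/newlen) is what tuning net.inet.tcp.keepidle with sysctl(8) amounts to.

/* keepalive_sysctl.c -- illustrative userland reader for the tunables above */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        const char *names[] = {
                "net.inet.tcp.keepinit",        /* connection-establishment timer */
                "net.inet.tcp.keepidle",        /* idle time before first probe */
                "net.inet.tcp.keepintvl",       /* interval between probes */
        };
        int value;
        size_t len;
        unsigned i;

        for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                len = sizeof(value);
                if (sysctlbyname(names[i], &value, &len, NULL, 0) == -1) {
                        perror(names[i]);
                        exit(1);
                }
                /* the handler returns milliseconds, converted from ticks */
                printf("%s = %d ms\n", names[i], value);
        }
        return (0);
}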



This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.