FreeBSD/Linux Kernel Cross Reference
sys/netinet/tcp_timer.c

    1 /*      $OpenBSD: tcp_timer.c,v 1.71 2022/11/07 11:22:55 yasuoka Exp $  */
    2 /*      $NetBSD: tcp_timer.c,v 1.14 1996/02/13 23:44:09 christos Exp $  */
    3 
    4 /*
    5  * Copyright (c) 1982, 1986, 1988, 1990, 1993
    6  *      The Regents of the University of California.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)tcp_timer.c 8.1 (Berkeley) 6/10/93
   33  */
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/mbuf.h>
   38 #include <sys/socket.h>
   39 #include <sys/socketvar.h>
   40 #include <sys/protosw.h>
   41 #include <sys/kernel.h>
   42 #include <sys/pool.h>
   43 
   44 #include <net/route.h>
   45 
   46 #include <netinet/in.h>
   47 #include <netinet/ip.h>
   48 #include <netinet/in_pcb.h>
   49 #include <netinet/ip_var.h>
   50 #include <netinet/tcp.h>
   51 #include <netinet/tcp_fsm.h>
   52 #include <netinet/tcp_timer.h>
   53 #include <netinet/tcp_var.h>
   54 #include <netinet/tcp_debug.h>
   55 #include <netinet/ip_icmp.h>
   56 #include <netinet/tcp_seq.h>
   57 
   58 /*
   59  * Locks used to protect struct members in this file:
   60  *      T       tcp_timer_mtx           global tcp timer data structures
   61  */
   62 
   63 int     tcp_always_keepalive;
   64 int     tcp_keepidle;
   65 int     tcp_keepintvl;
   66 int     tcp_maxpersistidle;     /* max idle time in persist */
   67 int     tcp_maxidle;            /* [T] max idle time for keep alive */
   68 
   69 /*
   70  * Time to delay the ACK.  This is initialized in tcp_init(), unless
    71  * it's patched.
   72  */
   73 int     tcp_delack_msecs;
   74 
   75 void    tcp_timer_rexmt(void *);
   76 void    tcp_timer_persist(void *);
   77 void    tcp_timer_keep(void *);
   78 void    tcp_timer_2msl(void *);
   79 void    tcp_timer_reaper(void *);
   80 void    tcp_timer_delack(void *);
   81 
   82 const tcp_timer_func_t tcp_timer_funcs[TCPT_NTIMERS] = {
   83         tcp_timer_rexmt,
   84         tcp_timer_persist,
   85         tcp_timer_keep,
   86         tcp_timer_2msl,
   87         tcp_timer_reaper,
   88         tcp_timer_delack,
   89 };
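/*
 * The entries above are presumed to line up with the TCPT_* indices
 * defined in tcp_timer.h (TCPT_REXMT, TCPT_PERSIST, TCPT_KEEP,
 * TCPT_2MSL, TCPT_REAPER, TCPT_DELACK), since the initializers are
 * positional; the per-connection timeouts in struct tcpcb are
 * presumably set up elsewhere to dispatch through this table, so the
 * header and the table have to stay in sync.
 */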
   90 
   91 /*
   92  * Timer state initialization, called from tcp_init().
   93  */
   94 void
   95 tcp_timer_init(void)
   96 {
   97 
   98         if (tcp_keepidle == 0)
   99                 tcp_keepidle = TCPTV_KEEP_IDLE;
  100 
  101         if (tcp_keepintvl == 0)
  102                 tcp_keepintvl = TCPTV_KEEPINTVL;
  103 
  104         if (tcp_maxpersistidle == 0)
  105                 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
  106 
  107         if (tcp_delack_msecs == 0)
  108                 tcp_delack_msecs = TCP_DELACK_MSECS;
  109 }
  110 
  111 /*
  112  * Callout to process delayed ACKs for a TCPCB.
  113  */
  114 void
  115 tcp_timer_delack(void *arg)
  116 {
  117         struct tcpcb *otp = NULL, *tp = arg;
  118         short ostate;
  119 
  120         /*
  121          * If tcp_output() wasn't able to transmit the ACK
  122          * for whatever reason, it will restart the delayed
  123          * ACK callout.
  124          */
  125         NET_LOCK();
  126         /* Ignore canceled timeouts or timeouts that have been rescheduled. */
  127         if (!ISSET((tp)->t_flags, TF_TMR_DELACK) ||
  128             timeout_pending(&tp->t_timer[TCPT_DELACK]))
  129                 goto out;
  130         CLR((tp)->t_flags, TF_TMR_DELACK);
  131 
  132         if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
  133                 otp = tp;
  134                 ostate = tp->t_state;
  135         }
  136         tp->t_flags |= TF_ACKNOW;
  137         (void) tcp_output(tp);
  138         if (otp)
  139                 tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_DELACK, 0);
  140  out:
  141         NET_UNLOCK();
  142 }
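/*
 * The guard at the top of tcp_timer_delack() is the pattern shared by
 * every timer callback in this file: take the net lock, bail out if
 * the TF_TMR_* flag has been cleared (the timer was disarmed) or if
 * the timeout has been re-armed since it fired (timeout_pending()),
 * and only then clear the flag and do the timer-specific work.  As a
 * rough sketch, with FOO standing in for the particular timer:
 *
 *	NET_LOCK();
 *	if (!ISSET(tp->t_flags, TF_TMR_FOO) ||
 *	    timeout_pending(&tp->t_timer[TCPT_FOO]))
 *		goto out;
 *	CLR(tp->t_flags, TF_TMR_FOO);
 *	...				(timer-specific work)
 * out:
 *	NET_UNLOCK();
 */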
  143 
  144 /*
  145  * Tcp protocol timeout routine called every 500 ms.
  146  * Updates the timers in all active tcb's and
  147  * causes finite state machine actions if timers expire.
  148  */
  149 void
  150 tcp_slowtimo(void)
  151 {
  152         mtx_enter(&tcp_timer_mtx);
  153         tcp_maxidle = TCPTV_KEEPCNT * tcp_keepintvl;
  154         tcp_iss += TCP_ISSINCR2/PR_SLOWHZ;              /* increment iss */
  155         mtx_leave(&tcp_timer_mtx);
  156 }
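/*
 * tcp_maxidle is recomputed on every slow timeout, so a change to
 * tcp_keepintvl shows up in it shortly afterwards.  Assuming the
 * historical defaults of TCPTV_KEEPCNT == 8 probes and a 75 second
 * keepalive interval, tcp_maxidle works out to roughly ten minutes of
 * unanswered probes before the keepalive and 2MSL timers give up on a
 * connection.
 */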
  157 
  158 /*
  159  * Cancel all timers for TCP tp.
  160  */
  161 void
  162 tcp_canceltimers(struct tcpcb *tp)
  163 {
  164         int i;
  165 
  166         for (i = 0; i < TCPT_NTIMERS; i++)
  167                 TCP_TIMER_DISARM(tp, i);
  168 }
  169 
  170 int     tcp_backoff[TCP_MAXRXTSHIFT + 1] =
  171     { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
  172 
  173 int tcp_totbackoff = 511;       /* sum of tcp_backoff[] */
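/*
 * tcp_timer_rexmt() below multiplies the current RTO estimate by
 * tcp_backoff[t_rxtshift] and clamps the result to the range
 * [t_rttmin, TCPTV_REXMTMAX], so with a hypothetical 1 second base
 * RTO the retransmit schedule is roughly 1, 2, 4, 8, 16, 32 and then
 * 64 seconds per attempt.  The sum of the thirteen entries is
 * 1 + 2 + 4 + 8 + 16 + 32 + 7 * 64 = 511, which is where
 * tcp_totbackoff comes from; tcp_timer_persist() uses that sum as an
 * upper bound on how long to keep probing a closed window before
 * dropping the connection.
 */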
  174 
  175 /*
  176  * TCP timer processing.
  177  */
  178 
  179 void    tcp_timer_freesack(struct tcpcb *);
  180 
  181 void
  182 tcp_timer_freesack(struct tcpcb *tp)
  183 {
  184         struct sackhole *p, *q;
  185         /*
  186          * Free SACK holes for 2MSL and REXMT timers.
  187          */
  188         q = tp->snd_holes;
  189         while (q != NULL) {
  190                 p = q;
  191                 q = q->next;
  192                 pool_put(&sackhl_pool, p);
  193         }
  194         tp->snd_holes = 0;
  195 }
  196 
  197 void
  198 tcp_timer_rexmt(void *arg)
  199 {
  200         struct tcpcb *otp = NULL, *tp = arg;
  201         uint32_t rto;
  202         short ostate;
  203 
  204         NET_LOCK();
  205         /* Ignore canceled timeouts or timeouts that have been rescheduled. */
  206         if (!ISSET((tp)->t_flags, TF_TMR_REXMT) ||
  207             timeout_pending(&tp->t_timer[TCPT_REXMT]))
  208                 goto out;
  209         CLR((tp)->t_flags, TF_TMR_REXMT);
  210 
  211         if ((tp->t_flags & TF_PMTUD_PEND) && tp->t_inpcb &&
  212             SEQ_GEQ(tp->t_pmtud_th_seq, tp->snd_una) &&
  213             SEQ_LT(tp->t_pmtud_th_seq, (int)(tp->snd_una + tp->t_maxseg))) {
  214                 struct sockaddr_in sin;
  215                 struct icmp icmp;
  216 
  217                 tp->t_flags &= ~TF_PMTUD_PEND;
  218 
  219                 /* XXX create fake icmp message with relevant entries */
  220                 icmp.icmp_nextmtu = tp->t_pmtud_nextmtu;
  221                 icmp.icmp_ip.ip_len = tp->t_pmtud_ip_len;
  222                 icmp.icmp_ip.ip_hl = tp->t_pmtud_ip_hl;
  223                 icmp.icmp_ip.ip_dst = tp->t_inpcb->inp_faddr;
  224                 icmp_mtudisc(&icmp, tp->t_inpcb->inp_rtableid);
  225 
  226                 /*
  227                  * Notify all connections to the same peer about
  228                  * new mss and trigger retransmit.
  229                  */
  230                 bzero(&sin, sizeof(sin));
  231                 sin.sin_len = sizeof(sin);
  232                 sin.sin_family = AF_INET;
  233                 sin.sin_addr = tp->t_inpcb->inp_faddr;
  234                 in_pcbnotifyall(&tcbtable, sintosa(&sin),
  235                     tp->t_inpcb->inp_rtableid, EMSGSIZE, tcp_mtudisc);
  236                 goto out;
  237         }
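	/*
	 * The block above applies a deferred path MTU hint: an ICMP
	 * "packet too big" for this connection is apparently only acted
	 * on once the segment it refers to (t_pmtud_th_seq) really does
	 * need retransmitting, i.e. it still lies between snd_una and
	 * snd_una + t_maxseg.  A fake ICMP message carrying the stored
	 * MTU is handed to icmp_mtudisc() to lower the route MTU, and
	 * in_pcbnotifyall() then runs tcp_mtudisc() on every connection
	 * to the same peer so they pick up the new segment size and
	 * retransmit.
	 */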
  238 
  239         tcp_timer_freesack(tp);
  240         if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
  241                 tp->t_rxtshift = TCP_MAXRXTSHIFT;
  242                 tcpstat_inc(tcps_timeoutdrop);
  243                 tp = tcp_drop(tp, tp->t_softerror ?
  244                     tp->t_softerror : ETIMEDOUT);
  245                 goto out;
  246         }
  247         if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
  248                 otp = tp;
  249                 ostate = tp->t_state;
  250         }
  251         tcpstat_inc(tcps_rexmttimeo);
  252         rto = TCP_REXMTVAL(tp);
  253         if (rto < tp->t_rttmin)
  254                 rto = tp->t_rttmin;
  255         TCPT_RANGESET(tp->t_rxtcur,
  256             rto * tcp_backoff[tp->t_rxtshift],
  257             tp->t_rttmin, TCPTV_REXMTMAX);
  258         TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);
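	/*
	 * Worked example for the re-arming above: if TCP_REXMTVAL()
	 * currently works out to 500 ms and this is the fourth
	 * consecutive timeout (t_rxtshift == 4), the timer is re-armed
	 * for 500 ms * tcp_backoff[4] = 8 seconds, with TCPT_RANGESET()
	 * pinning the value between t_rttmin and TCPTV_REXMTMAX.
	 */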
  259 
  260         /*
  261          * If we are losing and we are trying path MTU discovery,
  262          * try turning it off.  This will avoid black holes in
  263          * the network which suppress or fail to send "packet
  264          * too big" ICMP messages.  We should ideally do
  265          * lots more sophisticated searching to find the right
  266          * value here...
  267          */
  268         if (ip_mtudisc && tp->t_inpcb &&
  269             TCPS_HAVEESTABLISHED(tp->t_state) &&
  270             tp->t_rxtshift > TCP_MAXRXTSHIFT / 6) {
  271                 struct inpcb *inp = tp->t_inpcb;
  272                 struct rtentry *rt = NULL;
  273 
  274                 /* No data to send means path mtu is not a problem */
  275                 if (!inp->inp_socket->so_snd.sb_cc)
  276                         goto leave;
  277 
  278                 rt = in_pcbrtentry(inp);
  279                 /* Check if path MTU discovery is disabled already */
  280                 if (rt && (rt->rt_flags & RTF_HOST) &&
  281                     (rt->rt_locks & RTV_MTU))
  282                         goto leave;
  283 
  284                 rt = NULL;
  285                 switch(tp->pf) {
  286 #ifdef INET6
  287                 case PF_INET6:
  288                         /*
  289                          * We can not turn off path MTU for IPv6.
  290                          * Do nothing for now, maybe lower to
  291                          * minimum MTU.
  292                          */
  293                         break;
  294 #endif
  295                 case PF_INET:
  296                         rt = icmp_mtudisc_clone(inp->inp_faddr,
  297                             inp->inp_rtableid, 0);
  298                         break;
  299                 }
  300                 if (rt != NULL) {
  301                         /* Disable path MTU discovery */
  302                         if ((rt->rt_locks & RTV_MTU) == 0) {
  303                                 rt->rt_locks |= RTV_MTU;
  304                                 in_rtchange(inp, 0);
  305                         }
  306 
  307                         rtfree(rt);
  308                 }
  309         leave:
  310                 ;
  311         }
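	/*
	 * With the usual TCP_MAXRXTSHIFT of 12 the blackhole workaround
	 * above kicks in on the third consecutive timeout of an
	 * established connection that still has data in its send
	 * buffer: a host route to the peer is cloned (IPv4 only) and
	 * its MTU is locked, which effectively disables further path
	 * MTU discovery for that destination.
	 */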
  312 
  313         /*
  314          * If losing, let the lower level know and try for
  315          * a better route.  Also, if we backed off this far,
  316          * our srtt estimate is probably bogus.  Clobber it
  317          * so we'll take the next rtt measurement as our srtt;
  318          * move the current srtt into rttvar to keep the current
  319          * retransmit times until then.
  320          */
  321         if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
  322                 in_losing(tp->t_inpcb);
  323                 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
  324                 tp->t_srtt = 0;
  325         }
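	/*
	 * With the usual TCP_MAXRXTSHIFT of 12 the threshold above is 3,
	 * so from the fourth consecutive timeout onwards in_losing()
	 * asks the routing layer to look for a better route and the
	 * smoothed RTT is folded into t_rttvar: the computed retransmit
	 * interval stays in the same ballpark, but the next RTT sample
	 * re-seeds t_srtt from scratch.
	 */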
  326         tp->snd_nxt = tp->snd_una;
  327         /*
  328          * Note:  We overload snd_last to function also as the
  329          * snd_last variable described in RFC 2582
  330          */
  331         tp->snd_last = tp->snd_max;
  332         /*
  333          * If timing a segment in this window, stop the timer.
  334          */
  335         tp->t_rtttime = 0;
  336 #ifdef TCP_ECN
  337         /*
  338          * if ECN is enabled, there might be a broken firewall which
  339          * blocks ecn packets.  fall back to non-ecn.
  340          */
  341         if ((tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED)
  342             && tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN))
  343                 tp->t_flags |= TF_DISABLE_ECN;
  344 #endif
  345         /*
  346          * Close the congestion window down to one segment
  347          * (we'll open it by one segment for each ack we get).
  348          * Since we probably have a window's worth of unacked
  349          * data accumulated, this "slow start" keeps us from
  350          * dumping all that data as back-to-back packets (which
  351          * might overwhelm an intermediate gateway).
  352          *
  353          * There are two phases to the opening: Initially we
  354          * open by one mss on each ack.  This makes the window
  355          * size increase exponentially with time.  If the
  356          * window is larger than the path can handle, this
  357          * exponential growth results in dropped packet(s)
  358          * almost immediately.  To get more time between
  359          * drops but still "push" the network to take advantage
  360          * of improving conditions, we switch from exponential
  361          * to linear window opening at some threshold size.
  362          * For a threshold, we use half the current window
  363          * size, truncated to a multiple of the mss.
  364          *
  365          * (the minimum cwnd that will give us exponential
  366          * growth is 2 mss.  We don't allow the threshold
  367          * to go below this.)
  368          */
  369         {
  370                 u_long win = ulmin(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
  371                 if (win < 2)
  372                         win = 2;
  373                 tp->snd_cwnd = tp->t_maxseg;
  374                 tp->snd_ssthresh = win * tp->t_maxseg;
  375                 tp->t_dupacks = 0;
  376 #ifdef TCP_ECN
  377                 tp->snd_last = tp->snd_max;
  378                 tp->t_flags |= TF_SEND_CWR;
  379 #endif
  380 #if 1 /* TCP_ECN */
  381                 tcpstat_inc(tcps_cwr_timeout);
  382 #endif
  383         }
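	/*
	 * Worked example for the window reset above: with snd_wnd and
	 * snd_cwnd both at 65535 bytes and a 1460 byte t_maxseg, win is
	 * 65535 / 2 / 1460 = 22 segments, so snd_ssthresh becomes
	 * 22 * 1460 = 32120 bytes while snd_cwnd restarts at a single
	 * segment.  The window then grows exponentially (slow start)
	 * until it reaches ssthresh and roughly linearly (congestion
	 * avoidance) after that.
	 */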
  384         (void) tcp_output(tp);
  385         if (otp)
  386                 tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_REXMT, 0);
  387  out:
  388         NET_UNLOCK();
  389 }
  390 
  391 void
  392 tcp_timer_persist(void *arg)
  393 {
  394         struct tcpcb *otp = NULL, *tp = arg;
  395         uint32_t rto;
  396         short ostate;
  397         uint32_t now;
  398 
  399         NET_LOCK();
  400         /* Ignore canceled timeouts or timeouts that have been rescheduled. */
  401         if (!ISSET((tp)->t_flags, TF_TMR_PERSIST) ||
  402             timeout_pending(&tp->t_timer[TCPT_PERSIST]))
  403                 goto out;
  404         CLR((tp)->t_flags, TF_TMR_PERSIST);
  405 
  406         if (TCP_TIMER_ISARMED(tp, TCPT_REXMT))
  407                 goto out;
  408 
  409         if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
  410                 otp = tp;
  411                 ostate = tp->t_state;
  412         }
  413         tcpstat_inc(tcps_persisttimeo);
  414         /*
  415          * Hack: if the peer is dead/unreachable, we do not
  416          * time out if the window is closed.  After a full
  417          * backoff, drop the connection if the idle time
  418          * (no responses to probes) reaches the maximum
  419          * backoff that we would use if retransmitting.
  420          */
  421         rto = TCP_REXMTVAL(tp);
  422         if (rto < tp->t_rttmin)
  423                 rto = tp->t_rttmin;
  424         now = tcp_now();
  425         if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
  426             ((now - tp->t_rcvtime) >= TCP_TIME(tcp_maxpersistidle) ||
  427             (now - tp->t_rcvtime) >= rto * tcp_totbackoff)) {
  428                 tcpstat_inc(tcps_persistdrop);
  429                 tp = tcp_drop(tp, ETIMEDOUT);
  430                 goto out;
  431         }
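	/*
	 * The test above drops the connection only after the retransmit
	 * backoff has been exhausted (t_rxtshift == TCP_MAXRXTSHIFT)
	 * and the peer has been silent for at least tcp_maxpersistidle
	 * or for the whole backoff series; with a hypothetical 1 second
	 * base RTO the latter is rto * tcp_totbackoff = 511 seconds,
	 * a bit over eight minutes.  Until then tcp_setpersist() below
	 * re-arms the timer and the t_force flag makes tcp_output()
	 * send a window probe into the closed window.
	 */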
  432         tcp_setpersist(tp);
  433         tp->t_force = 1;
  434         (void) tcp_output(tp);
  435         tp->t_force = 0;
  436         if (otp)
  437                 tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_PERSIST, 0);
  438  out:
  439         NET_UNLOCK();
  440 }
  441 
  442 void
  443 tcp_timer_keep(void *arg)
  444 {
  445         struct tcpcb *otp = NULL, *tp = arg;
  446         short ostate;
  447 
  448         NET_LOCK();
  449         /* Ignore canceled timeouts or timeouts that have been rescheduled. */
  450         if (!ISSET((tp)->t_flags, TF_TMR_KEEP) ||
  451             timeout_pending(&tp->t_timer[TCPT_KEEP]))
  452                 goto out;
  453         CLR((tp)->t_flags, TF_TMR_KEEP);
  454 
  455         if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
  456                 otp = tp;
  457                 ostate = tp->t_state;
  458         }
  459         tcpstat_inc(tcps_keeptimeo);
  460         if (TCPS_HAVEESTABLISHED(tp->t_state) == 0)
  461                 goto dropit;
  462         if ((tcp_always_keepalive ||
  463             tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
  464             tp->t_state <= TCPS_CLOSING) {
  465                 int maxidle;
  466                 uint32_t now;
  467 
  468                 maxidle = READ_ONCE(tcp_maxidle);
  469                 now = tcp_now();
  470                 if ((maxidle > 0) && ((now - tp->t_rcvtime) >=
  471                     TCP_TIME(tcp_keepidle + maxidle)))
  472                         goto dropit;
  473                 /*
  474                  * Send a packet designed to force a response
  475                  * if the peer is up and reachable:
  476                  * either an ACK if the connection is still alive,
  477                  * or an RST if the peer has closed the connection
  478                  * due to timeout or reboot.
  479                  * Using sequence number tp->snd_una-1
  480                  * causes the transmitted zero-length segment
  481                  * to lie outside the receive window;
  482                  * by the protocol spec, this requires the
  483                  * correspondent TCP to respond.
  484                  */
  485                 tcpstat_inc(tcps_keepprobe);
  486                 tcp_respond(tp, mtod(tp->t_template, caddr_t),
  487                     NULL, tp->rcv_nxt, tp->snd_una - 1, 0, 0, now);
  488                 TCP_TIMER_ARM(tp, TCPT_KEEP, TCP_TIME(tcp_keepintvl));
  489         } else
  490                 TCP_TIMER_ARM(tp, TCPT_KEEP, TCP_TIME(tcp_keepidle));
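	/*
	 * Put together, the branch above gives the usual keepalive
	 * timeline: once a connection with keepalives enabled has been
	 * idle for tcp_keepidle, a probe using sequence number
	 * snd_una - 1 goes out every tcp_keepintvl, and the connection
	 * is dropped once the idle time reaches
	 * tcp_keepidle + tcp_maxidle.  Assuming the historical defaults
	 * (a two hour keepidle, 75 second keepintvl and 8 probes), that
	 * is roughly ten minutes of unanswered probes.
	 */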
  491         if (otp)
  492                 tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_KEEP, 0);
  493  out:
  494         NET_UNLOCK();
  495         return;
  496 
  497  dropit:
  498         tcpstat_inc(tcps_keepdrops);
  499         tp = tcp_drop(tp, ETIMEDOUT);
  500         NET_UNLOCK();
  501 }
  502 
  503 void
  504 tcp_timer_2msl(void *arg)
  505 {
  506         struct tcpcb *otp = NULL, *tp = arg;
  507         short ostate;
  508         int maxidle;
  509         uint32_t now;
  510 
  511         NET_LOCK();
  512         /* Ignore canceled timeouts or timeouts that have been rescheduled. */
  513         if (!ISSET((tp)->t_flags, TF_TMR_2MSL) ||
  514             timeout_pending(&tp->t_timer[TCPT_2MSL]))
  515                 goto out;
  516         CLR((tp)->t_flags, TF_TMR_2MSL);
  517 
  518         if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
  519                 otp = tp;
  520                 ostate = tp->t_state;
  521         }
  522         tcp_timer_freesack(tp);
  523 
  524         maxidle = READ_ONCE(tcp_maxidle);
  525         now = tcp_now();
  526         if (tp->t_state != TCPS_TIME_WAIT &&
  527             ((maxidle == 0) || ((now - tp->t_rcvtime) <= TCP_TIME(maxidle))))
  528                 TCP_TIMER_ARM(tp, TCPT_2MSL, TCP_TIME(tcp_keepintvl));
  529         else
  530                 tp = tcp_close(tp);
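	/*
	 * So in TIME_WAIT the connection is closed unconditionally when
	 * this timer fires (the 2MSL wait is over), while in the other
	 * state that traditionally uses this timer, FIN_WAIT_2, it is
	 * re-armed for tcp_keepintvl as long as the connection keeps
	 * hearing from the peer and the idle time stays within
	 * tcp_maxidle.
	 */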
  531         if (otp)
  532                 tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_2MSL, 0);
  533  out:
  534         NET_UNLOCK();
  535 }
  536 
  537 void
  538 tcp_timer_reaper(void *arg)
  539 {
  540         struct tcpcb *tp = arg;
  541 
  542         /*
  543          * This timer is necessary to delay the pool_put() after all timers
  544          * have finished, even if they were sleeping to grab the net lock.
  545          * Putting the pool_put() in a timer is sufficient as all timers run
  546          * from the same timeout thread.  Note that neither softnet thread nor
  547          * user process may access the tcpcb after arming the reaper timer.
  548          * Freeing may run in parallel as it does not grab the net lock.
  549          */
  550         pool_put(&tcpcb_pool, tp);
  551         tcpstat_inc(tcps_closed);
  552 }
