FreeBSD/Linux Kernel Cross Reference
sys/netinet/tcp_sack.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
    5  *      The Regents of the University of California.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)tcp_sack.c  8.12 (Berkeley) 5/24/95
   33  */
   34 
   35 /*-
   36  *      @@(#)COPYRIGHT  1.1 (NRL) 17 January 1995
   37  *
   38  * NRL grants permission for redistribution and use in source and binary
   39  * forms, with or without modification, of the software and documentation
   40  * created at NRL provided that the following conditions are met:
   41  *
   42  * 1. Redistributions of source code must retain the above copyright
   43  *    notice, this list of conditions and the following disclaimer.
   44  * 2. Redistributions in binary form must reproduce the above copyright
   45  *    notice, this list of conditions and the following disclaimer in the
   46  *    documentation and/or other materials provided with the distribution.
   47  * 3. All advertising materials mentioning features or use of this software
   48  *    must display the following acknowledgements:
   49  *      This product includes software developed by the University of
   50  *      California, Berkeley and its contributors.
   51  *      This product includes software developed at the Information
   52  *      Technology Division, US Naval Research Laboratory.
   53  * 4. Neither the name of the NRL nor the names of its contributors
   54  *    may be used to endorse or promote products derived from this software
   55  *    without specific prior written permission.
   56  *
   57  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
   58  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   59  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
   60  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
   61  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   62  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   63  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   64  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   65  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   66  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   67  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   68  *
   69  * The views and conclusions contained in the software and documentation
   70  * are those of the authors and should not be interpreted as representing
   71  * official policies, either expressed or implied, of the US Naval
   72  * Research Laboratory (NRL).
   73  */
   74 
   75 #include <sys/cdefs.h>
   76 __FBSDID("$FreeBSD$");
   77 
   78 #include "opt_inet.h"
   79 #include "opt_inet6.h"
   80 #include "opt_tcpdebug.h"
   81 
   82 #include <sys/param.h>
   83 #include <sys/systm.h>
   84 #include <sys/kernel.h>
   85 #include <sys/sysctl.h>
   86 #include <sys/malloc.h>
   87 #include <sys/mbuf.h>
   88 #include <sys/proc.h>           /* for proc0 declaration */
   89 #include <sys/protosw.h>
   90 #include <sys/socket.h>
   91 #include <sys/socketvar.h>
   92 #include <sys/syslog.h>
   93 #include <sys/systm.h>
   94 
   95 #include <machine/cpu.h>        /* before tcp_seq.h, for tcp_random18() */
   96 
   97 #include <vm/uma.h>
   98 
   99 #include <net/if.h>
  100 #include <net/if_var.h>
  101 #include <net/route.h>
  102 #include <net/vnet.h>
  103 
  104 #include <netinet/in.h>
  105 #include <netinet/in_systm.h>
  106 #include <netinet/ip.h>
  107 #include <netinet/in_var.h>
  108 #include <netinet/in_pcb.h>
  109 #include <netinet/ip_var.h>
  110 #include <netinet/ip6.h>
  111 #include <netinet/icmp6.h>
  112 #include <netinet6/nd6.h>
  113 #include <netinet6/ip6_var.h>
  114 #include <netinet6/in6_pcb.h>
  115 #include <netinet/tcp.h>
  116 #include <netinet/tcp_fsm.h>
  117 #include <netinet/tcp_seq.h>
  118 #include <netinet/tcp_timer.h>
  119 #include <netinet/tcp_var.h>
  120 #include <netinet6/tcp6_var.h>
  121 #include <netinet/tcpip.h>
  122 #ifdef TCPDEBUG
  123 #include <netinet/tcp_debug.h>
  124 #endif /* TCPDEBUG */
  125 
  126 #include <machine/in_cksum.h>
  127 
  128 VNET_DECLARE(struct uma_zone *, sack_hole_zone);
  129 #define V_sack_hole_zone                VNET(sack_hole_zone)
  130 
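      /*
       * Note: sack_hole_zone is the UMA zone backing the struct sackhole
       * allocations below.  Only a VNET_DECLARE() appears in this file; in
       * stock FreeBSD the zone itself is defined and created elsewhere in
       * the TCP initialization path (tcp_subr.c).
       */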
  131 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 0, "TCP SACK");
  132 VNET_DEFINE(int, tcp_do_sack) = 1;
  133 #define V_tcp_do_sack                   VNET(tcp_do_sack)
  134 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
  135     &VNET_NAME(tcp_do_sack), 0, "Enable/Disable TCP SACK support");
  136 
  137 VNET_DEFINE(int, tcp_sack_maxholes) = 128;
  138 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_VNET | CTLFLAG_RW,
  139     &VNET_NAME(tcp_sack_maxholes), 0,
  140     "Maximum number of TCP SACK holes allowed per connection");
  141 
  142 VNET_DEFINE(int, tcp_sack_globalmaxholes) = 65536;
  143 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_VNET | CTLFLAG_RW,
  144     &VNET_NAME(tcp_sack_globalmaxholes), 0, 
  145     "Global maximum number of TCP SACK holes");
  146 
  147 VNET_DEFINE(int, tcp_sack_globalholes) = 0;
  148 SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalholes, CTLFLAG_VNET | CTLFLAG_RD,
  149     &VNET_NAME(tcp_sack_globalholes), 0,
  150     "Global number of TCP SACK holes currently allocated");
  151 
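      /*
       * Note: the knobs above are exported under net.inet.tcp.sack.* and can
       * be inspected or tuned with sysctl(8); for example (illustrative
       * values only):
       *
       *     sysctl net.inet.tcp.sack.enable          # read current setting
       *     sysctl net.inet.tcp.sack.maxholes=256    # raise per-connection cap
       *
       * globalholes is read-only (CTLFLAG_RD) and reports current usage.
       */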
  152 
  153 /*
  154  * This function finds overlaps with the currently stored SACK blocks
  155  * and adds any overlap as a DSACK block at the front of the list.
  156  */
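      /*
       * Illustrative example: if the out-of-order range [1000,1500) is
       * already held in tp->sackblks[] and the peer needlessly retransmits
       * it, the retransmission overlaps the stored block, so [1000,1500) is
       * placed in the first SACK slot as a DSACK block (RFC 2883), ahead of
       * the remaining SACK blocks.
       */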
  157 void
  158 tcp_update_dsack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
  159 {
  160         struct sackblk head_blk,mid_blk,saved_blks[MAX_SACK_BLKS];
  161         int i, j, n, identical;
  162         tcp_seq start, end;
  163 
  164         INP_WLOCK_ASSERT(tp->t_inpcb);
  165 
  166         KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));
  167 
  168         if (SEQ_LT(rcv_end, tp->rcv_nxt) ||
  169             ((rcv_end == tp->rcv_nxt) &&
  170              (tp->rcv_numsacks > 0) &&
  171              (tp->sackblks[0].end == tp->rcv_nxt))) {
  172                 saved_blks[0].start = rcv_start;
  173                 saved_blks[0].end = rcv_end;
  174         } else {
  175                 saved_blks[0].start = saved_blks[0].end = 0;
  176         }
  177 
  178         head_blk.start = head_blk.end = 0;
  179         mid_blk.start = rcv_start;
  180         mid_blk.end = rcv_end;
  181         identical = 0;
  182 
  183         for (i = 0; i < tp->rcv_numsacks; i++) {
  184                 start = tp->sackblks[i].start;
  185                 end = tp->sackblks[i].end;
  186                 if (SEQ_LT(rcv_end, start)) {
  187                         /* Segment lies to the left of this SACK block. */
  188                         continue;
  189                 }
  190                 if (SEQ_GT(rcv_start, end)) {
  191                         /* Segment lies to the right of this SACK block. */
  192                         continue;
  193                 }
  194                 if (SEQ_GT(tp->rcv_nxt, end)) {
  195                         if ((SEQ_MAX(rcv_start, start) != SEQ_MIN(rcv_end, end)) &&
  196                             (SEQ_GT(head_blk.start, SEQ_MAX(rcv_start, start)) ||
  197                             (head_blk.start == head_blk.end))) {
  198                                 head_blk.start = SEQ_MAX(rcv_start, start);
  199                                 head_blk.end = SEQ_MIN(rcv_end, end);
  200                         }
  201                         continue;
  202                 }
  203                 if (((head_blk.start == head_blk.end) ||
  204                      SEQ_LT(start, head_blk.start)) &&
  205                      (SEQ_GT(end, rcv_start) &&
  206                       SEQ_LEQ(start, rcv_end))) {
  207                         head_blk.start = start;
  208                         head_blk.end = end;
  209                 }
  210                 mid_blk.start = SEQ_MIN(mid_blk.start, start);
  211                 mid_blk.end = SEQ_MAX(mid_blk.end, end);
  212                 if ((mid_blk.start == start) &&
  213                     (mid_blk.end == end))
  214                         identical = 1;
  215         }
  216         if (SEQ_LT(head_blk.start, head_blk.end)) {
  217                 /* First, store the overlapping (DSACK) range. */
  218                 saved_blks[0].start = SEQ_MAX(rcv_start, head_blk.start);
  219                 saved_blks[0].end   = SEQ_MIN(rcv_end, head_blk.end);
  220         }
  221         n = 1;
  222         /*
  223          * Second, if not ACKed, store the SACK block that
  224          * overlaps with the DSACK block, unless it is identical.
  225          */
  226         if ((SEQ_LT(tp->rcv_nxt, mid_blk.end) &&
  227             !((mid_blk.start == saved_blks[0].start) &&
  228             (mid_blk.end == saved_blks[0].end))) ||
  229             identical == 1) {
  230                 saved_blks[n].start = mid_blk.start;
  231                 saved_blks[n++].end = mid_blk.end;
  232         }
  233         for (j = 0; (j < tp->rcv_numsacks) && (n < MAX_SACK_BLKS); j++) {
  234                 if (((SEQ_LT(tp->sackblks[j].end, mid_blk.start) ||
  235                       SEQ_GT(tp->sackblks[j].start, mid_blk.end)) &&
  236                     (SEQ_GT(tp->sackblks[j].start, tp->rcv_nxt))))
  237                         saved_blks[n++] = tp->sackblks[j];
  238         }
  239         j = 0;
  240         for (i = 0; i < n; i++) {
  241                 /* We can end up with a stale initial entry. */
  242                 if (SEQ_LT(saved_blks[i].start, saved_blks[i].end)) {
  243                         tp->sackblks[j++] = saved_blks[i];
  244                 }
  245         }
  246         tp->rcv_numsacks = j;
  247 }
  248 
  249 /*
  250  * This function is called upon receipt of new valid data (while not in
  251  * header prediction mode), and it updates the ordered list of sacks.
  252  */
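      /*
       * Illustrative example: with rcv_nxt = 1000, receiving out-of-order
       * data [2000,2500) yields the list { [2000,2500) }.  If [3000,3500)
       * arrives next, the list becomes { [3000,3500), [2000,2500) }: the
       * block covering the most recently received segment is reported
       * first, and overlapping or adjacent blocks are merged into it.
       */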
  253 void
  254 tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
  255 {
  256         /*
  257          * First reported block MUST be the most recent one.  Subsequent
  258          * blocks SHOULD be in the order in which they arrived at the
  259          * receiver.  These two conditions make the implementation fully
  260          * compliant with RFC 2018.
  261          */
  262         struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
  263         int num_head, num_saved, i;
  264 
  265         INP_WLOCK_ASSERT(tp->t_inpcb);
  266 
  267         /* Check arguments. */
  268         KASSERT(SEQ_LEQ(rcv_start, rcv_end), ("rcv_start <= rcv_end"));
  269 
  270         if ((rcv_start == rcv_end) &&
  271             (tp->rcv_numsacks >= 1) &&
  272             (rcv_end == tp->sackblks[0].end)) {
  273                 /* retaining DSACK block below rcv_nxt (todrop) */
  274                 head_blk = tp->sackblks[0];
  275         } else {
  276                 /* SACK block for the received segment. */
  277                 head_blk.start = rcv_start;
  278                 head_blk.end = rcv_end;
  279         }
  280 
  281         /*
  282          * Merge updated SACK blocks into head_blk, and save unchanged SACK
  283          * blocks into saved_blks[].  num_saved will have the number of the
  284          * saved SACK blocks.
  285          */
  286         num_saved = 0;
  287         for (i = 0; i < tp->rcv_numsacks; i++) {
  288                 tcp_seq start = tp->sackblks[i].start;
  289                 tcp_seq end = tp->sackblks[i].end;
  290                 if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
  291                         /*
  292                          * Discard this SACK block.
  293                          */
  294                 } else if (SEQ_LEQ(head_blk.start, end) &&
  295                            SEQ_GEQ(head_blk.end, start)) {
  296                         /*
  297                          * Merge this SACK block into head_blk.  This SACK
  298                          * block itself will be discarded.
  299                          */
  300                         /*
  301                          * |-|
  302                          *   |---|  merge
  303                          *
  304                          *     |-|
  305                          * |---|    merge
  306                          *
  307                          * |-----|
  308                          *   |-|    DSACK smaller
  309                          *
  310                          *   |-|
  311                          * |-----|  DSACK smaller
  312                          */
  313                         if (head_blk.start == end)
  314                                 head_blk.start = start;
  315                         else if (head_blk.end == start)
  316                                 head_blk.end = end;
  317                         else {
  318                                 if (SEQ_LT(head_blk.start, start)) {
  319                                         tcp_seq temp = start;
  320                                         start = head_blk.start;
  321                                         head_blk.start = temp;
  322                                 }
  323                                 if (SEQ_GT(head_blk.end, end)) {
  324                                         tcp_seq temp = end;
  325                                         end = head_blk.end;
  326                                         head_blk.end = temp;
  327                                 }
  328                                 if ((head_blk.start != start) ||
  329                                     (head_blk.end != end)) {
  330                                         if ((num_saved >= 1) &&
  331                                            SEQ_GEQ(saved_blks[num_saved-1].start, start) &&
  332                                            SEQ_LEQ(saved_blks[num_saved-1].end, end))
  333                                                 num_saved--;
  334                                         saved_blks[num_saved].start = start;
  335                                         saved_blks[num_saved].end = end;
  336                                         num_saved++;
  337                                 }
  338                         }
  339                 } else {
  340                         /*
  341                          * This block supersedes the prior block.
  342                          */
  343                         if ((num_saved >= 1) &&
  344                            SEQ_GEQ(saved_blks[num_saved-1].start, start) &&
  345                            SEQ_LEQ(saved_blks[num_saved-1].end, end))
  346                                 num_saved--;
  347                         /*
  348                          * Save this SACK block.
  349                          */
  350                         saved_blks[num_saved].start = start;
  351                         saved_blks[num_saved].end = end;
  352                         num_saved++;
  353                 }
  354         }
  355 
  356         /*
  357          * Update SACK list in tp->sackblks[].
  358          */
  359         num_head = 0;
  360         if (SEQ_LT(rcv_start, rcv_end)) {
  361                 /*
  362                  * The received data segment is an out-of-order segment.  Put
  363                  * head_blk at the top of SACK list.
  364                  */
  365                 tp->sackblks[0] = head_blk;
  366                 num_head = 1;
  367                 /*
  368                  * If the number of saved SACK blocks exceeds its limit,
  369                  * discard the last SACK block.
  370                  */
  371                 if (num_saved >= MAX_SACK_BLKS)
  372                         num_saved--;
  373         }
  374         if ((rcv_start == rcv_end) &&
  375             (rcv_start == tp->sackblks[0].end)) {
  376                 num_head = 1;
  377         }
  378         if (num_saved > 0) {
  379                 /*
  380                  * Copy the saved SACK blocks back.
  381                  */
  382                 bcopy(saved_blks, &tp->sackblks[num_head],
  383                       sizeof(struct sackblk) * num_saved);
  384         }
  385 
  386         /* Save the number of SACK blocks. */
  387         tp->rcv_numsacks = num_head + num_saved;
  388 }
  389 
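      /*
       * Drop DSACK entries (blocks starting at or below rcv_nxt, or empty
       * blocks) from the receiver's SACK list, keeping only the still
       * relevant out-of-order blocks.
       */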
  390 void
  391 tcp_clean_dsack_blocks(struct tcpcb *tp)
  392 {
  393         struct sackblk saved_blks[MAX_SACK_BLKS];
  394         int num_saved, i;
  395 
  396         INP_WLOCK_ASSERT(tp->t_inpcb);
  397         /*
  398          * Clean up any DSACK blocks that are in our
  399          * queue of SACK blocks (i.e., blocks at or
  400          * below rcv_nxt).
  401          */
  402         num_saved = 0;
  403         for (i = 0; i < tp->rcv_numsacks; i++) {
  404                 tcp_seq start = tp->sackblks[i].start;
  405                 tcp_seq end = tp->sackblks[i].end;
  406                 if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
  407                         /*
  408                          * Discard this D-SACK block.
  409                          */
  410                         continue;
  411                 }
  412                 /*
  413                  * Save this SACK block.
  414                  */
  415                 saved_blks[num_saved].start = start;
  416                 saved_blks[num_saved].end = end;
  417                 num_saved++;
  418         }
  419         if (num_saved > 0) {
  420                 /*
  421                  * Copy the saved SACK blocks back.
  422                  */
  423                 bcopy(saved_blks, &tp->sackblks[0],
  424                       sizeof(struct sackblk) * num_saved);
  425         }
  426         tp->rcv_numsacks = num_saved;
  427 }
  428 
  429 /*
  430  * Delete all receiver-side SACK information.
  431  */
  432 void
  433 tcp_clean_sackreport(struct tcpcb *tp)
  434 {
  435         int i;
  436 
  437         INP_WLOCK_ASSERT(tp->t_inpcb);
  438         tp->rcv_numsacks = 0;
  439         for (i = 0; i < MAX_SACK_BLKS; i++)
  440                 tp->sackblks[i].start = tp->sackblks[i].end = 0;
  441 }
  442 
  443 /*
  444  * Allocate struct sackhole.
  445  */
  446 static struct sackhole *
  447 tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
  448 {
  449         struct sackhole *hole;
  450 
  451         if (tp->snd_numholes >= V_tcp_sack_maxholes ||
  452             V_tcp_sack_globalholes >= V_tcp_sack_globalmaxholes) {
  453                 TCPSTAT_INC(tcps_sack_sboverflow);
  454                 return NULL;
  455         }
  456 
  457         hole = (struct sackhole *)uma_zalloc(V_sack_hole_zone, M_NOWAIT);
  458         if (hole == NULL)
  459                 return NULL;
  460 
  461         hole->start = start;
  462         hole->end = end;
  463         hole->rxmit = start;
  464 
  465         tp->snd_numholes++;
  466         atomic_add_int(&V_tcp_sack_globalholes, 1);
  467 
  468         return hole;
  469 }
  470 
  471 /*
  472  * Free struct sackhole.
  473  */
  474 static void
  475 tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
  476 {
  477 
  478         uma_zfree(V_sack_hole_zone, hole);
  479 
  480         tp->snd_numholes--;
  481         atomic_subtract_int(&V_tcp_sack_globalholes, 1);
  482 
  483         KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
  484         KASSERT(V_tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
  485 }
  486 
  487 /*
  488  * Insert new SACK hole into scoreboard.
  489  */
  490 static struct sackhole *
  491 tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end,
  492     struct sackhole *after)
  493 {
  494         struct sackhole *hole;
  495 
  496         /* Allocate a new SACK hole. */
  497         hole = tcp_sackhole_alloc(tp, start, end);
  498         if (hole == NULL)
  499                 return NULL;
  500 
  501         /* Insert the new SACK hole into scoreboard. */
  502         if (after != NULL)
  503                 TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink);
  504         else
  505                 TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink);
  506 
  507         /* Update SACK hint. */
  508         if (tp->sackhint.nexthole == NULL)
  509                 tp->sackhint.nexthole = hole;
  510 
  511         return hole;
  512 }
  513 
  514 /*
  515  * Remove SACK hole from scoreboard.
  516  */
  517 static void
  518 tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole)
  519 {
  520 
  521         /* Update SACK hint. */
  522         if (tp->sackhint.nexthole == hole)
  523                 tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink);
  524 
  525         /* Remove this SACK hole. */
  526         TAILQ_REMOVE(&tp->snd_holes, hole, scblink);
  527 
  528         /* Free this SACK hole. */
  529         tcp_sackhole_free(tp, hole);
  530 }
  531 
  532 /*
  533  * Process cumulative ACK and the TCP SACK option to update the scoreboard.
  534  * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
  535  * the sequence space).
  536  * Returns 1 if incoming ACK has previously unknown SACK information,
  537  * 0 otherwise.
  538  */
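      /*
       * Worked example (illustrative): with an empty scoreboard,
       * snd_una = 1000, and an ACK carrying th_ack = 1000 plus SACK blocks
       * [3000,4000) and [5000,6000), the code below first appends the hole
       * [1000,5000), then splits it on the lower block, leaving holes
       * [1000,3000) and [4000,5000) with snd_fack = 6000, and returns 1.
       * An identical ACK received again would change nothing and return 0.
       */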
  539 int
  540 tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack)
  541 {
  542         struct sackhole *cur, *temp;
  543         struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
  544         int i, j, num_sack_blks, sack_changed;
  545         int delivered_data, left_edge_delta;
  546 
  547         INP_WLOCK_ASSERT(tp->t_inpcb);
  548 
  549         num_sack_blks = 0;
  550         sack_changed = 0;
  551         delivered_data = 0;
  552         left_edge_delta = 0;
  553         /*
  554          * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
  555          * treat [SND.UNA, SEG.ACK) as if it is a SACK block.
  556          * Changes to SND.UNA are always accounted as delivered data.
  557          */
  558         if (SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
  559                 left_edge_delta = th_ack - tp->snd_una;
  560                 sack_blocks[num_sack_blks].start = tp->snd_una;
  561                 sack_blocks[num_sack_blks++].end = th_ack;
  562         }
  563         /*
  564          * Append received valid SACK blocks to sack_blocks[], but only if we
  565          * received new blocks from the other side.
  566          */
  567         if (to->to_flags & TOF_SACK) {
  568                 for (i = 0; i < to->to_nsacks; i++) {
  569                         bcopy((to->to_sacks + i * TCPOLEN_SACK),
  570                             &sack, sizeof(sack));
  571                         sack.start = ntohl(sack.start);
  572                         sack.end = ntohl(sack.end);
  573                         if (SEQ_GT(sack.end, sack.start) &&
  574                             SEQ_GT(sack.start, tp->snd_una) &&
  575                             SEQ_GT(sack.start, th_ack) &&
  576                             SEQ_LT(sack.start, tp->snd_max) &&
  577                             SEQ_GT(sack.end, tp->snd_una) &&
  578                             SEQ_LEQ(sack.end, tp->snd_max)) {
  579                                 sack_blocks[num_sack_blks++] = sack;
  580                         }
  581                 }
  582         }
  583         /*
  584          * Return if SND.UNA is not advanced and no valid SACK block is
  585          * received.
  586          */
  587         if (num_sack_blks == 0)
  588                 return (sack_changed);
  589 
  590         /*
  591          * Sort the SACK blocks so we can update the scoreboard with just one
  592          * pass. The overhead of sorting up to 4+1 elements is less than
  593          * making up to 4+1 passes over the scoreboard.
  594          */
  595         for (i = 0; i < num_sack_blks; i++) {
  596                 for (j = i + 1; j < num_sack_blks; j++) {
  597                         if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
  598                                 sack = sack_blocks[i];
  599                                 sack_blocks[i] = sack_blocks[j];
  600                                 sack_blocks[j] = sack;
  601                         }
  602                 }
  603         }
  604         if (TAILQ_EMPTY(&tp->snd_holes)) {
  605                 /*
  606                  * Empty scoreboard. Need to initialize snd_fack (it may be
  607                  * uninitialized or have a bogus value). Scoreboard holes
  608                  * (from the sack blocks received) are created later below
  609                  * (in the logic that adds holes to the tail of the
  610                  * scoreboard).
  611                  */
  612                 tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
  613                 tp->sackhint.sacked_bytes = 0;  /* reset */
  614         }
  615         /*
  616          * In the while-loop below, incoming SACK blocks (sack_blocks[]) and
  617          * SACK holes (snd_holes) are traversed from their tails with just
  618          * one pass in order to reduce the number of compares especially when
  619          * the bandwidth-delay product is large.
  620          *
  621          * Note: Typically, in the first RTT of SACK recovery, the highest
  622          * three or four SACK blocks with the same ack number are received.
  623          * In the second RTT, if retransmitted data segments are not lost,
  624          * the highest three or four SACK blocks with ack number advancing
  625          * are received.
  626          */
  627         sblkp = &sack_blocks[num_sack_blks - 1];        /* Last SACK block */
  628         tp->sackhint.last_sack_ack = sblkp->end;
  629         if (SEQ_LT(tp->snd_fack, sblkp->start)) {
  630                 /*
  631                  * The highest SACK block is beyond fack.  Append new SACK
  632                  * hole at the tail.  If the second or later highest SACK
  633                  * blocks are also beyond the current fack, they will be
  634                  * inserted by way of hole splitting in the while-loop below.
  635                  */
  636                 temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start, NULL);
  637                 if (temp != NULL) {
  638                         delivered_data += sblkp->end - sblkp->start;
  639                         tp->snd_fack = sblkp->end;
  640                         /* Go to the previous sack block. */
  641                         sblkp--;
  642                         sack_changed = 1;
  643                 } else {
  644                         /* 
  645                          * We failed to add a new hole based on the current 
  646                          * sack block.  Skip over all the sack blocks that 
  647                          * fall completely to the right of snd_fack and
  648                          * proceed to trim the scoreboard based on the
  649                          * remaining sack blocks.  This also trims the
  650                          * scoreboard for th_ack (which is sack_blocks[0]).
  651                          */
  652                         while (sblkp >= sack_blocks && 
  653                                SEQ_LT(tp->snd_fack, sblkp->start))
  654                                 sblkp--;
  655                         if (sblkp >= sack_blocks && 
  656                             SEQ_LT(tp->snd_fack, sblkp->end)) {
  657                                 delivered_data += sblkp->end - tp->snd_fack;
  658                                 tp->snd_fack = sblkp->end;
  659                                 sack_changed = 1;
  660                         }
  661                 }
  662         } else if (SEQ_LT(tp->snd_fack, sblkp->end)) {
  663                 /* fack is advanced. */
  664                 delivered_data += sblkp->end - tp->snd_fack;
  665                 tp->snd_fack = sblkp->end;
  666                 sack_changed = 1;
  667         }
  668         cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole. */
  669         /*
  670          * Since the incoming sack blocks are sorted, we can process them
  671          * making one sweep of the scoreboard.
  672          */
  673         while (sblkp >= sack_blocks && cur != NULL) {
  674                 if (SEQ_GEQ(sblkp->start, cur->end)) {
  675                         /*
  676                          * SACKs data beyond the current hole.  Go to the
  677                          * previous sack block.
  678                          */
  679                         sblkp--;
  680                         continue;
  681                 }
  682                 if (SEQ_LEQ(sblkp->end, cur->start)) {
  683                         /*
  684                          * SACKs data before the current hole.  Go to the
  685                          * previous hole.
  686                          */
  687                         cur = TAILQ_PREV(cur, sackhole_head, scblink);
  688                         continue;
  689                 }
  690                 tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
  691                 KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
  692                     ("sackhint bytes rtx >= 0"));
  693                 sack_changed = 1;
  694                 if (SEQ_LEQ(sblkp->start, cur->start)) {
  695                         /* Data acks at least the beginning of hole. */
  696                         if (SEQ_GEQ(sblkp->end, cur->end)) {
  697                                 /* Acks entire hole, so delete hole. */
  698                                 delivered_data += (cur->end - cur->start);
  699                                 temp = cur;
  700                                 cur = TAILQ_PREV(cur, sackhole_head, scblink);
  701                                 tcp_sackhole_remove(tp, temp);
  702                                 /*
  703                                  * The sack block may ack all or part of the
  704                                  * next hole too, so continue onto the next
  705                                  * hole.
  706                                  */
  707                                 continue;
  708                         } else {
  709                                 /* Move start of hole forward. */
  710                                 delivered_data += (sblkp->end - cur->start);
  711                                 cur->start = sblkp->end;
  712                                 cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
  713                         }
  714                 } else {
  715                         /* Data acks at least the end of hole. */
  716                         if (SEQ_GEQ(sblkp->end, cur->end)) {
  717                                 /* Move end of hole backward. */
  718                                 delivered_data += (cur->end - sblkp->start);
  719                                 cur->end = sblkp->start;
  720                                 cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
  721                         } else {
  722                                 /*
  723                                  * ACKs some data in middle of a hole; need
  724                                  * to split current hole
  725                                  */
  726                                 temp = tcp_sackhole_insert(tp, sblkp->end,
  727                                     cur->end, cur);
  728                                 if (temp != NULL) {
  729                                         if (SEQ_GT(cur->rxmit, temp->rxmit)) {
  730                                                 temp->rxmit = cur->rxmit;
  731                                                 tp->sackhint.sack_bytes_rexmit
  732                                                     += (temp->rxmit
  733                                                     - temp->start);
  734                                         }
  735                                         cur->end = sblkp->start;
  736                                         cur->rxmit = SEQ_MIN(cur->rxmit,
  737                                             cur->end);
  738                                         delivered_data += (sblkp->end - sblkp->start);
  739                                 }
  740                         }
  741                 }
  742                 tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
  743                 /*
  744                  * Testing sblkp->start against cur->start tells us whether
  745                  * we're done with the sack block or the sack hole.
  746                  * Accordingly, we advance one or the other.
  747                  */
  748                 if (SEQ_LEQ(sblkp->start, cur->start))
  749                         cur = TAILQ_PREV(cur, sackhole_head, scblink);
  750                 else
  751                         sblkp--;
  752         }
  753         tp->sackhint.delivered_data = delivered_data;
  754         tp->sackhint.sacked_bytes += delivered_data - left_edge_delta;
  755         KASSERT((delivered_data >= 0), ("delivered_data < 0"));
  756         KASSERT((tp->sackhint.sacked_bytes >= 0), ("sacked_bytes < 0"));
  757         return (sack_changed);
  758 }
  759 
  760 /*
  761  * Free all SACK holes to clear the scoreboard.
  762  */
  763 void
  764 tcp_free_sackholes(struct tcpcb *tp)
  765 {
  766         struct sackhole *q;
  767 
  768         INP_WLOCK_ASSERT(tp->t_inpcb);
  769         while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL)
  770                 tcp_sackhole_remove(tp, q);
  771         tp->sackhint.sack_bytes_rexmit = 0;
  772 
  773         KASSERT(tp->snd_numholes == 0, ("tp->snd_numholes == 0"));
  774         KASSERT(tp->sackhint.nexthole == NULL,
  775                 ("tp->sackhint.nexthole == NULL"));
  776 }
  777 
  778 /*
  779  * Partial ack handling within a sack recovery episode.  Keeping this very
  780  * simple for now.  When a partial ack is received, force snd_cwnd to a value
  781  * that will allow the sender to transmit no more than 2 segments.  If
  782  * necessary, a better scheme can be adopted at a later point, but for now,
  783  * the goal is to prevent the sender from bursting a large amount of data in
  784  * the midst of sack recovery.
  785  */
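      /*
       * Illustrative numbers: with maxseg = 1000, sack_bytes_rexmit = 0 and
       * snd_nxt == sack_newdata, a partial ACK covering one segment leaves
       * snd_cwnd at 1 * maxseg = 1000 bytes, and one covering two or more
       * segments leaves it at 2000 bytes, in both cases subject to the
       * snd_ssthresh clamp below.
       */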
  786 void
  787 tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
  788 {
  789         int num_segs = 1;
  790         u_int maxseg = tcp_maxseg(tp);
  791 
  792         INP_WLOCK_ASSERT(tp->t_inpcb);
  793         tcp_timer_activate(tp, TT_REXMT, 0);
  794         tp->t_rtttime = 0;
  795         /* Send one or two segments based on how much new data was acked. */
  796         if ((BYTES_THIS_ACK(tp, th) / maxseg) >= 2)
  797                 num_segs = 2;
  798         tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
  799             (tp->snd_nxt - tp->sack_newdata) + num_segs * maxseg);
  800         if (tp->snd_cwnd > tp->snd_ssthresh)
  801                 tp->snd_cwnd = tp->snd_ssthresh;
  802         tp->t_flags |= TF_ACKNOW;
  803         (void) tp->t_fb->tfb_tcp_output(tp);
  804 }
  805 
  806 #if 0
  807 /*
  808  * Debug version of tcp_sack_output() that walks the scoreboard.  Used for
  809  * now to sanity check the hint.
  810  */
  811 static struct sackhole *
  812 tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
  813 {
  814         struct sackhole *p;
  815 
  816         INP_WLOCK_ASSERT(tp->t_inpcb);
  817         *sack_bytes_rexmt = 0;
  818         TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
  819                 if (SEQ_LT(p->rxmit, p->end)) {
  820                         if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */
  821                                 continue;
  822                         }
  823                         *sack_bytes_rexmt += (p->rxmit - p->start);
  824                         break;
  825                 }
  826                 *sack_bytes_rexmt += (p->rxmit - p->start);
  827         }
  828         return (p);
  829 }
  830 #endif
  831 
  832 /*
  833  * Returns the next hole to retransmit and the number of retransmitted bytes
  834  * from the scoreboard.  We store both the next hole and the number of
  835  * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
  836  * reception).  This avoids scoreboard traversals completely.
  837  *
  838  * The loop here will traverse *at most* one link.  Here's the argument.  For
  839  * the loop to traverse more than 1 link before finding the next hole to
  840  * retransmit, we would need to have at least 1 node following the current
  841  * hint with (rxmit == end).  But, for all holes following the current hint,
  842  * (start == rxmit), since we have not yet retransmitted from them.
  843  * Therefore, in order to traverse more than 1 link in the loop below, we need to
  844  * have at least one node following the current hint with (start == rxmit ==
  845  * end).  But that can't happen: (start == end) means that all the data in
  846  * that hole has been sacked, in which case, the hole would have been removed
  847  * from the scoreboard.
  848  */
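      /*
       * Usage note: during SACK-based loss recovery the output path calls
       * this routine to pick what to send next; a non-NULL return means the
       * range [hole->rxmit, hole->end) still needs to be retransmitted,
       * while *sack_bytes_rexmt reports how many bytes have already been
       * retransmitted from scoreboard holes.
       */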
  849 struct sackhole *
  850 tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
  851 {
  852         struct sackhole *hole = NULL;
  853 
  854         INP_WLOCK_ASSERT(tp->t_inpcb);
  855         *sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
  856         hole = tp->sackhint.nexthole;
  857         if (hole == NULL || SEQ_LT(hole->rxmit, hole->end))
  858                 goto out;
  859         while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
  860                 if (SEQ_LT(hole->rxmit, hole->end)) {
  861                         tp->sackhint.nexthole = hole;
  862                         break;
  863                 }
  864         }
  865 out:
  866         return (hole);
  867 }
  868 
  869 /*
  870  * After a timeout, the SACK list may be rebuilt.  This SACK information
  871  * should be used to avoid retransmitting SACKed data.  This function
  872  * traverses the SACK list to see if snd_nxt should be moved forward.
  873  */
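      /*
       * Illustrative example: after a retransmission timeout snd_nxt is
       * pulled back to snd_una.  With holes [1000,2000) and [3000,4000) and
       * snd_fack = 5000, once snd_nxt reaches 2000 this routine advances it
       * to 3000, and once it reaches 4000 it advances it to snd_fack,
       * skipping data that the peer has already reported as received.
       */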
  874 void
  875 tcp_sack_adjust(struct tcpcb *tp)
  876 {
  877         struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);
  878 
  879         INP_WLOCK_ASSERT(tp->t_inpcb);
  880         if (cur == NULL)
  881                 return; /* No holes */
  882         if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack))
  883                 return; /* We're already beyond any SACKed blocks */
  884         /*-
  885          * Two cases for which we want to advance snd_nxt:
  886          * i) snd_nxt lies between end of one hole and beginning of another
  887          * ii) snd_nxt lies between end of last hole and snd_fack
  888          */
  889         while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
  890                 if (SEQ_LT(tp->snd_nxt, cur->end))
  891                         return;
  892                 if (SEQ_GEQ(tp->snd_nxt, p->start))
  893                         cur = p;
  894                 else {
  895                         tp->snd_nxt = p->start;
  896                         return;
  897                 }
  898         }
  899         if (SEQ_LT(tp->snd_nxt, cur->end))
  900                 return;
  901         tp->snd_nxt = tp->snd_fack;
  902 }
