FreeBSD/Linux Kernel Cross Reference
sys/netinet/sctp_cc_functions.c


    1 /*-
    2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions are met:
    6  *
    7  * a) Redistributions of source code must retain the above copyright notice,
    8  *   this list of conditions and the following disclaimer.
    9  *
   10  * b) Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in
   12  *   the documentation and/or other materials provided with the distribution.
   13  *
   14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
   15  *    contributors may be used to endorse or promote products derived
   16  *    from this software without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   28  * THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 
   31 #include <netinet/sctp_os.h>
   32 #include <netinet/sctp_var.h>
   33 #include <netinet/sctp_sysctl.h>
   34 #include <netinet/sctp_pcb.h>
   35 #include <netinet/sctp_header.h>
   36 #include <netinet/sctputil.h>
   37 #include <netinet/sctp_output.h>
   38 #include <netinet/sctp_input.h>
   39 #include <netinet/sctp_indata.h>
   40 #include <netinet/sctp_uio.h>
   41 #include <netinet/sctp_timer.h>
   42 #include <netinet/sctp_auth.h>
   43 #include <netinet/sctp_asconf.h>
   44 #include <netinet/sctp_cc_functions.h>
   45 #include <sys/cdefs.h>
   46 __FBSDID("$FreeBSD$");
   47 void
   48 sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
   49 {
    50         /*
    51          * Take the larger of two MTUs and SCTP_INITIAL_CWND, then
    52          * cap the result at four MTUs of sending.
    53          */
   54         net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
   55         /* we always get at LEAST 2 MTU's */
   56         if (net->cwnd < (2 * net->mtu)) {
   57                 net->cwnd = 2 * net->mtu;
   58         }
   59         net->ssthresh = stcb->asoc.peers_rwnd;
   60 
   61         if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
   62                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
   63         }
   64 }
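
As a standalone sketch of the clamp above (assuming SCTP_INITIAL_CWND is the 4380-byte
value suggested by RFC 4960; the real constant lives in sctp_constants.h):

        #include <stdio.h>
        #include <stdint.h>

        #define SCTP_INITIAL_CWND 4380          /* assumed value, per RFC 4960 */
        #define MIN(a, b) ((a) < (b) ? (a) : (b))
        #define MAX(a, b) ((a) > (b) ? (a) : (b))

        /* Mirror of the initial-cwnd clamp above: at least 2 MTUs, at most 4 MTUs. */
        uint32_t
        initial_cwnd(uint32_t mtu)
        {
                uint32_t cwnd;

                cwnd = MIN(mtu * 4, MAX(2 * mtu, SCTP_INITIAL_CWND));
                if (cwnd < 2 * mtu)
                        cwnd = 2 * mtu;
                return (cwnd);
        }

        int
        main(void)
        {
                /* For a 1500-byte MTU: min(6000, max(3000, 4380)) = 4380. */
                printf("%u\n", initial_cwnd(1500));
                return (0);
        }

For a 9000-byte jumbo-frame path the same clamp yields min(36000, max(18000, 4380)) = 18000,
i.e. the two-MTU floor dominates.
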
   65 
   66 void
   67 sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
   68     struct sctp_association *asoc)
   69 {
   70         struct sctp_nets *net;
   71 
   72         /*-
   73          * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
   74          * (net->fast_retran_loss_recovery == 0)))
   75          */
   76         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   77                 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
   78                         /* out of a RFC2582 Fast recovery window? */
   79                         if (net->net_ack > 0) {
   80                                 /*
   81                                  * per section 7.2.3, are there any
   82                                  * destinations that had a fast retransmit
   83                                  * to them. If so what we need to do is
   84                                  * adjust ssthresh and cwnd.
   85                                  */
   86                                 struct sctp_tmit_chunk *lchk;
   87                                 int old_cwnd = net->cwnd;
   88 
   89                                 net->ssthresh = net->cwnd / 2;
   90                                 if (net->ssthresh < (net->mtu * 2)) {
   91                                         net->ssthresh = 2 * net->mtu;
   92                                 }
   93                                 net->cwnd = net->ssthresh;
   94                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
   95                                         sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
   96                                             SCTP_CWND_LOG_FROM_FR);
   97                                 }
   98                                 lchk = TAILQ_FIRST(&asoc->send_queue);
   99 
  100                                 net->partial_bytes_acked = 0;
  101                                 /* Turn on fast recovery window */
  102                                 asoc->fast_retran_loss_recovery = 1;
  103                                 if (lchk == NULL) {
  104                                         /* Mark end of the window */
  105                                         asoc->fast_recovery_tsn = asoc->sending_seq - 1;
  106                                 } else {
  107                                         asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  108                                 }
  109 
  110                                 /*
  111                                  * CMT fast recovery -- per destination
  112                                  * recovery variable.
  113                                  */
  114                                 net->fast_retran_loss_recovery = 1;
  115 
  116                                 if (lchk == NULL) {
  117                                         /* Mark end of the window */
  118                                         net->fast_recovery_tsn = asoc->sending_seq - 1;
  119                                 } else {
  120                                         net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  121                                 }
  122 
  123                                 /*
  124                                  * Disable Nonce Sum Checking and store the
  125                                  * resync tsn
  126                                  */
  127                                 asoc->nonce_sum_check = 0;
  128                                 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
  129 
  130                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
  131                                     stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
  132                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
  133                                     stcb->sctp_ep, stcb, net);
  134                         }
  135                 } else if (net->net_ack > 0) {
  136                         /*
  137                          * Mark a peg that we WOULD have done a cwnd
  138                          * reduction but RFC2582 prevented this action.
  139                          */
  140                         SCTP_STAT_INCR(sctps_fastretransinrtt);
  141                 }
  142         }
  143 }
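
The cut applied per destination above is the standard RFC 2582/4960 halving with a
two-MTU floor; a minimal sketch of just that arithmetic (illustrative values only):

        #include <stdio.h>
        #include <stdint.h>

        /* Halve cwnd into ssthresh, never below two MTUs, then restart from there. */
        void
        fast_retrans_cut(uint32_t *cwnd, uint32_t *ssthresh, uint32_t mtu)
        {
                *ssthresh = *cwnd / 2;
                if (*ssthresh < 2 * mtu)
                        *ssthresh = 2 * mtu;
                *cwnd = *ssthresh;
        }

        int
        main(void)
        {
                uint32_t cwnd = 20000, ssthresh = 65535;

                fast_retrans_cut(&cwnd, &ssthresh, 1500);
                printf("cwnd=%u ssthresh=%u\n", cwnd, ssthresh);  /* 10000 10000 */
                return (0);
        }
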
  144 
  145 void
  146 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
  147     struct sctp_association *asoc,
  148     int accum_moved, int reneged_all, int will_exit)
  149 {
  150         struct sctp_nets *net;
  151 
  152         /******************************/
  153         /* update cwnd and Early FR   */
  154         /******************************/
  155         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  156 
  157 #ifdef JANA_CMT_FAST_RECOVERY
  158                 /*
  159                  * CMT fast recovery code. Need to debug.
  160                  */
  161                 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
  162                         if (compare_with_wrap(asoc->last_acked_seq,
  163                             net->fast_recovery_tsn, MAX_TSN) ||
  164                             (asoc->last_acked_seq == net->fast_recovery_tsn) ||
  165                             compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
  166                             (net->pseudo_cumack == net->fast_recovery_tsn)) {
  167                                 net->will_exit_fast_recovery = 1;
  168                         }
  169                 }
  170 #endif
  171                 if (sctp_early_fr) {
  172                         /*
   173                          * So, first of all, do we need to have an Early FR
  174                          * timer running?
  175                          */
  176                         if (((TAILQ_FIRST(&asoc->sent_queue)) &&
  177                             (net->ref_count > 1) &&
  178                             (net->flight_size < net->cwnd)) ||
  179                             (reneged_all)) {
  180                                 /*
   181                                  * Yes, so in this case stop it if it's
   182                                  * running, and then restart it. Reneging
   183                                  * all is a special case where we want to
   184                                  * run the Early FR timer and then force the
   185                                  * last few unacked chunks to be sent, causing
   186                                  * us to elicit a SACK with gaps to force out
   187                                  * the others.
  188                                  */
  189                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  190                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
  191                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  192                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
  193                                 }
  194                                 SCTP_STAT_INCR(sctps_earlyfrstrid);
  195                                 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
  196                         } else {
   197                                 /* No, stop it if it's running */
  198                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  199                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
  200                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  201                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
  202                                 }
  203                         }
  204                 }
  205                 /* if nothing was acked on this destination skip it */
  206                 if (net->net_ack == 0) {
  207                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  208                                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
  209                         }
  210                         continue;
  211                 }
  212                 if (net->net_ack2 > 0) {
  213                         /*
   214                          * Karn's rule applies to clearing the error count;
   215                          * this is optional.
  216                          */
  217                         net->error_count = 0;
  218                         if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
  219                             SCTP_ADDR_NOT_REACHABLE) {
  220                                 /* addr came good */
  221                                 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
  222                                 net->dest_state |= SCTP_ADDR_REACHABLE;
  223                                 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
  224                                     SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
  225                                 /* now was it the primary? if so restore */
  226                                 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
  227                                         (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
  228                                 }
  229                         }
  230                         /*
  231                          * JRS 5/14/07 - If CMT PF is on and the destination
  232                          * is in PF state, set the destination to active
  233                          * state and set the cwnd to one or two MTU's based
  234                          * on whether PF1 or PF2 is being used.
  235                          * 
  236                          * Should we stop any running T3 timer here?
  237                          */
  238                         if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
  239                             SCTP_ADDR_PF)) {
  240                                 net->dest_state &= ~SCTP_ADDR_PF;
  241                                 net->cwnd = net->mtu * sctp_cmt_pf;
  242                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
  243                                     net, net->cwnd);
  244                                 /*
  245                                  * Since the cwnd value is explicitly set,
  246                                  * skip the code that updates the cwnd
  247                                  * value.
  248                                  */
  249                                 goto skip_cwnd_update;
  250                         }
  251                 }
  252 #ifdef JANA_CMT_FAST_RECOVERY
  253                 /*
  254                  * CMT fast recovery code
  255                  */
  256                 /*
  257                  * if (sctp_cmt_on_off == 1 &&
  258                  * net->fast_retran_loss_recovery &&
  259                  * net->will_exit_fast_recovery == 0) { @@@ Do something }
  260                  * else if (sctp_cmt_on_off == 0 &&
  261                  * asoc->fast_retran_loss_recovery && will_exit == 0) {
  262                  */
  263 #endif
  264 
  265                 if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
  266                         /*
  267                          * If we are in loss recovery we skip any cwnd
  268                          * update
  269                          */
  270                         goto skip_cwnd_update;
  271                 }
  272                 /*
  273                  * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
  274                  * moved.
  275                  */
  276                 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
  277                         /* If the cumulative ack moved we can proceed */
  278                         if (net->cwnd <= net->ssthresh) {
  279                                 /* We are in slow start */
  280                                 if (net->flight_size + net->net_ack >=
  281                                     net->cwnd) {
  282                                         if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
  283                                                 net->cwnd += (net->mtu * sctp_L2_abc_variable);
  284                                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  285                                                         sctp_log_cwnd(stcb, net, net->mtu,
  286                                                             SCTP_CWND_LOG_FROM_SS);
  287                                                 }
  288                                         } else {
  289                                                 net->cwnd += net->net_ack;
  290                                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  291                                                         sctp_log_cwnd(stcb, net, net->net_ack,
  292                                                             SCTP_CWND_LOG_FROM_SS);
  293                                                 }
  294                                         }
  295                                 } else {
  296                                         unsigned int dif;
  297 
  298                                         dif = net->cwnd - (net->flight_size +
  299                                             net->net_ack);
  300                                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  301                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  302                                                     SCTP_CWND_LOG_NOADV_SS);
  303                                         }
  304                                 }
  305                         } else {
  306                                 /* We are in congestion avoidance */
  307                                 if (net->flight_size + net->net_ack >=
  308                                     net->cwnd) {
  309                                         /*
  310                                          * add to pba only if we had a
  311                                          * cwnd's worth (or so) in flight OR
  312                                          * the burst limit was applied.
  313                                          */
  314                                         net->partial_bytes_acked +=
  315                                             net->net_ack;
  316 
  317                                         /*
  318                                          * Do we need to increase (if pba is
  319                                          * > cwnd)?
  320                                          */
  321                                         if (net->partial_bytes_acked >=
  322                                             net->cwnd) {
  323                                                 if (net->cwnd <
  324                                                     net->partial_bytes_acked) {
  325                                                         net->partial_bytes_acked -=
  326                                                             net->cwnd;
  327                                                 } else {
  328                                                         net->partial_bytes_acked =
  329                                                             0;
  330                                                 }
  331                                                 net->cwnd += net->mtu;
  332                                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  333                                                         sctp_log_cwnd(stcb, net, net->mtu,
  334                                                             SCTP_CWND_LOG_FROM_CA);
  335                                                 }
  336                                         } else {
  337                                                 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  338                                                         sctp_log_cwnd(stcb, net, net->net_ack,
  339                                                             SCTP_CWND_LOG_NOADV_CA);
  340                                                 }
  341                                         }
  342                                 } else {
  343                                         unsigned int dif;
  344 
  345                                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  346                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  347                                                     SCTP_CWND_LOG_NOADV_CA);
  348                                         }
  349                                         dif = net->cwnd - (net->flight_size +
  350                                             net->net_ack);
  351                                 }
  352                         }
  353                 } else {
  354                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  355                                 sctp_log_cwnd(stcb, net, net->mtu,
  356                                     SCTP_CWND_LOG_NO_CUMACK);
  357                         }
  358                 }
  359 skip_cwnd_update:
  360                 /*
   361                  * NOW, according to Karn's rule, do we need to restore the
   362                  * RTO timer? Check our net_ack2. If it is not set then we
   363                  * have an ambiguity, i.e. all data acked was sent to more
   364                  * than one place.
  365                  */
  366                 if (net->net_ack2) {
  367                         /* restore any doubled timers */
  368                         net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
  369                         if (net->RTO < stcb->asoc.minrto) {
  370                                 net->RTO = stcb->asoc.minrto;
  371                         }
  372                         if (net->RTO > stcb->asoc.maxrto) {
  373                                 net->RTO = stcb->asoc.maxrto;
  374                         }
  375                 }
  376         }
  377 }
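
Stripped of logging, Early FR and CMT handling, the growth rules above reduce to the usual
slow-start/congestion-avoidance pair with an ABC-style limit; a condensed sketch (abc_l
stands in for sctp_L2_abc_variable, and the helper name is made up here):

        #include <stdint.h>

        #define MIN(a, b) ((a) < (b) ? (a) : (b))

        /* One SACK's worth of cwnd growth for a single destination. */
        void
        sack_cwnd_growth(uint32_t *cwnd, uint32_t *pba, uint32_t ssthresh,
            uint32_t flight, uint32_t acked, uint32_t mtu, uint32_t abc_l)
        {
                if (flight + acked < *cwnd)
                        return;                 /* cwnd was not the limit; no growth */
                if (*cwnd <= ssthresh) {
                        /* slow start, bounded by L*MTU per SACK (ABC) */
                        *cwnd += MIN(acked, abc_l * mtu);
                } else {
                        /* congestion avoidance: one MTU per cwnd's worth of acks */
                        *pba += acked;
                        if (*pba >= *cwnd) {
                                *pba -= *cwnd;
                                *cwnd += mtu;
                        }
                }
        }

The partial_bytes_acked counter (*pba) is what lets congestion avoidance add one MTU only
after roughly a full window of data has been newly acknowledged.
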
  378 
  379 void
  380 sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
  381     struct sctp_nets *net)
  382 {
  383         int old_cwnd = net->cwnd;
  384 
  385         net->ssthresh = net->cwnd >> 1;
  386         if (net->ssthresh < (net->mtu << 1)) {
  387                 net->ssthresh = (net->mtu << 1);
  388         }
  389         net->cwnd = net->mtu;
  390         /* floor of 1 mtu */
  391         if (net->cwnd < net->mtu)
  392                 net->cwnd = net->mtu;
  393         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  394                 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
  395         }
  396         net->partial_bytes_acked = 0;
  397 }
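
For example, a T3 timeout with cwnd = 20000 and a 1500-byte MTU leaves ssthresh = 10000 and
cwnd = 1500: the window always collapses to a single MTU, while ssthresh remembers half of
the old window (floored at two MTUs) so slow start can climb back quickly.
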
  398 
  399 struct sctp_hs_raise_drop {
  400         int32_t cwnd;
  401         int32_t increase;
  402         int32_t drop_percent;
  403 };
  404 
  405 #define SCTP_HS_TABLE_SIZE 73
  406 
  407 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
  408         {38, 1, 50},            /* 0   */
  409         {118, 2, 44},           /* 1   */
  410         {221, 3, 41},           /* 2   */
  411         {347, 4, 38},           /* 3   */
  412         {495, 5, 37},           /* 4   */
  413         {663, 6, 35},           /* 5   */
  414         {851, 7, 34},           /* 6   */
  415         {1058, 8, 33},          /* 7   */
  416         {1284, 9, 32},          /* 8   */
  417         {1529, 10, 31},         /* 9   */
  418         {1793, 11, 30},         /* 10  */
  419         {2076, 12, 29},         /* 11  */
  420         {2378, 13, 28},         /* 12  */
  421         {2699, 14, 28},         /* 13  */
  422         {3039, 15, 27},         /* 14  */
  423         {3399, 16, 27},         /* 15  */
  424         {3778, 17, 26},         /* 16  */
  425         {4177, 18, 26},         /* 17  */
  426         {4596, 19, 25},         /* 18  */
  427         {5036, 20, 25},         /* 19  */
  428         {5497, 21, 24},         /* 20  */
  429         {5979, 22, 24},         /* 21  */
  430         {6483, 23, 23},         /* 22  */
  431         {7009, 24, 23},         /* 23  */
  432         {7558, 25, 22},         /* 24  */
  433         {8130, 26, 22},         /* 25  */
  434         {8726, 27, 22},         /* 26  */
  435         {9346, 28, 21},         /* 27  */
  436         {9991, 29, 21},         /* 28  */
  437         {10661, 30, 21},        /* 29  */
  438         {11358, 31, 20},        /* 30  */
  439         {12082, 32, 20},        /* 31  */
  440         {12834, 33, 20},        /* 32  */
  441         {13614, 34, 19},        /* 33  */
  442         {14424, 35, 19},        /* 34  */
  443         {15265, 36, 19},        /* 35  */
  444         {16137, 37, 19},        /* 36  */
  445         {17042, 38, 18},        /* 37  */
  446         {17981, 39, 18},        /* 38  */
  447         {18955, 40, 18},        /* 39  */
  448         {19965, 41, 17},        /* 40  */
  449         {21013, 42, 17},        /* 41  */
  450         {22101, 43, 17},        /* 42  */
  451         {23230, 44, 17},        /* 43  */
  452         {24402, 45, 16},        /* 44  */
  453         {25618, 46, 16},        /* 45  */
  454         {26881, 47, 16},        /* 46  */
  455         {28193, 48, 16},        /* 47  */
  456         {29557, 49, 15},        /* 48  */
  457         {30975, 50, 15},        /* 49  */
  458         {32450, 51, 15},        /* 50  */
  459         {33986, 52, 15},        /* 51  */
  460         {35586, 53, 14},        /* 52  */
  461         {37253, 54, 14},        /* 53  */
  462         {38992, 55, 14},        /* 54  */
  463         {40808, 56, 14},        /* 55  */
  464         {42707, 57, 13},        /* 56  */
  465         {44694, 58, 13},        /* 57  */
  466         {46776, 59, 13},        /* 58  */
  467         {48961, 60, 13},        /* 59  */
  468         {51258, 61, 13},        /* 60  */
  469         {53677, 62, 12},        /* 61  */
  470         {56230, 63, 12},        /* 62  */
  471         {58932, 64, 12},        /* 63  */
  472         {61799, 65, 12},        /* 64  */
  473         {64851, 66, 11},        /* 65  */
  474         {68113, 67, 11},        /* 66  */
  475         {71617, 68, 11},        /* 67  */
  476         {75401, 69, 10},        /* 68  */
  477         {79517, 70, 10},        /* 69  */
  478         {84035, 71, 10},        /* 70  */
  479         {89053, 72, 10},        /* 71  */
  480         {94717, 73, 9}          /* 72  */
  481 };
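
This is the HighSpeed TCP response function (RFC 3649), with the window expressed in
1024-byte units (net->cwnd >> 10) rather than segments: the first column is the window
threshold, the second the additive increase (also in 1024-byte units), the third the
percentage drop on loss. A hedged sketch of the lookup that sctp_hs_cwnd_increase()
performs (the struct and helper names below are illustrative):

        #include <stdint.h>

        struct hs_row {
                int32_t cwnd;           /* threshold: cwnd in 1024-byte units */
                int32_t increase;       /* additive increase, in 1024-byte units */
                int32_t drop_percent;   /* multiplicative decrease, percent */
        };

        /*
         * Find the first row whose threshold exceeds the current window,
         * starting from the previously used row (mirrors last_hs_used).
         */
        int
        hs_lookup(const struct hs_row *tbl, int nrows, int start, uint32_t cwnd)
        {
                uint32_t cur_val = cwnd >> 10;
                int i, indx = nrows - 1;

                for (i = start; i < nrows; i++) {
                        if (cur_val < (uint32_t)tbl[i].cwnd) {
                                indx = i;
                                break;
                        }
                }
                return (indx);
        }

A window below 38 KiB falls through to the "normal mode" branch above, which grows by at
most one MTU per SACK.
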
  482 
  483 static void
  484 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
  485 {
  486         int cur_val, i, indx, incr;
  487 
  488         cur_val = net->cwnd >> 10;
  489         indx = SCTP_HS_TABLE_SIZE - 1;
  490 #ifdef SCTP_DEBUG
   491         printf("HS CC called.\n");
  492 #endif
  493         if (cur_val < sctp_cwnd_adjust[0].cwnd) {
  494                 /* normal mode */
  495                 if (net->net_ack > net->mtu) {
  496                         net->cwnd += net->mtu;
  497                         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  498                                 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
  499                         }
  500                 } else {
  501                         net->cwnd += net->net_ack;
  502                         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  503                                 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
  504                         }
  505                 }
  506         } else {
  507                 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
  508                         if (cur_val < sctp_cwnd_adjust[i].cwnd) {
  509                                 indx = i;
  510                                 break;
  511                         }
  512                 }
  513                 net->last_hs_used = indx;
  514                 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
  515                 net->cwnd += incr;
  516                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  517                         sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
  518                 }
  519         }
  520 }
  521 
  522 static void
  523 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
  524 {
  525         int cur_val, i, indx;
  526         int old_cwnd = net->cwnd;
  527 
  528         cur_val = net->cwnd >> 10;
  529         indx = net->last_hs_used;
  530         if (cur_val < sctp_cwnd_adjust[0].cwnd) {
  531                 /* normal mode */
  532                 net->ssthresh = net->cwnd / 2;
  533                 if (net->ssthresh < (net->mtu * 2)) {
  534                         net->ssthresh = 2 * net->mtu;
  535                 }
  536                 net->cwnd = net->ssthresh;
  537         } else {
  538                 /* drop by the proper amount */
  539                 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
  540                     sctp_cwnd_adjust[net->last_hs_used].drop_percent);
  541                 net->cwnd = net->ssthresh;
  542                 /* now where are we */
  543                 indx = net->last_hs_used;
  544                 cur_val = net->cwnd >> 10;
  545                 /* reset where we are in the table */
  546                 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
   547                         /* fell out of the HS region */
  548                         net->last_hs_used = 0;
  549                 } else {
  550                         for (i = indx; i >= 1; i--) {
  551                                 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
  552                                         break;
  553                                 }
  554                         }
  555                         net->last_hs_used = indx;
  556                 }
  557         }
  558         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  559                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
  560         }
  561 }
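
For instance, with a window of roughly 8 MB (cur_val just under 8130, table row 25) the
configured drop is 22%, so ssthresh ends up at about 78% of the old cwnd instead of the 50%
cut taken in normal mode, and the code then rechecks where the reduced window lands in the
table.
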
  562 
  563 void
  564 sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
  565     struct sctp_association *asoc)
  566 {
  567         struct sctp_nets *net;
  568 
  569         /*
  570          * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
  571          * (net->fast_retran_loss_recovery == 0)))
  572          */
  573         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  574                 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
  575                         /* out of a RFC2582 Fast recovery window? */
  576                         if (net->net_ack > 0) {
  577                                 /*
  578                                  * per section 7.2.3, are there any
  579                                  * destinations that had a fast retransmit
  580                                  * to them. If so what we need to do is
  581                                  * adjust ssthresh and cwnd.
  582                                  */
  583                                 struct sctp_tmit_chunk *lchk;
  584 
  585                                 sctp_hs_cwnd_decrease(stcb, net);
  586 
  587                                 lchk = TAILQ_FIRST(&asoc->send_queue);
  588 
  589                                 net->partial_bytes_acked = 0;
  590                                 /* Turn on fast recovery window */
  591                                 asoc->fast_retran_loss_recovery = 1;
  592                                 if (lchk == NULL) {
  593                                         /* Mark end of the window */
  594                                         asoc->fast_recovery_tsn = asoc->sending_seq - 1;
  595                                 } else {
  596                                         asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  597                                 }
  598 
  599                                 /*
  600                                  * CMT fast recovery -- per destination
  601                                  * recovery variable.
  602                                  */
  603                                 net->fast_retran_loss_recovery = 1;
  604 
  605                                 if (lchk == NULL) {
  606                                         /* Mark end of the window */
  607                                         net->fast_recovery_tsn = asoc->sending_seq - 1;
  608                                 } else {
  609                                         net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  610                                 }
  611 
  612                                 /*
  613                                  * Disable Nonce Sum Checking and store the
  614                                  * resync tsn
  615                                  */
  616                                 asoc->nonce_sum_check = 0;
  617                                 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
  618 
  619                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
  620                                     stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
  621                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
  622                                     stcb->sctp_ep, stcb, net);
  623                         }
  624                 } else if (net->net_ack > 0) {
  625                         /*
  626                          * Mark a peg that we WOULD have done a cwnd
  627                          * reduction but RFC2582 prevented this action.
  628                          */
  629                         SCTP_STAT_INCR(sctps_fastretransinrtt);
  630                 }
  631         }
  632 }
  633 
  634 void
  635 sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
  636     struct sctp_association *asoc,
  637     int accum_moved, int reneged_all, int will_exit)
  638 {
  639         struct sctp_nets *net;
  640 
  641         /******************************/
  642         /* update cwnd and Early FR   */
  643         /******************************/
  644         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  645 
  646 #ifdef JANA_CMT_FAST_RECOVERY
  647                 /*
  648                  * CMT fast recovery code. Need to debug.
  649                  */
  650                 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
  651                         if (compare_with_wrap(asoc->last_acked_seq,
  652                             net->fast_recovery_tsn, MAX_TSN) ||
  653                             (asoc->last_acked_seq == net->fast_recovery_tsn) ||
  654                             compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
  655                             (net->pseudo_cumack == net->fast_recovery_tsn)) {
  656                                 net->will_exit_fast_recovery = 1;
  657                         }
  658                 }
  659 #endif
  660                 if (sctp_early_fr) {
  661                         /*
   662                          * So, first of all, do we need to have an Early FR
  663                          * timer running?
  664                          */
  665                         if (((TAILQ_FIRST(&asoc->sent_queue)) &&
  666                             (net->ref_count > 1) &&
  667                             (net->flight_size < net->cwnd)) ||
  668                             (reneged_all)) {
  669                                 /*
   670                                  * Yes, so in this case stop it if it's
   671                                  * running, and then restart it. Reneging
   672                                  * all is a special case where we want to
   673                                  * run the Early FR timer and then force the
   674                                  * last few unacked chunks to be sent, causing
   675                                  * us to elicit a SACK with gaps to force out
   676                                  * the others.
  677                                  */
  678                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  679                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
  680                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  681                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
  682                                 }
  683                                 SCTP_STAT_INCR(sctps_earlyfrstrid);
  684                                 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
  685                         } else {
   686                                 /* No, stop it if it's running */
  687                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  688                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
  689                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  690                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
  691                                 }
  692                         }
  693                 }
  694                 /* if nothing was acked on this destination skip it */
  695                 if (net->net_ack == 0) {
  696                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  697                                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
  698                         }
  699                         continue;
  700                 }
  701                 if (net->net_ack2 > 0) {
  702                         /*
   703                          * Karn's rule applies to clearing the error count;
   704                          * this is optional.
  705                          */
  706                         net->error_count = 0;
  707                         if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
  708                             SCTP_ADDR_NOT_REACHABLE) {
  709                                 /* addr came good */
  710                                 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
  711                                 net->dest_state |= SCTP_ADDR_REACHABLE;
  712                                 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
  713                                     SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
  714                                 /* now was it the primary? if so restore */
  715                                 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
  716                                         (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
  717                                 }
  718                         }
  719                         /*
  720                          * JRS 5/14/07 - If CMT PF is on and the destination
  721                          * is in PF state, set the destination to active
  722                          * state and set the cwnd to one or two MTU's based
  723                          * on whether PF1 or PF2 is being used.
  724                          * 
  725                          * Should we stop any running T3 timer here?
  726                          */
  727                         if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
  728                             SCTP_ADDR_PF)) {
  729                                 net->dest_state &= ~SCTP_ADDR_PF;
  730                                 net->cwnd = net->mtu * sctp_cmt_pf;
  731                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
  732                                     net, net->cwnd);
  733                                 /*
  734                                  * Since the cwnd value is explicitly set,
  735                                  * skip the code that updates the cwnd
  736                                  * value.
  737                                  */
  738                                 goto skip_cwnd_update;
  739                         }
  740                 }
  741 #ifdef JANA_CMT_FAST_RECOVERY
  742                 /*
  743                  * CMT fast recovery code
  744                  */
  745                 /*
  746                  * if (sctp_cmt_on_off == 1 &&
  747                  * net->fast_retran_loss_recovery &&
  748                  * net->will_exit_fast_recovery == 0) { @@@ Do something }
  749                  * else if (sctp_cmt_on_off == 0 &&
  750                  * asoc->fast_retran_loss_recovery && will_exit == 0) {
  751                  */
  752 #endif
  753 
  754                 if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
  755                         /*
  756                          * If we are in loss recovery we skip any cwnd
  757                          * update
  758                          */
  759                         goto skip_cwnd_update;
  760                 }
  761                 /*
  762                  * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
  763                  * moved.
  764                  */
  765                 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
  766                         /* If the cumulative ack moved we can proceed */
  767                         if (net->cwnd <= net->ssthresh) {
  768                                 /* We are in slow start */
  769                                 if (net->flight_size + net->net_ack >=
  770                                     net->cwnd) {
  771 
  772                                         sctp_hs_cwnd_increase(stcb, net);
  773 
  774                                 } else {
  775                                         unsigned int dif;
  776 
  777                                         dif = net->cwnd - (net->flight_size +
  778                                             net->net_ack);
  779                                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  780                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  781                                                     SCTP_CWND_LOG_NOADV_SS);
  782                                         }
  783                                 }
  784                         } else {
  785                                 /* We are in congestion avoidance */
  786                                 if (net->flight_size + net->net_ack >=
  787                                     net->cwnd) {
  788                                         /*
  789                                          * add to pba only if we had a
  790                                          * cwnd's worth (or so) in flight OR
  791                                          * the burst limit was applied.
  792                                          */
  793                                         net->partial_bytes_acked +=
  794                                             net->net_ack;
  795 
  796                                         /*
  797                                          * Do we need to increase (if pba is
  798                                          * > cwnd)?
  799                                          */
  800                                         if (net->partial_bytes_acked >=
  801                                             net->cwnd) {
  802                                                 if (net->cwnd <
  803                                                     net->partial_bytes_acked) {
  804                                                         net->partial_bytes_acked -=
  805                                                             net->cwnd;
  806                                                 } else {
  807                                                         net->partial_bytes_acked =
  808                                                             0;
  809                                                 }
  810                                                 net->cwnd += net->mtu;
  811                                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  812                                                         sctp_log_cwnd(stcb, net, net->mtu,
  813                                                             SCTP_CWND_LOG_FROM_CA);
  814                                                 }
  815                                         } else {
  816                                                 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  817                                                         sctp_log_cwnd(stcb, net, net->net_ack,
  818                                                             SCTP_CWND_LOG_NOADV_CA);
  819                                                 }
  820                                         }
  821                                 } else {
  822                                         unsigned int dif;
  823 
  824                                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  825                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  826                                                     SCTP_CWND_LOG_NOADV_CA);
  827                                         }
  828                                         dif = net->cwnd - (net->flight_size +
  829                                             net->net_ack);
  830                                 }
  831                         }
  832                 } else {
  833                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
  834                                 sctp_log_cwnd(stcb, net, net->mtu,
  835                                     SCTP_CWND_LOG_NO_CUMACK);
  836                         }
  837                 }
  838 skip_cwnd_update:
  839                 /*
   840                  * NOW, according to Karn's rule, do we need to restore the
   841                  * RTO timer? Check our net_ack2. If it is not set then we
   842                  * have an ambiguity, i.e. all data acked was sent to more
   843                  * than one place.
  844                  */
  845                 if (net->net_ack2) {
  846                         /* restore any doubled timers */
  847                         net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
  848                         if (net->RTO < stcb->asoc.minrto) {
  849                                 net->RTO = stcb->asoc.minrto;
  850                         }
  851                         if (net->RTO > stcb->asoc.maxrto) {
  852                                 net->RTO = stcb->asoc.maxrto;
  853                         }
  854                 }
  855         }
  856 }
  857 
  858 void
  859 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
  860     struct sctp_nets *net)
  861 {
  862         int old_cwnd;
  863 
  864         old_cwnd = net->cwnd;
  865 
  866         SCTP_STAT_INCR(sctps_ecnereducedcwnd);
  867         net->ssthresh = net->cwnd / 2;
  868         if (net->ssthresh < net->mtu) {
  869                 net->ssthresh = net->mtu;
  870                 /* here back off the timer as well, to slow us down */
  871                 net->RTO <<= 1;
  872         }
  873         net->cwnd = net->ssthresh;
  874         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  875                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
  876         }
  877 }
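
Numerically: an ECN-Echo with cwnd = 8000 and a 1500-byte MTU halves both cwnd and ssthresh
to 4000; with cwnd = 2000 the half (1000) falls below one MTU, so ssthresh is clamped to
1500, the RTO is doubled as an extra brake, and cwnd restarts at 1500.
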
  878 
  879 void
  880 sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
  881     struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
  882     uint32_t * bottle_bw, uint32_t * on_queue)
  883 {
  884         uint32_t bw_avail;
  885         int rtt, incr;
  886         int old_cwnd = net->cwnd;
  887 
  888         /* need real RTT for this calc */
  889         rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
  890         /* get bottle neck bw */
  891         *bottle_bw = ntohl(cp->bottle_bw);
  892         /* and whats on queue */
  893         *on_queue = ntohl(cp->current_onq);
  894         /*
   895          * adjust the on-queue if our flight is larger; it could be that
   896          * the router has not yet gotten data "in-flight" to it
  897          */
  898         if (*on_queue < net->flight_size)
  899                 *on_queue = net->flight_size;
  900         /* calculate the available space */
  901         bw_avail = (*bottle_bw * rtt) / 1000;
  902         if (bw_avail > *bottle_bw) {
  903                 /*
   904                  * Cap the growth to no more than the bottleneck. This can
   905                  * happen as the RTT slides up due to queues. It also means
   906                  * that with more than a 1 second RTT and an empty queue you
   907                  * will be limited to bottle_bw bytes per second no matter if
   908                  * other points have 1/2 the RTT and you could get more
   909                  * out...
  910                  */
  911                 bw_avail = *bottle_bw;
  912         }
  913         if (*on_queue > bw_avail) {
  914                 /*
   915                  * No room for anything else; don't allow anything else to be
  916                  * "added to the fire".
  917                  */
  918                 int seg_inflight, seg_onqueue, my_portion;
  919 
  920                 net->partial_bytes_acked = 0;
  921 
  922                 /* how much are we over queue size? */
  923                 incr = *on_queue - bw_avail;
  924                 if (stcb->asoc.seen_a_sack_this_pkt) {
  925                         /*
  926                          * undo any cwnd adjustment that the sack might have
  927                          * made
  928                          */
  929                         net->cwnd = net->prev_cwnd;
  930                 }
  931                 /* Now how much of that is mine? */
  932                 seg_inflight = net->flight_size / net->mtu;
  933                 seg_onqueue = *on_queue / net->mtu;
  934                 my_portion = (incr * seg_inflight) / seg_onqueue;
  935 
  936                 /* Have I made an adjustment already */
  937                 if (net->cwnd > net->flight_size) {
  938                         /*
   939                          * An adjustment was already made for this flight, so
   940                          * decrease the portion by a share of that previous
   941                          * adjustment.
  942                          */
  943                         int diff_adj;
  944 
  945                         diff_adj = net->cwnd - net->flight_size;
  946                         if (diff_adj > my_portion)
  947                                 my_portion = 0;
  948                         else
  949                                 my_portion -= diff_adj;
  950                 }
  951                 /*
   952                  * back down to the previous cwnd (assume we have had a SACK
   953                  * before this packet), minus whatever portion of the
   954                  * overage is our fault.
  955                  */
  956                 net->cwnd -= my_portion;
  957 
  958                 /* we will NOT back down more than 1 MTU */
  959                 if (net->cwnd <= net->mtu) {
  960                         net->cwnd = net->mtu;
  961                 }
  962                 /* force into CA */
  963                 net->ssthresh = net->cwnd - 1;
  964         } else {
  965                 /*
   966                  * Take 1/4 of the space left or the max burst, whichever
  967                  * is less.
  968                  */
  969                 incr = min((bw_avail - *on_queue) >> 2,
  970                     stcb->asoc.max_burst * net->mtu);
  971                 net->cwnd += incr;
  972         }
  973         if (net->cwnd > bw_avail) {
  974                 /* We can't exceed the pipe size */
  975                 net->cwnd = bw_avail;
  976         }
  977         if (net->cwnd < net->mtu) {
  978                 /* We always have 1 MTU */
  979                 net->cwnd = net->mtu;
  980         }
  981         if (net->cwnd - old_cwnd != 0) {
  982                 /* log only changes */
  983                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
  984                         sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
  985                             SCTP_CWND_LOG_FROM_SAT);
  986                 }
  987         }
  988 }
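
A worked instance of the pipe estimate above, with purely illustrative numbers (the units
follow from the code: bottle_bw in bytes per second, the smoothed RTT in milliseconds):

        #include <stdio.h>
        #include <stdint.h>

        /* Pro-rata back-off after a PKTDROP report, as sketched above. */
        int
        main(void)
        {
                uint32_t bottle_bw = 500000;    /* bytes/sec reported by the router */
                uint32_t rtt = 100;             /* smoothed RTT in milliseconds */
                uint32_t on_queue = 80000;      /* bytes the router says are queued */
                uint32_t flight = 30000;        /* our bytes in flight */
                uint32_t mtu = 1500;
                uint32_t bw_avail, incr, my_portion;

                bw_avail = (bottle_bw * rtt) / 1000;    /* 50000 bytes fit in the pipe */
                incr = on_queue - bw_avail;             /* 30000 bytes of overage */
                /* our share, by segments in flight vs. segments on queue */
                my_portion = (incr * (flight / mtu)) / (on_queue / mtu);
                printf("back off cwnd by %u bytes\n", my_portion);      /* ~11320 */
                return (0);
        }

The real code additionally discounts any reduction already taken this flight and never
drops cwnd below one MTU.
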
  989 
  990 void
  991 sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
  992     struct sctp_nets *net, int burst_limit)
  993 {
  994         int old_cwnd;
  995 
  996         if (net->ssthresh < net->cwnd)
  997                 net->ssthresh = net->cwnd;
  998         old_cwnd = net->cwnd;
  999         net->cwnd = (net->flight_size + (burst_limit * net->mtu));
 1000 
 1001         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1002                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
 1003         }
 1004 }
 1005 
 1006 void
 1007 sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
 1008     struct sctp_tcb *stcb, struct sctp_nets *net)
 1009 {
 1010         int old_cwnd;
 1011 
 1012         old_cwnd = net->cwnd;
 1013 
 1014         sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
 1015         /*
 1016          * make a small adjustment to cwnd and force to CA.
 1017          */
 1018         if (net->cwnd > net->mtu)
 1019                 /* drop down one MTU after sending */
 1020                 net->cwnd -= net->mtu;
 1021         if (net->cwnd < net->ssthresh)
 1022                 /* still in SS move to CA */
 1023                 net->ssthresh = net->cwnd - 1;
 1024         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1025                 sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
 1026         }
 1027 }
 1028 
 1029 /*
 1030  * H-TCP congestion control. The algorithm is detailed in:
 1031  * R.N.Shorten, D.J.Leith:
 1032  *   "H-TCP: TCP for high-speed and long-distance networks"
 1033  *   Proc. PFLDnet, Argonne, 2004.
 1034  * http://www.hamilton.ie/net/htcp3.pdf
 1035  */
 1036 
 1037 
 1038 static int use_rtt_scaling = 1;
 1039 static int use_bandwidth_switch = 1;
 1040 
 1041 static inline int
 1042 between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
 1043 {
 1044         return seq3 - seq2 >= seq1 - seq2;
 1045 }
 1046 
 1047 static inline uint32_t
 1048 htcp_cong_time(struct htcp *ca)
 1049 {
 1050         return sctp_get_tick_count() - ca->last_cong;
 1051 }
 1052 
 1053 static inline uint32_t
 1054 htcp_ccount(struct htcp *ca)
 1055 {
 1056         return htcp_cong_time(ca) / ca->minRTT;
 1057 }
 1058 
 1059 static inline void
 1060 htcp_reset(struct htcp *ca)
 1061 {
 1062         ca->undo_last_cong = ca->last_cong;
 1063         ca->undo_maxRTT = ca->maxRTT;
 1064         ca->undo_old_maxB = ca->old_maxB;
 1065         ca->last_cong = sctp_get_tick_count();
 1066 }
 1067 
 1068 #ifdef SCTP_NOT_USED
 1069 
 1070 static uint32_t
 1071 htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
 1072 {
 1073         net->htcp_ca.last_cong = net->htcp_ca.undo_last_cong;
 1074         net->htcp_ca.maxRTT = net->htcp_ca.undo_maxRTT;
 1075         net->htcp_ca.old_maxB = net->htcp_ca.undo_old_maxB;
 1076         return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->htcp_ca.beta) * net->mtu);
 1077 }
 1078 
 1079 #endif
 1080 
 1081 static inline void
 1082 measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
 1083 {
 1084         uint32_t srtt = net->lastsa >> 3;
 1085 
  1086         /* keep track of the minimum RTT seen so far; minRTT is zero at first */
 1087         if (net->htcp_ca.minRTT > srtt || !net->htcp_ca.minRTT)
 1088                 net->htcp_ca.minRTT = srtt;
 1089 
 1090         /* max RTT */
 1091         if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->htcp_ca) > 3) {
 1092                 if (net->htcp_ca.maxRTT < net->htcp_ca.minRTT)
 1093                         net->htcp_ca.maxRTT = net->htcp_ca.minRTT;
 1094                 if (net->htcp_ca.maxRTT < srtt && srtt <= net->htcp_ca.maxRTT + MSEC_TO_TICKS(20))
 1095                         net->htcp_ca.maxRTT = srtt;
 1096         }
 1097 }
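      /*
       * Note on units (an assumption based on how lastsa is used
       * elsewhere): net->lastsa holds the smoothed RTT scaled by 8, so the
       * >> 3 above recovers an RTT estimate in ticks. maxRTT is only
       * tracked after at least 3 congestion-free RTTs, and a sample more
       * than MSEC_TO_TICKS(20) above the current maxRTT is ignored, which
       * filters out one-off RTT spikes.
       */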
 1098 
 1099 static void
 1100 measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
 1101 {
 1102         uint32_t now = sctp_get_tick_count();
 1103 
 1104         if (net->fast_retran_ip == 0)
 1105                 net->htcp_ca.bytes_acked = net->net_ack;
 1106 
 1107         if (!use_bandwidth_switch)
 1108                 return;
 1109 
 1110         /* achieved throughput calculations */
 1111         /* JRS - not 100% sure of this statement */
 1112         if (net->fast_retran_ip == 1) {
 1113                 net->htcp_ca.bytecount = 0;
 1114                 net->htcp_ca.lasttime = now;
 1115                 return;
 1116         }
 1117         net->htcp_ca.bytecount += net->net_ack;
 1118 
 1119         if (net->htcp_ca.bytecount >= net->cwnd - ((net->htcp_ca.alpha >> 7 ? : 1) * net->mtu)
 1120             && now - net->htcp_ca.lasttime >= net->htcp_ca.minRTT
 1121             && net->htcp_ca.minRTT > 0) {
 1122                 uint32_t cur_Bi = net->htcp_ca.bytecount / net->mtu * hz / (now - net->htcp_ca.lasttime);
 1123 
 1124                 if (htcp_ccount(&net->htcp_ca) <= 3) {
 1125                         /* just after backoff */
 1126                         net->htcp_ca.minB = net->htcp_ca.maxB = net->htcp_ca.Bi = cur_Bi;
 1127                 } else {
 1128                         net->htcp_ca.Bi = (3 * net->htcp_ca.Bi + cur_Bi) / 4;
 1129                         if (net->htcp_ca.Bi > net->htcp_ca.maxB)
 1130                                 net->htcp_ca.maxB = net->htcp_ca.Bi;
 1131                         if (net->htcp_ca.minB > net->htcp_ca.maxB)
 1132                                 net->htcp_ca.minB = net->htcp_ca.maxB;
 1133                 }
 1134                 net->htcp_ca.bytecount = 0;
 1135                 net->htcp_ca.lasttime = now;
 1136         }
 1137 }
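      /*
       * Worked example for cur_Bi with illustrative numbers: with
       * bytecount = 30000, mtu = 1500, hz = 1000 and an interval of
       * now - lasttime = 50 ticks, cur_Bi = 30000 / 1500 * 1000 / 50 = 400
       * full-sized packets per second. Bi is a 3/4-weighted moving average
       * of these samples, and maxB records its maximum since the last
       * backoff for the bandwidth-switch test in htcp_beta_update().
       */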
 1138 
 1139 static inline void
 1140 htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
 1141 {
 1142         if (use_bandwidth_switch) {
 1143                 uint32_t maxB = ca->maxB;
 1144                 uint32_t old_maxB = ca->old_maxB;
 1145 
 1146                 ca->old_maxB = ca->maxB;
 1147 
 1148                 if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
 1149                         ca->beta = BETA_MIN;
 1150                         ca->modeswitch = 0;
 1151                         return;
 1152                 }
 1153         }
 1154         if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
 1155                 ca->beta = (minRTT << 7) / maxRTT;
 1156                 if (ca->beta < BETA_MIN)
 1157                         ca->beta = BETA_MIN;
 1158                 else if (ca->beta > BETA_MAX)
 1159                         ca->beta = BETA_MAX;
 1160         } else {
 1161                 ca->beta = BETA_MIN;
 1162                 ca->modeswitch = 1;
 1163         }
 1164 }
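      /*
       * Illustrative numbers: with minRTT = 60 ticks and maxRTT = 100
       * ticks, beta = (60 << 7) / 100 = 76, roughly 0.6 in Q7, so a
       * congestion event cuts cwnd to about 60% instead of 50%. The
       * bandwidth-switch test resets beta to BETA_MIN whenever 5 * maxB
       * falls outside [4 * old_maxB, 6 * old_maxB], i.e. when the measured
       * throughput has changed by more than about 20% since the last
       * congestion event.
       */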
 1165 
 1166 static inline void
 1167 htcp_alpha_update(struct htcp *ca)
 1168 {
 1169         uint32_t minRTT = ca->minRTT;
 1170         uint32_t factor = 1;
 1171         uint32_t diff = htcp_cong_time(ca);
 1172 
 1173         if (diff > (uint32_t) hz) {
 1174                 diff -= hz;
 1175                 factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
 1176         }
 1177         if (use_rtt_scaling && minRTT) {
 1178                 uint32_t scale = (hz << 3) / (10 * minRTT);
 1179 
 1180                 scale = min(max(scale, 1U << 2), 10U << 3);     /* clamping ratio to
 1181                                                                  * interval [0.5,10]<<3 */
 1182                 factor = (factor << 3) / scale;
 1183                 if (!factor)
 1184                         factor = 1;
 1185         }
 1186         ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
 1187         if (!ca->alpha)
 1188                 ca->alpha = ALPHA_BASE;
 1189 }
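      /*
       * Worked example, assuming hz = 1000 and the Q7 scaling above:
       * within the first second after a congestion event factor stays 1,
       * so with beta = 64 (0.5) we get alpha = 2 * 1 * (128 - 64) = 128,
       * i.e. plain Reno-like growth of one MTU per RTT. Beyond one second
       * factor grows roughly quadratically in the elapsed time, and the
       * optional RTT scaling divides it by 100ms/minRTT, with that ratio
       * clamped to [0.5, 10] as noted in the code.
       */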
 1190 
 1191 /* After we have the RTT data to calculate beta, we'd still prefer to wait one
 1192  * RTT before we adjust our beta, to ensure we are working from consistent
 1193  * data.
 1194  *
 1195  * This function should be called when we hit a congestion event, since only
 1196  * at that point do we really have a good sense of maxRTT (the queues en
 1197  * route are then at their fullest).
 1198  */
 1199 static void
 1200 htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net)
 1201 {
 1202         uint32_t minRTT = net->htcp_ca.minRTT;
 1203         uint32_t maxRTT = net->htcp_ca.maxRTT;
 1204 
 1205         htcp_beta_update(&net->htcp_ca, minRTT, maxRTT);
 1206         htcp_alpha_update(&net->htcp_ca);
 1207 
 1208         /*
 1209          * add slowly fading memory for maxRTT to accommodate routing
 1210          * changes etc
 1211          */
 1212         if (minRTT > 0 && maxRTT > minRTT)
 1213                 net->htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
 1214 }
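      /*
       * Example of the fading maxRTT with illustrative numbers: with
       * minRTT = 40 ticks and maxRTT = 140 ticks, the update above leaves
       * maxRTT = 40 + (100 * 95) / 100 = 135 ticks, so maxRTT decays
       * slowly toward minRTT unless congestion keeps pushing it back up.
       */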
 1215 
 1216 static uint32_t
 1217 htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net)
 1218 {
 1219         htcp_param_update(stcb, net);
 1220         return max(((net->cwnd / net->mtu * net->htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
 1221 }
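      /*
       * Worked example with illustrative numbers: with cwnd = 30000,
       * mtu = 1500 and beta = 76, the expression above gives
       * ((30000 / 1500 * 76) >> 7) * 1500 = (1520 >> 7) * 1500 =
       * 11 * 1500 = 16500 bytes, about 55% of the old cwnd, subject to the
       * floor of two MTUs.
       */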
 1222 
 1223 static void
 1224 htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
 1225 {
 1226         /*-
 1227          * How to handle these functions?
 1228          *      if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
 1229          *              return;
 1230          */
 1231         if (net->cwnd <= net->ssthresh) {
 1232                 /* We are in slow start */
 1233                 if (net->flight_size + net->net_ack >= net->cwnd) {
 1234                         if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
 1235                                 net->cwnd += (net->mtu * sctp_L2_abc_variable);
 1236                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1237                                         sctp_log_cwnd(stcb, net, net->mtu,
 1238                                             SCTP_CWND_LOG_FROM_SS);
 1239                                 }
 1240                         } else {
 1241                                 net->cwnd += net->net_ack;
 1242                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1243                                         sctp_log_cwnd(stcb, net, net->net_ack,
 1244                                             SCTP_CWND_LOG_FROM_SS);
 1245                                 }
 1246                         }
 1247                 } else {
 1248                         unsigned int dif;
 1249 
 1250                         dif = net->cwnd - (net->flight_size +
 1251                             net->net_ack);
 1252                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
 1253                                 sctp_log_cwnd(stcb, net, net->net_ack,
 1254                                     SCTP_CWND_LOG_NOADV_SS);
 1255                         }
 1256                 }
 1257         } else {
 1258                 measure_rtt(stcb, net);
 1259 
 1260                 /*
 1261                  * In dangerous area, increase slowly. In theory this is
 1262                  * net->cwnd += alpha / net->cwnd
 1263                  */
 1264                 /* What is snd_cwnd_cnt?? */
 1265                 if (((net->partial_bytes_acked / net->mtu * net->htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
 1266                         /*-
 1267                          * Does SCTP have a cwnd clamp?
 1268                          * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
 1269                          */
 1270                         net->cwnd += net->mtu;
 1271                         net->partial_bytes_acked = 0;
 1272                         htcp_alpha_update(&net->htcp_ca);
 1273                         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1274                                 sctp_log_cwnd(stcb, net, net->mtu,
 1275                                     SCTP_CWND_LOG_FROM_CA);
 1276                         }
 1277                 } else {
 1278                         net->partial_bytes_acked += net->net_ack;
 1279                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
 1280                                 sctp_log_cwnd(stcb, net, net->net_ack,
 1281                                     SCTP_CWND_LOG_NOADV_CA);
 1282                         }
 1283                 }
 1284 
 1285                 net->htcp_ca.bytes_acked = net->mtu;
 1286         }
 1287 }
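      /*
       * Rough reading of the congestion-avoidance branch above: cwnd is
       * raised by one MTU each time partial_bytes_acked * alpha / 128
       * reaches cwnd, and since roughly one cwnd of data is acked per RTT
       * this amounts to about alpha / 128 MTUs of growth per RTT (one MTU
       * per RTT when alpha == 128, faster after long congestion-free
       * periods). The slow-start branch is the usual ABC-style growth,
       * limited to sctp_L2_abc_variable MTUs per SACK.
       */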
 1288 
 1289 #ifdef SCTP_NOT_USED
 1290 /* Lower bound on congestion window. */
 1291 static uint32_t
 1292 htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
 1293 {
 1294         return net->ssthresh;
 1295 }
 1296 
 1297 #endif
 1298 
 1299 static void
 1300 htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
 1301 {
 1302         memset(&net->htcp_ca, 0, sizeof(struct htcp));
 1303         net->htcp_ca.alpha = ALPHA_BASE;
 1304         net->htcp_ca.beta = BETA_MIN;
 1305         net->htcp_ca.bytes_acked = net->mtu;
 1306         net->htcp_ca.last_cong = sctp_get_tick_count();
 1307 }
 1308 
 1309 void
 1310 sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
 1311 {
 1312         /*
 1313          * We take the max of the burst limit times a MTU or the
 1314          * INITIAL_CWND. We then limit this to 4 MTU's of sending.
 1315          */
 1316         net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
 1317         /* we always get at LEAST 2 MTU's */
 1318         if (net->cwnd < (2 * net->mtu)) {
 1319                 net->cwnd = 2 * net->mtu;
 1320         }
 1321         net->ssthresh = stcb->asoc.peers_rwnd;
 1322         htcp_init(stcb, net);
 1323 
 1324         if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
 1325                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
 1326         }
 1327 }
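      /*
       * Example of the initial window, assuming SCTP_INITIAL_CWND is the
       * classic 4380 bytes (the actual value comes from the headers): with
       * mtu = 1500 the code above computes min(6000, max(3000, 4380)) =
       * 4380, and the explicit 2-MTU floor that follows is only a safety
       * net. ssthresh starts at the peer's advertised rwnd.
       */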
 1328 
 1329 void
 1330 sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
 1331     struct sctp_association *asoc,
 1332     int accum_moved, int reneged_all, int will_exit)
 1333 {
 1334         struct sctp_nets *net;
 1335 
 1336         /******************************/
 1337         /* update cwnd and Early FR   */
 1338         /******************************/
 1339         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
 1340 
 1341 #ifdef JANA_CMT_FAST_RECOVERY
 1342                 /*
 1343                  * CMT fast recovery code. Need to debug.
 1344                  */
 1345                 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
 1346                         if (compare_with_wrap(asoc->last_acked_seq,
 1347                             net->fast_recovery_tsn, MAX_TSN) ||
 1348                             (asoc->last_acked_seq == net->fast_recovery_tsn) ||
 1349                             compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
 1350                             (net->pseudo_cumack == net->fast_recovery_tsn)) {
 1351                                 net->will_exit_fast_recovery = 1;
 1352                         }
 1353                 }
 1354 #endif
 1355                 if (sctp_early_fr) {
 1356                         /*
 1357                          * So, first of all, do we need to have an Early FR
 1358                          * timer running?
 1359                          */
 1360                         if (((TAILQ_FIRST(&asoc->sent_queue)) &&
 1361                             (net->ref_count > 1) &&
 1362                             (net->flight_size < net->cwnd)) ||
 1363                             (reneged_all)) {
 1364                                 /*
 1365                                  * yes, so in this case stop it if it's
 1366                                  * running, and then restart it. Reneging
 1367                                  * all is a special case where we want to
 1368                                  * run the Early FR timer and then force the
 1369                                  * last few unacked to be sent, causing us
 1370                                  * to elicit a SACK with gaps to force out
 1371                                  * the others.
 1372                                  */
 1373                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
 1374                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
 1375                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
 1376                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
 1377                                 }
 1378                                 SCTP_STAT_INCR(sctps_earlyfrstrid);
 1379                                 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
 1380                         } else {
 1381                                 /* No, stop it if it's running */
 1382                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
 1383                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
 1384                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
 1385                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
 1386                                 }
 1387                         }
 1388                 }
 1389                 /* if nothing was acked on this destination skip it */
 1390                 if (net->net_ack == 0) {
 1391                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
 1392                                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
 1393                         }
 1394                         continue;
 1395                 }
 1396                 if (net->net_ack2 > 0) {
 1397                         /*
 1398                          * Karn's rule applies to clearing the error count;
 1399                          * this is optional.
 1400                          */
 1401                         net->error_count = 0;
 1402                         if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
 1403                             SCTP_ADDR_NOT_REACHABLE) {
 1404                                 /* addr came good */
 1405                                 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
 1406                                 net->dest_state |= SCTP_ADDR_REACHABLE;
 1407                                 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
 1408                                     SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
 1409                                 /* now was it the primary? if so restore */
 1410                                 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
 1411                                         (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
 1412                                 }
 1413                         }
 1414                         /*
 1415                          * JRS 5/14/07 - If CMT PF is on and the destination
 1416                          * is in PF state, set the destination to active
 1417                          * state and set the cwnd to one or two MTU's based
 1418                          * on whether PF1 or PF2 is being used.
 1419                          * 
 1420                          * Should we stop any running T3 timer here?
 1421                          */
 1422                         if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
 1423                             SCTP_ADDR_PF)) {
 1424                                 net->dest_state &= ~SCTP_ADDR_PF;
 1425                                 net->cwnd = net->mtu * sctp_cmt_pf;
 1426                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
 1427                                     net, net->cwnd);
 1428                                 /*
 1429                                  * Since the cwnd value is explicitly set,
 1430                                  * skip the code that updates the cwnd
 1431                                  * value.
 1432                                  */
 1433                                 goto skip_cwnd_update;
 1434                         }
 1435                 }
 1436 #ifdef JANA_CMT_FAST_RECOVERY
 1437                 /*
 1438                  * CMT fast recovery code
 1439                  */
 1440                 /*
 1441                  * if (sctp_cmt_on_off == 1 &&
 1442                  * net->fast_retran_loss_recovery &&
 1443                  * net->will_exit_fast_recovery == 0) { @@@ Do something }
 1444                  * else if (sctp_cmt_on_off == 0 &&
 1445                  * asoc->fast_retran_loss_recovery && will_exit == 0) {
 1446                  */
 1447 #endif
 1448 
 1449                 if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
 1450                         /*
 1451                          * If we are in loss recovery we skip any cwnd
 1452                          * update
 1453                          */
 1454                         goto skip_cwnd_update;
 1455                 }
 1456                 /*
 1457                  * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
 1458                  * moved.
 1459                  */
 1460                 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
 1461                         htcp_cong_avoid(stcb, net);
 1462                         measure_achieved_throughput(stcb, net);
 1463                 } else {
 1464                         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
 1465                                 sctp_log_cwnd(stcb, net, net->mtu,
 1466                                     SCTP_CWND_LOG_NO_CUMACK);
 1467                         }
 1468                 }
 1469 skip_cwnd_update:
 1470                 /*
 1471                  * NOW, according to Karn's rule, do we need to restore
 1472                  * the RTO timer? Check our net_ack2. If it is not set,
 1473                  * then we have an ambiguity, i.e. all data ack'd was
 1474                  * sent to more than one place.
 1475                  */
 1476                 if (net->net_ack2) {
 1477                         /* restore any doubled timers */
 1478                         net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
 1479                         if (net->RTO < stcb->asoc.minrto) {
 1480                                 net->RTO = stcb->asoc.minrto;
 1481                         }
 1482                         if (net->RTO > stcb->asoc.maxrto) {
 1483                                 net->RTO = stcb->asoc.maxrto;
 1484                         }
 1485                 }
 1486         }
 1487 }
 1488 
 1489 void
 1490 sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
 1491     struct sctp_association *asoc)
 1492 {
 1493         struct sctp_nets *net;
 1494 
 1495         /*
 1496          * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
 1497          * (net->fast_retran_loss_recovery == 0)))
 1498          */
 1499         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
 1500                 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
 1501                         /* out of a RFC2582 Fast recovery window? */
 1502                         if (net->net_ack > 0) {
 1503                                 /*
 1504                                  * Per section 7.2.3, are there any
 1505                                  * destinations that had a fast retransmit
 1506                                  * sent to them? If so, we need to
 1507                                  * adjust ssthresh and cwnd.
 1508                                  */
 1509                                 struct sctp_tmit_chunk *lchk;
 1510                                 int old_cwnd = net->cwnd;
 1511 
 1512                                 /* JRS - reset as if state were changed */
 1513                                 htcp_reset(&net->htcp_ca);
 1514                                 net->ssthresh = htcp_recalc_ssthresh(stcb, net);
 1515                                 net->cwnd = net->ssthresh;
 1516                                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1517                                         sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
 1518                                             SCTP_CWND_LOG_FROM_FR);
 1519                                 }
 1520                                 lchk = TAILQ_FIRST(&asoc->send_queue);
 1521 
 1522                                 net->partial_bytes_acked = 0;
 1523                                 /* Turn on fast recovery window */
 1524                                 asoc->fast_retran_loss_recovery = 1;
 1525                                 if (lchk == NULL) {
 1526                                         /* Mark end of the window */
 1527                                         asoc->fast_recovery_tsn = asoc->sending_seq - 1;
 1528                                 } else {
 1529                                         asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
 1530                                 }
 1531 
 1532                                 /*
 1533                                  * CMT fast recovery -- per destination
 1534                                  * recovery variable.
 1535                                  */
 1536                                 net->fast_retran_loss_recovery = 1;
 1537 
 1538                                 if (lchk == NULL) {
 1539                                         /* Mark end of the window */
 1540                                         net->fast_recovery_tsn = asoc->sending_seq - 1;
 1541                                 } else {
 1542                                         net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
 1543                                 }
 1544 
 1545                                 /*
 1546                                  * Disable Nonce Sum Checking and store the
 1547                                  * resync tsn
 1548                                  */
 1549                                 asoc->nonce_sum_check = 0;
 1550                                 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
 1551 
 1552                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
 1553                                     stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
 1554                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
 1555                                     stcb->sctp_ep, stcb, net);
 1556                         }
 1557                 } else if (net->net_ack > 0) {
 1558                         /*
 1559                          * Mark a peg that we WOULD have done a cwnd
 1560                          * reduction but RFC2582 prevented this action.
 1561                          */
 1562                         SCTP_STAT_INCR(sctps_fastretransinrtt);
 1563                 }
 1564         }
 1565 }
 1566 
 1567 void
 1568 sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
 1569     struct sctp_nets *net)
 1570 {
 1571         int old_cwnd = net->cwnd;
 1572 
 1573         /* JRS - reset as if the state were being changed to timeout */
 1574         htcp_reset(&net->htcp_ca);
 1575         net->ssthresh = htcp_recalc_ssthresh(stcb, net);
 1576         net->cwnd = net->mtu;
 1577         /* floor of 1 mtu */
 1578         if (net->cwnd < net->mtu)
 1579                 net->cwnd = net->mtu;
 1580         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1581                 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
 1582         }
 1583         net->partial_bytes_acked = 0;
 1584 }
 1585 
 1586 void
 1587 sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
 1588     struct sctp_tcb *stcb, struct sctp_nets *net)
 1589 {
 1590         int old_cwnd;
 1591 
 1592         old_cwnd = net->cwnd;
 1593 
 1594         sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
 1595         net->htcp_ca.last_cong = sctp_get_tick_count();
 1596         /*
 1597          * make a small adjustment to cwnd and force to CA.
 1598          */
 1599         if (net->cwnd > net->mtu)
 1600                 /* drop down one MTU after sending */
 1601                 net->cwnd -= net->mtu;
 1602         if (net->cwnd < net->ssthresh)
 1603                 /* still in SS move to CA */
 1604                 net->ssthresh = net->cwnd - 1;
 1605         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1606                 sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
 1607         }
 1608 }
 1609 
 1610 void
 1611 sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
 1612     struct sctp_nets *net)
 1613 {
 1614         int old_cwnd;
 1615 
 1616         old_cwnd = net->cwnd;
 1617 
 1618         /* JRS - reset htcp as if state changed */
 1619         htcp_reset(&net->htcp_ca);
 1620         SCTP_STAT_INCR(sctps_ecnereducedcwnd);
 1621         net->ssthresh = htcp_recalc_ssthresh(stcb, net);
 1622         if (net->ssthresh < net->mtu) {
 1623                 net->ssthresh = net->mtu;
 1624                 /* here back off the timer as well, to slow us down */
 1625                 net->RTO <<= 1;
 1626         }
 1627         net->cwnd = net->ssthresh;
 1628         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
 1629                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
 1630         }
 1631 }
