
FreeBSD/Linux Kernel Cross Reference
sys/netinet/sctp_cc_functions.c


    1 /*-
    2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions are met:
    6  *
    7  * a) Redistributions of source code must retain the above copyright notice,
    8  *   this list of conditions and the following disclaimer.
    9  *
   10  * b) Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in
   12  *   the documentation and/or other materials provided with the distribution.
   13  *
   14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
   15  *    contributors may be used to endorse or promote products derived
   16  *    from this software without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   28  * THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 
   31 #include <netinet/sctp_os.h>
   32 #include <netinet/sctp_var.h>
   33 #include <netinet/sctp_sysctl.h>
   34 #include <netinet/sctp_pcb.h>
   35 #include <netinet/sctp_header.h>
   36 #include <netinet/sctputil.h>
   37 #include <netinet/sctp_output.h>
   38 #include <netinet/sctp_input.h>
   39 #include <netinet/sctp_indata.h>
   40 #include <netinet/sctp_uio.h>
   41 #include <netinet/sctp_timer.h>
   42 #include <netinet/sctp_auth.h>
   43 #include <netinet/sctp_asconf.h>
   44 #include <netinet/sctp_cc_functions.h>
   45 #include <sys/cdefs.h>
   46 __FBSDID("$FreeBSD: releng/8.0/sys/netinet/sctp_cc_functions.c 179783 2008-06-14 07:58:05Z rrs $");
   47 void
   48 sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
   49 {
   50         /*
   51          * We take the larger of 2 MTUs and the INITIAL_CWND constant,
   52          * then cap the result at 4 MTUs of sending. cwnd must be at
   53          * least 2 MTUs.
   54          */
   55         net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
   56         net->ssthresh = stcb->asoc.peers_rwnd;
   57 
   58         if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
   59                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
   60         }
   61 }
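
For reference, the min/max on listing line 55 follows the RFC 4960 (section 7.2.1) initial-window rule. Below is a minimal, editor-added sketch of the same clamp, assuming a hypothetical 1500-byte path MTU and using the RFC's 4380-byte figure in place of SCTP_INITIAL_CWND (the real macro is defined in the SCTP headers, not shown here):

#include <stdint.h>

/* Sketch only: mirrors min(4*MTU, max(2*MTU, SCTP_INITIAL_CWND)) from line 55. */
static uint32_t
initial_cwnd_sketch(uint32_t mtu, uint32_t initial_const)
{
        uint32_t lower = (2 * mtu > initial_const) ? (2 * mtu) : initial_const;
        uint32_t cap = 4 * mtu;

        return ((lower < cap) ? lower : cap);
}

With these assumptions, initial_cwnd_sketch(1500, 4380) yields 4380 bytes; with a 9000-byte jumbo MTU, 2*MTU dominates and the 4*MTU cap is never reached.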
   62 
   63 void
   64 sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
   65     struct sctp_association *asoc)
   66 {
   67         struct sctp_nets *net;
   68 
   69         /*-
   70          * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
   71          * (net->fast_retran_loss_recovery == 0)))
   72          */
   73         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   74                 if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) {
   75                         /* out of a RFC2582 Fast recovery window? */
   76                         if (net->net_ack > 0) {
   77                                 /*
   78                                  * Per section 7.2.3: were there any
   79                                  * destinations that had a fast retransmit
   80                                  * sent to them? If so, we need to
   81                                  * adjust ssthresh and cwnd.
   82                                  */
   83                                 struct sctp_tmit_chunk *lchk;
   84                                 int old_cwnd = net->cwnd;
   85 
   86                                 net->ssthresh = net->cwnd / 2;
   87                                 if (net->ssthresh < (net->mtu * 2)) {
   88                                         net->ssthresh = 2 * net->mtu;
   89                                 }
   90                                 net->cwnd = net->ssthresh;
   91                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
   92                                         sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
   93                                             SCTP_CWND_LOG_FROM_FR);
   94                                 }
   95                                 lchk = TAILQ_FIRST(&asoc->send_queue);
   96 
   97                                 net->partial_bytes_acked = 0;
   98                                 /* Turn on fast recovery window */
   99                                 asoc->fast_retran_loss_recovery = 1;
  100                                 if (lchk == NULL) {
  101                                         /* Mark end of the window */
  102                                         asoc->fast_recovery_tsn = asoc->sending_seq - 1;
  103                                 } else {
  104                                         asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  105                                 }
  106 
  107                                 /*
  108                                  * CMT fast recovery -- per destination
  109                                  * recovery variable.
  110                                  */
  111                                 net->fast_retran_loss_recovery = 1;
  112 
  113                                 if (lchk == NULL) {
  114                                         /* Mark end of the window */
  115                                         net->fast_recovery_tsn = asoc->sending_seq - 1;
  116                                 } else {
  117                                         net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  118                                 }
  119 
  120                                 /*
  121                                  * Disable Nonce Sum Checking and store the
  122                                  * resync tsn
  123                                  */
  124                                 asoc->nonce_sum_check = 0;
  125                                 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
  126 
  127                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
  128                                     stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
  129                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
  130                                     stcb->sctp_ep, stcb, net);
  131                         }
  132                 } else if (net->net_ack > 0) {
  133                         /*
  134                          * Mark a peg that we WOULD have done a cwnd
  135                          * reduction but RFC2582 prevented this action.
  136                          */
  137                         SCTP_STAT_INCR(sctps_fastretransinrtt);
  138                 }
  139         }
  140 }
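
A quick numeric check of the halving on listing lines 86-90, using hypothetical values of cwnd = 12000 bytes and a 1500-byte MTU: ssthresh becomes 12000 / 2 = 6000, which is above the 2 * 1500 = 3000 floor, so cwnd is pulled down to 6000 and partial_bytes_acked resets to 0 as fast recovery is entered.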
  141 
  142 void
  143 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
  144     struct sctp_association *asoc,
  145     int accum_moved, int reneged_all, int will_exit)
  146 {
  147         struct sctp_nets *net;
  148 
  149         /******************************/
  150         /* update cwnd and Early FR   */
  151         /******************************/
  152         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  153 
  154 #ifdef JANA_CMT_FAST_RECOVERY
  155                 /*
  156                  * CMT fast recovery code. Need to debug.
  157                  */
  158                 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
  159                         if (compare_with_wrap(asoc->last_acked_seq,
  160                             net->fast_recovery_tsn, MAX_TSN) ||
  161                             (asoc->last_acked_seq == net->fast_recovery_tsn) ||
  162                             compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
  163                             (net->pseudo_cumack == net->fast_recovery_tsn)) {
  164                                 net->will_exit_fast_recovery = 1;
  165                         }
  166                 }
  167 #endif
  168                 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
  169                         /*
  170                          * So, first of all do we need to have an Early FR
  171                          * timer running?
  172                          */
  173                         if (((TAILQ_FIRST(&asoc->sent_queue)) &&
  174                             (net->ref_count > 1) &&
  175                             (net->flight_size < net->cwnd)) ||
  176                             (reneged_all)) {
  177                                 /*
  178                                  * yes, so in this case stop it if it's
  179                                  * running, and then restart it. Reneging
  180                                  * all is a special case where we want to
  181                                  * run the Early FR timer and then force the
  182                                  * last few unacked to be sent, causing us
  183                                  * to elicit a sack with gaps to force out
  184                                  * the others.
  185                                  */
  186                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  187                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
  188                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  189                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
  190                                 }
  191                                 SCTP_STAT_INCR(sctps_earlyfrstrid);
  192                                 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
  193                         } else {
  194                         /* No, stop it if it's running */
  195                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  196                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
  197                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  198                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
  199                                 }
  200                         }
  201                 }
  202                 /* if nothing was acked on this destination skip it */
  203                 if (net->net_ack == 0) {
  204                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  205                                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
  206                         }
  207                         continue;
  208                 }
  209                 if (net->net_ack2 > 0) {
  210                         /*
  211                          * Karn's rule applies to clearing the error count; this
  212                          * is optional.
  213                          */
  214                         net->error_count = 0;
  215                         if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
  216                             SCTP_ADDR_NOT_REACHABLE) {
  217                                 /* addr came good */
  218                                 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
  219                                 net->dest_state |= SCTP_ADDR_REACHABLE;
  220                                 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
  221                                     SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
  222                                 /* now was it the primary? if so restore */
  223                                 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
  224                                         (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
  225                                 }
  226                         }
  227                         /*
  228                          * JRS 5/14/07 - If CMT PF is on and the destination
  229                          * is in PF state, set the destination to active
  230                          * state and set the cwnd to one or two MTU's based
  231                          * on whether PF1 or PF2 is being used.
  232                          * 
  233                          * Should we stop any running T3 timer here?
  234                          */
  235                         if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
  236                             SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
  237                             ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
  238                                 net->dest_state &= ~SCTP_ADDR_PF;
  239                                 net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
  240                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
  241                                     net, net->cwnd);
  242                                 /*
  243                                  * Since the cwnd value is explicitly set,
  244                                  * skip the code that updates the cwnd
  245                                  * value.
  246                                  */
  247                                 goto skip_cwnd_update;
  248                         }
  249                 }
  250 #ifdef JANA_CMT_FAST_RECOVERY
  251                 /*
  252                  * CMT fast recovery code
  253                  */
  254                 /*
  255                  * if (sctp_cmt_on_off == 1 &&
  256                  * net->fast_retran_loss_recovery &&
  257                  * net->will_exit_fast_recovery == 0) { @@@ Do something }
  258                  * else if (sctp_cmt_on_off == 0 &&
  259                  * asoc->fast_retran_loss_recovery && will_exit == 0) {
  260                  */
  261 #endif
  262 
  263                 if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
  264                         /*
  265                          * If we are in loss recovery we skip any cwnd
  266                          * update
  267                          */
  268                         goto skip_cwnd_update;
  269                 }
  270                 /*
  271                  * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
  272                  * moved.
  273                  */
  274                 if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
  275                         /* If the cumulative ack moved we can proceed */
  276                         if (net->cwnd <= net->ssthresh) {
  277                                 /* We are in slow start */
  278                                 if (net->flight_size + net->net_ack >= net->cwnd) {
  279                                         if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
  280                                                 net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
  281                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  282                                                         sctp_log_cwnd(stcb, net, net->mtu,
  283                                                             SCTP_CWND_LOG_FROM_SS);
  284                                                 }
  285                                         } else {
  286                                                 net->cwnd += net->net_ack;
  287                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  288                                                         sctp_log_cwnd(stcb, net, net->net_ack,
  289                                                             SCTP_CWND_LOG_FROM_SS);
  290                                                 }
  291                                         }
  292                                 } else {
  293                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  294                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  295                                                     SCTP_CWND_LOG_NOADV_SS);
  296                                         }
  297                                 }
  298                         } else {
  299                                 /* We are in congestion avoidance */
  300                                 /*
  301                                  * Add to pba
  302                                  */
  303                                 net->partial_bytes_acked += net->net_ack;
  304 
  305                                 if ((net->flight_size + net->net_ack >= net->cwnd) &&
  306                                     (net->partial_bytes_acked >= net->cwnd)) {
  307                                         net->partial_bytes_acked -= net->cwnd;
  308                                         net->cwnd += net->mtu;
  309                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  310                                                 sctp_log_cwnd(stcb, net, net->mtu,
  311                                                     SCTP_CWND_LOG_FROM_CA);
  312                                         }
  313                                 } else {
  314                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  315                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  316                                                     SCTP_CWND_LOG_NOADV_CA);
  317                                         }
  318                                 }
  319                         }
  320                 } else {
  321                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  322                                 sctp_log_cwnd(stcb, net, net->mtu,
  323                                     SCTP_CWND_LOG_NO_CUMACK);
  324                         }
  325                 }
  326 skip_cwnd_update:
  327                 /*
  328                  * NOW, according to Karn's rule do we need to restore the
  329                  * RTO timer back? Check our net_ack2. If not set then we
  330          * have an ambiguity, i.e. all data ack'd was sent to more
  331                  * than one place.
  332                  */
  333                 if (net->net_ack2) {
  334                         /* restore any doubled timers */
  335                         net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
  336                         if (net->RTO < stcb->asoc.minrto) {
  337                                 net->RTO = stcb->asoc.minrto;
  338                         }
  339                         if (net->RTO > stcb->asoc.maxrto) {
  340                                 net->RTO = stcb->asoc.maxrto;
  341                         }
  342                 }
  343         }
  344 }
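
Stripped of the logging, the per-destination update above reduces to the following sketch, assuming a hypothetical sctp_L2_abc_variable of 2 (the sysctl's default is defined elsewhere):

        slow start (cwnd <= ssthresh, window fully used):
                cwnd += min(net_ack, 2 * MTU);          /* ABC with L = 2 */
        congestion avoidance (window fully used):
                partial_bytes_acked += net_ack;
                if (partial_bytes_acked >= cwnd) {
                        partial_bytes_acked -= cwnd;
                        cwnd += MTU;                    /* at most one MTU per cwnd of acked data */
                }

If the window is not fully used, or the cumulative ack (or pseudo-cumack under CMT) did not move, cwnd is left alone and only a log entry is made.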
  345 
  346 void
  347 sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
  348 {
  349         int old_cwnd = net->cwnd;
  350 
  351         net->ssthresh = max(net->cwnd / 2, 2 * net->mtu);
  352         net->cwnd = net->mtu;
  353         net->partial_bytes_acked = 0;
  354 
  355         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  356                 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
  357         }
  358 }
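
For example (hypothetical values), a T3 timeout on a destination with cwnd = 24000 bytes and a 1500-byte MTU leaves ssthresh = max(12000, 3000) = 12000 and cwnd = 1500, so the window on that path is rebuilt through slow start.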
  359 
  360 void
  361 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net)
  362 {
  363         int old_cwnd = net->cwnd;
  364 
  365         SCTP_STAT_INCR(sctps_ecnereducedcwnd);
  366         net->ssthresh = net->cwnd / 2;
  367         if (net->ssthresh < net->mtu) {
  368                 net->ssthresh = net->mtu;
  369                 /* here back off the timer as well, to slow us down */
  370                 net->RTO <<= 1;
  371         }
  372         net->cwnd = net->ssthresh;
  373         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  374                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
  375         }
  376 }
  377 
  378 void
  379 sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
  380     struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
  381     uint32_t * bottle_bw, uint32_t * on_queue)
  382 {
  383         uint32_t bw_avail;
  384         int rtt, incr;
  385         int old_cwnd = net->cwnd;
  386 
  387         /* need real RTT for this calc */
  388         rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
  389         /* get bottle neck bw */
  390         *bottle_bw = ntohl(cp->bottle_bw);
  391         /* and what's on queue */
  392         *on_queue = ntohl(cp->current_onq);
  393         /*
  394          * adjust the on-queue value if our flight size is larger; it could
  395          * be that the router has not yet gotten the "in-flight" data to it
  396          */
  397         if (*on_queue < net->flight_size)
  398                 *on_queue = net->flight_size;
  399         /* calculate the available space */
  400         bw_avail = (*bottle_bw * rtt) / 1000;
  401         if (bw_avail > *bottle_bw) {
  402                 /*
  403                  * Cap the growth to no more than the bottle neck. This can
  404                  * happen as RTT slides up due to queues. It also means if
  405          * you have more than a 1-second RTT with an empty queue you
  406                  * will be limited to the bottle_bw per second no matter if
  407                  * other points have 1/2 the RTT and you could get more
  408                  * out...
  409                  */
  410                 bw_avail = *bottle_bw;
  411         }
  412         if (*on_queue > bw_avail) {
  413                 /*
  414                  * No room for anything else; don't allow anything else to be
  415                  * "added to the fire".
  416                  */
  417                 int seg_inflight, seg_onqueue, my_portion;
  418 
  419                 net->partial_bytes_acked = 0;
  420 
  421                 /* how much are we over queue size? */
  422                 incr = *on_queue - bw_avail;
  423                 if (stcb->asoc.seen_a_sack_this_pkt) {
  424                         /*
  425                          * undo any cwnd adjustment that the sack might have
  426                          * made
  427                          */
  428                         net->cwnd = net->prev_cwnd;
  429                 }
  430                 /* Now how much of that is mine? */
  431                 seg_inflight = net->flight_size / net->mtu;
  432                 seg_onqueue = *on_queue / net->mtu;
  433                 my_portion = (incr * seg_inflight) / seg_onqueue;
  434 
  435                 /* Have I made an adjustment already */
  436                 if (net->cwnd > net->flight_size) {
  437                         /*
  438                          * for this flight we already made an adjustment, so
  439                          * decrease the portion by a share of our previous
  440                          * adjustment.
  441                          */
  442                         int diff_adj;
  443 
  444                         diff_adj = net->cwnd - net->flight_size;
  445                         if (diff_adj > my_portion)
  446                                 my_portion = 0;
  447                         else
  448                                 my_portion -= diff_adj;
  449                 }
  450                 /*
  451                  * back down to the previous cwnd (assume we have had a sack
  452                  * before this packet), minus whatever portion of the
  453                  * overage is my fault.
  454                  */
  455                 net->cwnd -= my_portion;
  456 
  457                 /* we will NOT back down below 1 MTU */
  458                 if (net->cwnd <= net->mtu) {
  459                         net->cwnd = net->mtu;
  460                 }
  461                 /* force into CA */
  462                 net->ssthresh = net->cwnd - 1;
  463         } else {
  464                 /*
  465                  * Take 1/4 of the space left or max_burst MTUs, whichever
  466                  * is less.
  467                  */
  468                 incr = min((bw_avail - *on_queue) >> 2,
  469                     stcb->asoc.max_burst * net->mtu);
  470                 net->cwnd += incr;
  471         }
  472         if (net->cwnd > bw_avail) {
  473                 /* We can't exceed the pipe size */
  474                 net->cwnd = bw_avail;
  475         }
  476         if (net->cwnd < net->mtu) {
  477                 /* We always have 1 MTU */
  478                 net->cwnd = net->mtu;
  479         }
  480         if (net->cwnd - old_cwnd != 0) {
  481                 /* log only changes */
  482                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  483                         sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
  484                             SCTP_CWND_LOG_FROM_SAT);
  485                 }
  486         }
  487 }
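
To make the arithmetic above concrete (hypothetical numbers; the /1000 on listing line 400 implies a millisecond-like unit for rtt, so treat the scaling as illustrative): with *bottle_bw = 1,000,000 bytes/s and rtt = 40, bw_avail = (1,000,000 * 40) / 1000 = 40,000 bytes of pipe capacity. If the router reports *on_queue = 60,000 bytes, the overage is incr = 20,000; with flight_size = 15,000 bytes (10 segments of a 1500-byte MTU) against 40 segments on queue, my_portion = 20,000 * 10 / 40 = 5,000 bytes is taken off cwnd, less any reduction already made for this flight, and ssthresh is then pinned just below the new cwnd to force congestion avoidance.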
  488 
  489 void
  490 sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
  491     struct sctp_nets *net, int burst_limit)
  492 {
  493         int old_cwnd = net->cwnd;
  494 
  495         if (net->ssthresh < net->cwnd)
  496                 net->ssthresh = net->cwnd;
  497         net->cwnd = (net->flight_size + (burst_limit * net->mtu));
  498 
  499         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  500                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
  501         }
  502 }
  503 
  504 void
  505 sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
  506     struct sctp_tcb *stcb, struct sctp_nets *net)
  507 {
  508         int old_cwnd = net->cwnd;
  509 
  510         sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
  511         /*
  512          * make a small adjustment to cwnd and force to CA.
  513          */
  514         if (net->cwnd > net->mtu)
  515                 /* drop down one MTU after sending */
  516                 net->cwnd -= net->mtu;
  517         if (net->cwnd < net->ssthresh)
  518                 /* still in SS move to CA */
  519                 net->ssthresh = net->cwnd - 1;
  520         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  521                 sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
  522         }
  523 }
  524 
  525 struct sctp_hs_raise_drop {
  526         int32_t cwnd;
  527         int32_t increase;
  528         int32_t drop_percent;
  529 };
  530 
  531 #define SCTP_HS_TABLE_SIZE 73
  532 
  533 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
  534         {38, 1, 50},            /* 0   */
  535         {118, 2, 44},           /* 1   */
  536         {221, 3, 41},           /* 2   */
  537         {347, 4, 38},           /* 3   */
  538         {495, 5, 37},           /* 4   */
  539         {663, 6, 35},           /* 5   */
  540         {851, 7, 34},           /* 6   */
  541         {1058, 8, 33},          /* 7   */
  542         {1284, 9, 32},          /* 8   */
  543         {1529, 10, 31},         /* 9   */
  544         {1793, 11, 30},         /* 10  */
  545         {2076, 12, 29},         /* 11  */
  546         {2378, 13, 28},         /* 12  */
  547         {2699, 14, 28},         /* 13  */
  548         {3039, 15, 27},         /* 14  */
  549         {3399, 16, 27},         /* 15  */
  550         {3778, 17, 26},         /* 16  */
  551         {4177, 18, 26},         /* 17  */
  552         {4596, 19, 25},         /* 18  */
  553         {5036, 20, 25},         /* 19  */
  554         {5497, 21, 24},         /* 20  */
  555         {5979, 22, 24},         /* 21  */
  556         {6483, 23, 23},         /* 22  */
  557         {7009, 24, 23},         /* 23  */
  558         {7558, 25, 22},         /* 24  */
  559         {8130, 26, 22},         /* 25  */
  560         {8726, 27, 22},         /* 26  */
  561         {9346, 28, 21},         /* 27  */
  562         {9991, 29, 21},         /* 28  */
  563         {10661, 30, 21},        /* 29  */
  564         {11358, 31, 20},        /* 30  */
  565         {12082, 32, 20},        /* 31  */
  566         {12834, 33, 20},        /* 32  */
  567         {13614, 34, 19},        /* 33  */
  568         {14424, 35, 19},        /* 34  */
  569         {15265, 36, 19},        /* 35  */
  570         {16137, 37, 19},        /* 36  */
  571         {17042, 38, 18},        /* 37  */
  572         {17981, 39, 18},        /* 38  */
  573         {18955, 40, 18},        /* 39  */
  574         {19965, 41, 17},        /* 40  */
  575         {21013, 42, 17},        /* 41  */
  576         {22101, 43, 17},        /* 42  */
  577         {23230, 44, 17},        /* 43  */
  578         {24402, 45, 16},        /* 44  */
  579         {25618, 46, 16},        /* 45  */
  580         {26881, 47, 16},        /* 46  */
  581         {28193, 48, 16},        /* 47  */
  582         {29557, 49, 15},        /* 48  */
  583         {30975, 50, 15},        /* 49  */
  584         {32450, 51, 15},        /* 50  */
  585         {33986, 52, 15},        /* 51  */
  586         {35586, 53, 14},        /* 52  */
  587         {37253, 54, 14},        /* 53  */
  588         {38992, 55, 14},        /* 54  */
  589         {40808, 56, 14},        /* 55  */
  590         {42707, 57, 13},        /* 56  */
  591         {44694, 58, 13},        /* 57  */
  592         {46776, 59, 13},        /* 58  */
  593         {48961, 60, 13},        /* 59  */
  594         {51258, 61, 13},        /* 60  */
  595         {53677, 62, 12},        /* 61  */
  596         {56230, 63, 12},        /* 62  */
  597         {58932, 64, 12},        /* 63  */
  598         {61799, 65, 12},        /* 64  */
  599         {64851, 66, 11},        /* 65  */
  600         {68113, 67, 11},        /* 66  */
  601         {71617, 68, 11},        /* 67  */
  602         {75401, 69, 10},        /* 68  */
  603         {79517, 70, 10},        /* 69  */
  604         {84035, 71, 10},        /* 70  */
  605         {89053, 72, 10},        /* 71  */
  606         {94717, 73, 9}          /* 72  */
  607 };
  608 
  609 static void
  610 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
  611 {
  612         int cur_val, i, indx, incr;
  613 
  614         cur_val = net->cwnd >> 10;
  615         indx = SCTP_HS_TABLE_SIZE - 1;
  616 #ifdef SCTP_DEBUG
  617         printf("HS CC Called.\n");
  618 #endif
  619         if (cur_val < sctp_cwnd_adjust[0].cwnd) {
  620                 /* normal mode */
  621                 if (net->net_ack > net->mtu) {
  622                         net->cwnd += net->mtu;
  623                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  624                                 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
  625                         }
  626                 } else {
  627                         net->cwnd += net->net_ack;
  628                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  629                                 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
  630                         }
  631                 }
  632         } else {
  633                 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
  634                         if (cur_val < sctp_cwnd_adjust[i].cwnd) {
  635                                 indx = i;
  636                                 break;
  637                         }
  638                 }
  639                 net->last_hs_used = indx;
  640                 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
  641                 net->cwnd += incr;
  642                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  643                         sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
  644                 }
  645         }
  646 }
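
A worked example of the lookup above (hypothetical state, assuming last_hs_used is still below 11): cur_val is cwnd expressed in KiB (cwnd >> 10). With cwnd = 2,000,000 bytes, cur_val = 1953, which first falls below the table's cwnd column at row 11 ({2076, 12, 29}), so last_hs_used becomes 11 and the window grows by 12 << 10 = 12,288 bytes on this SACK. Below 38 KiB the function instead uses the ordinary one-MTU-capped slow-start increase.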
  647 
  648 static void
  649 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
  650 {
  651         int cur_val, i, indx;
  652         int old_cwnd = net->cwnd;
  653 
  654         cur_val = net->cwnd >> 10;
  655         indx = net->last_hs_used;
  656         if (cur_val < sctp_cwnd_adjust[0].cwnd) {
  657                 /* normal mode */
  658                 net->ssthresh = net->cwnd / 2;
  659                 if (net->ssthresh < (net->mtu * 2)) {
  660                         net->ssthresh = 2 * net->mtu;
  661                 }
  662                 net->cwnd = net->ssthresh;
  663         } else {
  664                 /* drop by the proper amount */
  665                 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
  666                     sctp_cwnd_adjust[net->last_hs_used].drop_percent);
  667                 net->cwnd = net->ssthresh;
  668                 /* now where are we */
  669                 indx = net->last_hs_used;
  670                 cur_val = net->cwnd >> 10;
  671                 /* reset where we are in the table */
  672                 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
  673                         /* fell out of HS */
  674                         net->last_hs_used = 0;
  675                 } else {
  676                         for (i = indx; i >= 1; i--) {
  677                                 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
  678                                         break;
  679                                 }
  680                         }
  681                         net->last_hs_used = indx;
  682                 }
  683         }
  684         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  685                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
  686         }
  687 }
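
Continuing the same hypothetical state on a loss: with cwnd = 2,000,000 bytes and last_hs_used = 11, drop_percent is 29, so ssthresh = 2,000,000 - (2,000,000 / 100) * 29 = 1,420,000 bytes and cwnd follows it; the function then rechecks where the new cur_val of 1386 KiB sits in the table before the change is logged.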
  688 
  689 void
  690 sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
  691     struct sctp_association *asoc)
  692 {
  693         struct sctp_nets *net;
  694 
  695         /*
  696          * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
  697          * (net->fast_retran_loss_recovery == 0)))
  698          */
  699         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  700                 if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) {
  701                         /* out of a RFC2582 Fast recovery window? */
  702                         if (net->net_ack > 0) {
  703                                 /*
  704                                  * Per section 7.2.3: were there any
  705                                  * destinations that had a fast retransmit
  706                                  * sent to them? If so, we need to
  707                                  * adjust ssthresh and cwnd.
  708                                  */
  709                                 struct sctp_tmit_chunk *lchk;
  710 
  711                                 sctp_hs_cwnd_decrease(stcb, net);
  712 
  713                                 lchk = TAILQ_FIRST(&asoc->send_queue);
  714 
  715                                 net->partial_bytes_acked = 0;
  716                                 /* Turn on fast recovery window */
  717                                 asoc->fast_retran_loss_recovery = 1;
  718                                 if (lchk == NULL) {
  719                                         /* Mark end of the window */
  720                                         asoc->fast_recovery_tsn = asoc->sending_seq - 1;
  721                                 } else {
  722                                         asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  723                                 }
  724 
  725                                 /*
  726                                  * CMT fast recovery -- per destination
  727                                  * recovery variable.
  728                                  */
  729                                 net->fast_retran_loss_recovery = 1;
  730 
  731                                 if (lchk == NULL) {
  732                                         /* Mark end of the window */
  733                                         net->fast_recovery_tsn = asoc->sending_seq - 1;
  734                                 } else {
  735                                         net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
  736                                 }
  737 
  738                                 /*
  739                                  * Disable Nonce Sum Checking and store the
  740                                  * resync tsn
  741                                  */
  742                                 asoc->nonce_sum_check = 0;
  743                                 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
  744 
  745                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
  746                                     stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
  747                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
  748                                     stcb->sctp_ep, stcb, net);
  749                         }
  750                 } else if (net->net_ack > 0) {
  751                         /*
  752                          * Mark a peg that we WOULD have done a cwnd
  753                          * reduction but RFC2582 prevented this action.
  754                          */
  755                         SCTP_STAT_INCR(sctps_fastretransinrtt);
  756                 }
  757         }
  758 }
  759 
  760 void
  761 sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
  762     struct sctp_association *asoc,
  763     int accum_moved, int reneged_all, int will_exit)
  764 {
  765         struct sctp_nets *net;
  766 
  767         /******************************/
  768         /* update cwnd and Early FR   */
  769         /******************************/
  770         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  771 
  772 #ifdef JANA_CMT_FAST_RECOVERY
  773                 /*
  774                  * CMT fast recovery code. Need to debug.
  775                  */
  776                 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
  777                         if (compare_with_wrap(asoc->last_acked_seq,
  778                             net->fast_recovery_tsn, MAX_TSN) ||
  779                             (asoc->last_acked_seq == net->fast_recovery_tsn) ||
  780                             compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
  781                             (net->pseudo_cumack == net->fast_recovery_tsn)) {
  782                                 net->will_exit_fast_recovery = 1;
  783                         }
  784                 }
  785 #endif
  786                 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
  787                         /*
  788                          * So, first of all do we need to have an Early FR
  789                          * timer running?
  790                          */
  791                         if (((TAILQ_FIRST(&asoc->sent_queue)) &&
  792                             (net->ref_count > 1) &&
  793                             (net->flight_size < net->cwnd)) ||
  794                             (reneged_all)) {
  795                                 /*
  796                                  * yes, so in this case stop it if it's
  797                                  * running, and then restart it. Reneging
  798                                  * all is a special case where we want to
  799                                  * run the Early FR timer and then force the
  800                                  * last few unacked to be sent, causing us
  801                                  * to elicit a sack with gaps to force out
  802                                  * the others.
  803                                  */
  804                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  805                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
  806                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  807                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
  808                                 }
  809                                 SCTP_STAT_INCR(sctps_earlyfrstrid);
  810                                 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
  811                         } else {
  812                         /* No, stop it if it's running */
  813                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
  814                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
  815                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
  816                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
  817                                 }
  818                         }
  819                 }
  820                 /* if nothing was acked on this destination skip it */
  821                 if (net->net_ack == 0) {
  822                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  823                                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
  824                         }
  825                         continue;
  826                 }
  827                 if (net->net_ack2 > 0) {
  828                         /*
  829                          * Karn's rule applies to clearing the error count; this
  830                          * is optional.
  831                          */
  832                         net->error_count = 0;
  833                         if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
  834                             SCTP_ADDR_NOT_REACHABLE) {
  835                                 /* addr came good */
  836                                 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
  837                                 net->dest_state |= SCTP_ADDR_REACHABLE;
  838                                 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
  839                                     SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
  840                                 /* now was it the primary? if so restore */
  841                                 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
  842                                         (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
  843                                 }
  844                         }
  845                         /*
  846                          * JRS 5/14/07 - If CMT PF is on and the destination
  847                          * is in PF state, set the destination to active
  848                          * state and set the cwnd to one or two MTU's based
  849                          * on whether PF1 or PF2 is being used.
  850                          * 
  851                          * Should we stop any running T3 timer here?
  852                          */
  853                         if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
  854                             SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
  855                             ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
  856                                 net->dest_state &= ~SCTP_ADDR_PF;
  857                                 net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
  858                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
  859                                     net, net->cwnd);
  860                                 /*
  861                                  * Since the cwnd value is explicitly set,
  862                                  * skip the code that updates the cwnd
  863                                  * value.
  864                                  */
  865                                 goto skip_cwnd_update;
  866                         }
  867                 }
  868 #ifdef JANA_CMT_FAST_RECOVERY
  869                 /*
  870                  * CMT fast recovery code
  871                  */
  872                 /*
  873                  * if (sctp_cmt_on_off == 1 &&
  874                  * net->fast_retran_loss_recovery &&
  875                  * net->will_exit_fast_recovery == 0) { @@@ Do something }
  876                  * else if (sctp_cmt_on_off == 0 &&
  877                  * asoc->fast_retran_loss_recovery && will_exit == 0) {
  878                  */
  879 #endif
  880 
  881                 if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
  882                         /*
  883                          * If we are in loss recovery we skip any cwnd
  884                          * update
  885                          */
  886                         goto skip_cwnd_update;
  887                 }
  888                 /*
  889                  * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
  890                  * moved.
  891                  */
  892                 if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
  893                         /* If the cumulative ack moved we can proceed */
  894                         if (net->cwnd <= net->ssthresh) {
  895                                 /* We are in slow start */
  896                                 if (net->flight_size + net->net_ack >= net->cwnd) {
  897 
  898                                         sctp_hs_cwnd_increase(stcb, net);
  899 
  900                                 } else {
  901                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  902                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  903                                                     SCTP_CWND_LOG_NOADV_SS);
  904                                         }
  905                                 }
  906                         } else {
  907                                 /* We are in congestion avoidance */
  908                                 net->partial_bytes_acked += net->net_ack;
  909                                 if ((net->flight_size + net->net_ack >= net->cwnd) &&
  910                                     (net->partial_bytes_acked >= net->cwnd)) {
  911                                         net->partial_bytes_acked -= net->cwnd;
  912                                         net->cwnd += net->mtu;
  913                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
  914                                                 sctp_log_cwnd(stcb, net, net->mtu,
  915                                                     SCTP_CWND_LOG_FROM_CA);
  916                                         }
  917                                 } else {
  918                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  919                                                 sctp_log_cwnd(stcb, net, net->net_ack,
  920                                                     SCTP_CWND_LOG_NOADV_CA);
  921                                         }
  922                                 }
  923                         }
  924                 } else {
  925                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  926                                 sctp_log_cwnd(stcb, net, net->mtu,
  927                                     SCTP_CWND_LOG_NO_CUMACK);
  928                         }
  929                 }
  930 skip_cwnd_update:
  931                 /*
  932                  * NOW, according to Karn's rule do we need to restore the
  933                  * RTO timer back? Check our net_ack2. If not set then we
  934          * have an ambiguity, i.e. all data ack'd was sent to more
  935                  * than one place.
  936                  */
  937                 if (net->net_ack2) {
  938                         /* restore any doubled timers */
  939                         net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
  940                         if (net->RTO < stcb->asoc.minrto) {
  941                                 net->RTO = stcb->asoc.minrto;
  942                         }
  943                         if (net->RTO > stcb->asoc.maxrto) {
  944                                 net->RTO = stcb->asoc.maxrto;
  945                         }
  946                 }
  947         }
  948 }
  949 
  950 
  951 /*
  952  * H-TCP congestion control. The algorithm is detailed in:
  953  * R.N.Shorten, D.J.Leith:
  954  *   "H-TCP: TCP for high-speed and long-distance networks"
  955  *   Proc. PFLDnet, Argonne, 2004.
  956  * http://www.hamilton.ie/net/htcp3.pdf
  957  */
  958 
  959 
  960 static int use_rtt_scaling = 1;
  961 static int use_bandwidth_switch = 1;
  962 
  963 static inline int
  964 between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
  965 {
  966         return seq3 - seq2 >= seq1 - seq2;
  967 }
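
between() leans on unsigned 32-bit wraparound: it is true exactly when seq1 lies in the forward window [seq2, seq3] taken modulo 2^32, so for instance between(9, 4294967290U, 20) holds because both differences wrap to small values (15 and 26). htcp_beta_update() below uses it as between(5 * maxB, 4 * old_maxB, 6 * old_maxB), i.e. a test that the new bandwidth estimate lies within roughly 20% of the previous one.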
  968 
  969 static inline uint32_t
  970 htcp_cong_time(struct htcp *ca)
  971 {
  972         return sctp_get_tick_count() - ca->last_cong;
  973 }
  974 
  975 static inline uint32_t
  976 htcp_ccount(struct htcp *ca)
  977 {
  978         return htcp_cong_time(ca) / ca->minRTT;
  979 }
  980 
  981 static inline void
  982 htcp_reset(struct htcp *ca)
  983 {
  984         ca->undo_last_cong = ca->last_cong;
  985         ca->undo_maxRTT = ca->maxRTT;
  986         ca->undo_old_maxB = ca->old_maxB;
  987         ca->last_cong = sctp_get_tick_count();
  988 }
  989 
  990 #ifdef SCTP_NOT_USED
  991 
  992 static uint32_t
  993 htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
  994 {
  995         net->htcp_ca.last_cong = net->htcp_ca.undo_last_cong;
  996         net->htcp_ca.maxRTT = net->htcp_ca.undo_maxRTT;
  997         net->htcp_ca.old_maxB = net->htcp_ca.undo_old_maxB;
  998         return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->htcp_ca.beta) * net->mtu);
  999 }
 1000 
 1001 #endif
 1002 
 1003 static inline void
 1004 measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
 1005 {
 1006         uint32_t srtt = net->lastsa >> 3;
 1007 
 1008         /* keep track of minimum RTT seen so far, minRTT is zero at first */
 1009         if (net->htcp_ca.minRTT > srtt || !net->htcp_ca.minRTT)
 1010                 net->htcp_ca.minRTT = srtt;
 1011 
 1012         /* max RTT */
 1013         if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->htcp_ca) > 3) {
 1014                 if (net->htcp_ca.maxRTT < net->htcp_ca.minRTT)
 1015                         net->htcp_ca.maxRTT = net->htcp_ca.minRTT;
 1016                 if (net->htcp_ca.maxRTT < srtt && srtt <= net->htcp_ca.maxRTT + MSEC_TO_TICKS(20))
 1017                         net->htcp_ca.maxRTT = srtt;
 1018         }
 1019 }
 1020 
 1021 static void
 1022 measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
 1023 {
 1024         uint32_t now = sctp_get_tick_count();
 1025 
 1026         if (net->fast_retran_ip == 0)
 1027                 net->htcp_ca.bytes_acked = net->net_ack;
 1028 
 1029         if (!use_bandwidth_switch)
 1030                 return;
 1031 
 1032         /* achieved throughput calculations */
 1033         /* JRS - not 100% sure of this statement */
 1034         if (net->fast_retran_ip == 1) {
 1035                 net->htcp_ca.bytecount = 0;
 1036                 net->htcp_ca.lasttime = now;
 1037                 return;
 1038         }
 1039         net->htcp_ca.bytecount += net->net_ack;
 1040 
 1041         if (net->htcp_ca.bytecount >= net->cwnd - ((net->htcp_ca.alpha >> 7 ? : 1) * net->mtu)
 1042             && now - net->htcp_ca.lasttime >= net->htcp_ca.minRTT
 1043             && net->htcp_ca.minRTT > 0) {
 1044                 uint32_t cur_Bi = net->htcp_ca.bytecount / net->mtu * hz / (now - net->htcp_ca.lasttime);
 1045 
 1046                 if (htcp_ccount(&net->htcp_ca) <= 3) {
 1047                         /* just after backoff */
 1048                         net->htcp_ca.minB = net->htcp_ca.maxB = net->htcp_ca.Bi = cur_Bi;
 1049                 } else {
 1050                         net->htcp_ca.Bi = (3 * net->htcp_ca.Bi + cur_Bi) / 4;
 1051                         if (net->htcp_ca.Bi > net->htcp_ca.maxB)
 1052                                 net->htcp_ca.maxB = net->htcp_ca.Bi;
 1053                         if (net->htcp_ca.minB > net->htcp_ca.maxB)
 1054                                 net->htcp_ca.minB = net->htcp_ca.maxB;
 1055                 }
 1056                 net->htcp_ca.bytecount = 0;
 1057                 net->htcp_ca.lasttime = now;
 1058         }
 1059 }
 1060 
 1061 static inline void
 1062 htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
 1063 {
 1064         if (use_bandwidth_switch) {
 1065                 uint32_t maxB = ca->maxB;
 1066                 uint32_t old_maxB = ca->old_maxB;
 1067 
 1068                 ca->old_maxB = ca->maxB;
 1069 
 1070                 if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
 1071                         ca->beta = BETA_MIN;
 1072                         ca->modeswitch = 0;
 1073                         return;
 1074                 }
 1075         }
 1076         if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
 1077                 ca->beta = (minRTT << 7) / maxRTT;
 1078                 if (ca->beta < BETA_MIN)
 1079                         ca->beta = BETA_MIN;
 1080                 else if (ca->beta > BETA_MAX)
 1081                         ca->beta = BETA_MAX;
 1082         } else {
 1083                 ca->beta = BETA_MIN;
 1084                 ca->modeswitch = 1;
 1085         }
 1086 }
 1087 
 1088 static inline void
 1089 htcp_alpha_update(struct htcp *ca)
 1090 {
 1091         uint32_t minRTT = ca->minRTT;
 1092         uint32_t factor = 1;
 1093         uint32_t diff = htcp_cong_time(ca);
 1094 
 1095         if (diff > (uint32_t) hz) {
 1096                 diff -= hz;
 1097                 factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
 1098         }
 1099         if (use_rtt_scaling && minRTT) {
 1100                 uint32_t scale = (hz << 3) / (10 * minRTT);
 1101 
 1102                 scale = min(max(scale, 1U << 2), 10U << 3);     /* clamping ratio to
 1103                                                                  * interval [0.5,10]<<3 */
 1104                 factor = (factor << 3) / scale;
 1105                 if (!factor)
 1106                         factor = 1;
 1107         }
 1108         ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
 1109         if (!ca->alpha)
 1110                 ca->alpha = ALPHA_BASE;
 1111 }
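
/*
 * Worked example for the alpha update above, with assumed sample values
 * hz = 1000, beta = 76 (about 0.59 in <<7 fixed point), minRTT = 100
 * ticks and 3000 ticks since the last congestion event:
 *
 *      diff   = 3000 - 1000 = 2000
 *      factor = 1 + (10 * 2000 + (1000 * 1000) / 1000) / 1000 = 22
 *      scale  = (1000 << 3) / (10 * 100) = 8   (1.0 in <<3 fixed point)
 *      factor = (22 << 3) / 8 = 22             (RTT scaling is neutral here)
 *      alpha  = 2 * 22 * (128 - 76) = 2288     (about 17.9 in <<7 fixed point)
 *
 * i.e. roughly 18 MTUs of congestion-avoidance growth per RTT, and the
 * target keeps growing the longer the flow goes without a congestion
 * event, as H-TCP intends.
 */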
 1112 
 1113 /* After we have the rtt data to calculate beta, we'd still prefer to wait one
 1114  * rtt before we adjust our beta, to ensure we are working from consistent
 1115  * data.
 1116  *
 1117  * This function should be called when we hit a congestion event, since only
 1118  * at that point do we have a realistic sense of maxRTT (the queues en route
 1119  * were just becoming too full).
 1120  */
 1121 static void
 1122 htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net)
 1123 {
 1124         uint32_t minRTT = net->htcp_ca.minRTT;
 1125         uint32_t maxRTT = net->htcp_ca.maxRTT;
 1126 
 1127         htcp_beta_update(&net->htcp_ca, minRTT, maxRTT);
 1128         htcp_alpha_update(&net->htcp_ca);
 1129 
 1130         /*
 1131          * add slowly fading memory for maxRTT to accommodate routing
 1132          * changes etc
 1133          */
 1134         if (minRTT > 0 && maxRTT > minRTT)
 1135                 net->htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
 1136 }
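
/*
 * Worked example for the fading-memory step above, with assumed sample
 * values minRTT = 60 ticks and maxRTT = 100 ticks: after updating beta
 * and alpha, maxRTT is pulled back to 60 + ((100 - 60) * 95) / 100 = 98
 * ticks, so a stale maxRTT slowly decays toward minRTT if the path (and
 * its queueing delay) changes.
 */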
 1137 
 1138 static uint32_t
 1139 htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net)
 1140 {
 1141         htcp_param_update(stcb, net);
 1142         return max(((net->cwnd / net->mtu * net->htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
 1143 }
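
/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * ssthresh computation above with hypothetical local names, to make the
 * <<7 fixed-point scaling by beta explicit.  With an assumed cwnd of
 * 20 MTUs and beta = 76, (20 * 76) >> 7 = 11, so ssthresh becomes 11 MTUs.
 */
#if 0
static uint32_t
htcp_ssthresh_sketch(uint32_t cwnd, uint32_t mtu, uint32_t beta_q7)
{
        uint32_t cwnd_mtus = cwnd / mtu;                        /* cwnd in whole MTUs */
        uint32_t reduced = ((cwnd_mtus * beta_q7) >> 7) * mtu;  /* scale by beta (Q7) */

        return (max(reduced, 2U * mtu));                        /* never below 2 MTUs */
}
#endif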
 1144 
 1145 static void
 1146 htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
 1147 {
 1148         /*-
 1149          * How to handle these functions?
 1150          *      if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
 1151          *              return;
 1152          */
 1153         if (net->cwnd <= net->ssthresh) {
 1154                 /* We are in slow start */
 1155                 if (net->flight_size + net->net_ack >= net->cwnd) {
 1156                         if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
 1157                                 net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
 1158                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 1159                                         sctp_log_cwnd(stcb, net, net->mtu,
 1160                                             SCTP_CWND_LOG_FROM_SS);
 1161                                 }
 1162                         } else {
 1163                                 net->cwnd += net->net_ack;
 1164                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 1165                                         sctp_log_cwnd(stcb, net, net->net_ack,
 1166                                             SCTP_CWND_LOG_FROM_SS);
 1167                                 }
 1168                         }
 1169                 } else {
 1170                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
 1171                                 sctp_log_cwnd(stcb, net, net->net_ack,
 1172                                     SCTP_CWND_LOG_NOADV_SS);
 1173                         }
 1174                 }
 1175         } else {
 1176                 measure_rtt(stcb, net);
 1177 
 1178                 /*
 1179                  * In dangerous area, increase slowly. In theory this is
 1180                  * net->cwnd += alpha / net->cwnd
 1181                  */
 1182                 /* What is snd_cwnd_cnt?? */
 1183                 if (((net->partial_bytes_acked / net->mtu * net->htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
 1184                         /*-
 1185                          * Does SCTP have a cwnd clamp?
 1186                          * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
 1187                          */
 1188                         net->cwnd += net->mtu;
 1189                         net->partial_bytes_acked = 0;
 1190                         htcp_alpha_update(&net->htcp_ca);
 1191                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 1192                                 sctp_log_cwnd(stcb, net, net->mtu,
 1193                                     SCTP_CWND_LOG_FROM_CA);
 1194                         }
 1195                 } else {
 1196                         net->partial_bytes_acked += net->net_ack;
 1197                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
 1198                                 sctp_log_cwnd(stcb, net, net->net_ack,
 1199                                     SCTP_CWND_LOG_NOADV_CA);
 1200                         }
 1201                 }
 1202 
 1203                 net->htcp_ca.bytes_acked = net->mtu;
 1204         }
 1205 }
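
/*
 * Worked example for the congestion-avoidance branch above, with assumed
 * sample values alpha = 2288 (about 17.9 in <<7 fixed point), mtu = 1500
 * and cwnd = 20 MTUs (30000 bytes): the test
 *
 *      ((partial_bytes_acked / mtu * alpha) >> 7) * mtu >= cwnd
 *
 * first holds once partial_bytes_acked reaches 2 MTUs, since
 * (2 * 2288) >> 7 = 35 >= 20 while (1 * 2288) >> 7 = 17 < 20; cwnd then
 * grows by one MTU and partial_bytes_acked resets.  Over a full cwnd of
 * acknowledged data this approximates the intended cwnd += alpha / cwnd
 * per ack, i.e. roughly alpha MTUs of growth per RTT.  The slow-start
 * branch instead applies ABC: cwnd grows by
 * min(net_ack, sctp_L2_abc_variable * mtu) per SACK, but only while the
 * flight fills the current window.
 */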
 1206 
 1207 #ifdef SCTP_NOT_USED
 1208 /* Lower bound on congestion window. */
 1209 static uint32_t
 1210 htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
 1211 {
 1212         return net->ssthresh;
 1213 }
 1214 
 1215 #endif
 1216 
 1217 static void
 1218 htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
 1219 {
 1220         memset(&net->htcp_ca, 0, sizeof(struct htcp));
 1221         net->htcp_ca.alpha = ALPHA_BASE;
 1222         net->htcp_ca.beta = BETA_MIN;
 1223         net->htcp_ca.bytes_acked = net->mtu;
 1224         net->htcp_ca.last_cong = sctp_get_tick_count();
 1225 }
 1226 
 1227 void
 1228 sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
 1229 {
 1230         /*
 1231          * Start cwnd at the larger of 2 MTUs and SCTP_INITIAL_CWND,
 1232          * but never allow more than 4 MTUs of initial sending.
 1233          */
 1234         net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
 1235         net->ssthresh = stcb->asoc.peers_rwnd;
 1236         htcp_init(stcb, net);
 1237 
 1238         if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
 1239                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
 1240         }
 1241 }
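
/*
 * Worked example for the initial window above, with an assumed mtu of
 * 1500 bytes: whatever SCTP_INITIAL_CWND evaluates to, cwnd ends up
 * clamped to the range [2 * 1500, 4 * 1500] = [3000, 6000] bytes, and
 * ssthresh starts at the peer's advertised receive window.
 */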
 1242 
 1243 void
 1244 sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
 1245     struct sctp_association *asoc,
 1246     int accum_moved, int reneged_all, int will_exit)
 1247 {
 1248         struct sctp_nets *net;
 1249 
 1250         /******************************/
 1251         /* update cwnd and Early FR   */
 1252         /******************************/
 1253         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
 1254 
 1255 #ifdef JANA_CMT_FAST_RECOVERY
 1256                 /*
 1257                  * CMT fast recovery code. Need to debug.
 1258                  */
 1259                 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
 1260                         if (compare_with_wrap(asoc->last_acked_seq,
 1261                             net->fast_recovery_tsn, MAX_TSN) ||
 1262                             (asoc->last_acked_seq == net->fast_recovery_tsn) ||
 1263                             compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
 1264                             (net->pseudo_cumack == net->fast_recovery_tsn)) {
 1265                                 net->will_exit_fast_recovery = 1;
 1266                         }
 1267                 }
 1268 #endif
 1269                 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
 1270                         /*
 1271                          * So, first of all do we need to have an Early FR
 1272                          * timer running?
 1273                          */
 1274                         if (((TAILQ_FIRST(&asoc->sent_queue)) &&
 1275                             (net->ref_count > 1) &&
 1276                             (net->flight_size < net->cwnd)) ||
 1277                             (reneged_all)) {
 1278                                 /*
 1279                                  * yes, so in this case stop it if it's
 1280                                  * running, and then restart it. Reneging
 1281                                  * all is a special case where we want to
 1282                                  * run the Early FR timer and then force the
 1283                                  * last few unacked to be sent, causing us
 1284                                  * to elicit a sack with gaps to force out
 1285                                  * the others.
 1286                                  */
 1287                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
 1288                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
 1289                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
 1290                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
 1291                                 }
 1292                                 SCTP_STAT_INCR(sctps_earlyfrstrid);
 1293                                 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
 1294                         } else {
 1295                                 /* No, stop it if it's running */
 1296                                 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
 1297                                         SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
 1298                                         sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
 1299                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
 1300                                 }
 1301                         }
 1302                 }
 1303                 /* if nothing was acked on this destination skip it */
 1304                 if (net->net_ack == 0) {
 1305                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
 1306                                 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
 1307                         }
 1308                         continue;
 1309                 }
 1310                 if (net->net_ack2 > 0) {
 1311                         /*
 1312                          * Karn's rule applies to clearing the error count;
 1313                          * this is optional.
 1314                          */
 1315                         net->error_count = 0;
 1316                         if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
 1317                             SCTP_ADDR_NOT_REACHABLE) {
 1318                                 /* addr came good */
 1319                                 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
 1320                                 net->dest_state |= SCTP_ADDR_REACHABLE;
 1321                                 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
 1322                                     SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
 1323                                 /* now was it the primary? if so restore */
 1324                                 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
 1325                                         (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
 1326                                 }
 1327                         }
 1328                         /*
 1329                          * JRS 5/14/07 - If CMT PF is on and the destination
 1330                          * is in PF state, set the destination to active
 1331                          * state and set the cwnd to one or two MTU's based
 1332                          * on whether PF1 or PF2 is being used.
 1333                          * 
 1334                          * Should we stop any running T3 timer here?
 1335                          */
 1336                         if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
 1337                             SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
 1338                             ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
 1339                                 net->dest_state &= ~SCTP_ADDR_PF;
 1340                                 net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
 1341                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
 1342                                     net, net->cwnd);
 1343                                 /*
 1344                                  * Since the cwnd value is explicitly set,
 1345                                  * skip the code that updates the cwnd
 1346                                  * value.
 1347                                  */
 1348                                 goto skip_cwnd_update;
 1349                         }
 1350                 }
 1351 #ifdef JANA_CMT_FAST_RECOVERY
 1352                 /*
 1353                  * CMT fast recovery code
 1354                  */
 1355                 /*
 1356                  * if (sctp_cmt_on_off == 1 &&
 1357                  * net->fast_retran_loss_recovery &&
 1358                  * net->will_exit_fast_recovery == 0) { @@@ Do something }
 1359                  * else if (sctp_cmt_on_off == 0 &&
 1360                  * asoc->fast_retran_loss_recovery && will_exit == 0) {
 1361                  */
 1362 #endif
 1363 
 1364                 if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
 1365                         /*
 1366                          * If we are in loss recovery we skip any cwnd
 1367                          * update
 1368                          */
 1369                         goto skip_cwnd_update;
 1370                 }
 1371                 /*
 1372                  * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
 1373                  * moved.
 1374                  */
 1375                 if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
 1376                         htcp_cong_avoid(stcb, net);
 1377                         measure_achieved_throughput(stcb, net);
 1378                 } else {
 1379                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
 1380                                 sctp_log_cwnd(stcb, net, net->mtu,
 1381                                     SCTP_CWND_LOG_NO_CUMACK);
 1382                         }
 1383                 }
 1384 skip_cwnd_update:
 1385                 /*
 1386                  * NOW, according to Karn's rule, do we need to restore the
 1387                  * RTO timer? Check our net_ack2. If it is not set then we
 1388                  * have an ambiguity, i.e. all data ack'd was sent to more
 1389                  * than one place.
 1390                  */
 1391                 if (net->net_ack2) {
 1392                         /* restore any doubled timers */
 1393                         net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
 1394                         if (net->RTO < stcb->asoc.minrto) {
 1395                                 net->RTO = stcb->asoc.minrto;
 1396                         }
 1397                         if (net->RTO > stcb->asoc.maxrto) {
 1398                                 net->RTO = stcb->asoc.maxrto;
 1399                         }
 1400                 }
 1401         }
 1402 }
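
/*
 * Note on the Karn's-rule block above: when net_ack2 is non-zero the RTO
 * is recomputed directly from the smoothed RTT state (lastsa/lastsv,
 * which the shifts suggest are kept in a scaled fixed-point form) rather
 * than keeping any exponentially backed-off value, and the result is
 * clamped to the association's [minrto, maxrto] range.
 */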
 1403 
 1404 void
 1405 sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
 1406     struct sctp_association *asoc)
 1407 {
 1408         struct sctp_nets *net;
 1409 
 1410         /*
 1411          * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
 1412          * (net->fast_retran_loss_recovery == 0)))
 1413          */
 1414         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
 1415                 if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) {
 1416                         /* out of an RFC2582 Fast recovery window? */
 1417                         if (net->net_ack > 0) {
 1418                                 /*
 1419                                  * per section 7.2.3, were there any
 1420                                  * destinations that had a fast retransmit
 1421                                  * sent to them? If so, we need to adjust
 1422                                  * ssthresh and cwnd.
 1423                                  */
 1424                                 struct sctp_tmit_chunk *lchk;
 1425                                 int old_cwnd = net->cwnd;
 1426 
 1427                                 /* JRS - reset as if state were changed */
 1428                                 htcp_reset(&net->htcp_ca);
 1429                                 net->ssthresh = htcp_recalc_ssthresh(stcb, net);
 1430                                 net->cwnd = net->ssthresh;
 1431                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 1432                                         sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
 1433                                             SCTP_CWND_LOG_FROM_FR);
 1434                                 }
 1435                                 lchk = TAILQ_FIRST(&asoc->send_queue);
 1436 
 1437                                 net->partial_bytes_acked = 0;
 1438                                 /* Turn on fast recovery window */
 1439                                 asoc->fast_retran_loss_recovery = 1;
 1440                                 if (lchk == NULL) {
 1441                                         /* Mark end of the window */
 1442                                         asoc->fast_recovery_tsn = asoc->sending_seq - 1;
 1443                                 } else {
 1444                                         asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
 1445                                 }
 1446 
 1447                                 /*
 1448                                  * CMT fast recovery -- per destination
 1449                                  * recovery variable.
 1450                                  */
 1451                                 net->fast_retran_loss_recovery = 1;
 1452 
 1453                                 if (lchk == NULL) {
 1454                                         /* Mark end of the window */
 1455                                         net->fast_recovery_tsn = asoc->sending_seq - 1;
 1456                                 } else {
 1457                                         net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
 1458                                 }
 1459 
 1460                                 /*
 1461                                  * Disable Nonce Sum Checking and store the
 1462                                  * resync tsn
 1463                                  */
 1464                                 asoc->nonce_sum_check = 0;
 1465                                 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
 1466 
 1467                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
 1468                                     stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
 1469                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
 1470                                     stcb->sctp_ep, stcb, net);
 1471                         }
 1472                 } else if (net->net_ack > 0) {
 1473                         /*
 1474                          * Mark a peg that we WOULD have done a cwnd
 1475                          * reduction but RFC2582 prevented this action.
 1476                          */
 1477                         SCTP_STAT_INCR(sctps_fastretransinrtt);
 1478                 }
 1479         }
 1480 }
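
/*
 * Summary example of the fast-retransmit handling above, with assumed
 * sample values cwnd = 20 MTUs and beta = 76: htcp_recalc_ssthresh()
 * brings ssthresh down to about 11 MTUs, cwnd is set equal to it, and
 * the fast-recovery window is marked to end at the TSN just before the
 * first chunk still on the send queue (or at sending_seq - 1 if that
 * queue is empty).  While that window is open, further reductions are
 * skipped and counted via the sctps_fastretransinrtt peg above.
 */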
 1481 
 1482 void
 1483 sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
 1484     struct sctp_nets *net)
 1485 {
 1486         int old_cwnd = net->cwnd;
 1487 
 1488         /* JRS - reset as if the state were being changed to timeout */
 1489         htcp_reset(&net->htcp_ca);
 1490         net->ssthresh = htcp_recalc_ssthresh(stcb, net);
 1491         net->cwnd = net->mtu;
 1492         net->partial_bytes_acked = 0;
 1493         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 1494                 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
 1495         }
 1496 }
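
/*
 * Worked example for the timeout handling above, with assumed sample
 * values cwnd = 20 MTUs and beta = 76: ssthresh drops to about 11 MTUs
 * via htcp_recalc_ssthresh(), while cwnd itself restarts at a single
 * MTU, so the association re-enters slow start until cwnd again reaches
 * ssthresh.
 */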
 1497 
 1498 void
 1499 sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
 1500     struct sctp_tcb *stcb, struct sctp_nets *net)
 1501 {
 1502         int old_cwnd;
 1503 
 1504         old_cwnd = net->cwnd;
 1505 
 1506         sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
 1507         net->htcp_ca.last_cong = sctp_get_tick_count();
 1508         /*
 1509          * make a small adjustment to cwnd and force to CA.
 1510          */
 1511         if (net->cwnd > net->mtu)
 1512                 /* drop down one MTU after sending */
 1513                 net->cwnd -= net->mtu;
 1514         if (net->cwnd < net->ssthresh)
 1515                 /* still in SS move to CA */
 1516                 net->ssthresh = net->cwnd - 1;
 1517         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 1518                 sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
 1519         }
 1520 }
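
/*
 * Worked example for the early-FR timer handling above, with assumed
 * sample values mtu = 1500, cwnd = 15000 and ssthresh = 30000: after
 * forcing output, cwnd drops to 13500; because that is still below
 * ssthresh, ssthresh is pulled down to 13499 so that subsequent growth
 * uses the congestion-avoidance path rather than slow start.
 */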
 1521 
 1522 void
 1523 sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
 1524     struct sctp_nets *net)
 1525 {
 1526         int old_cwnd;
 1527 
 1528         old_cwnd = net->cwnd;
 1529 
 1530         /* JRS - reset htcp as if state changed */
 1531         htcp_reset(&net->htcp_ca);
 1532         SCTP_STAT_INCR(sctps_ecnereducedcwnd);
 1533         net->ssthresh = htcp_recalc_ssthresh(stcb, net);
 1534         if (net->ssthresh < net->mtu) {
 1535                 net->ssthresh = net->mtu;
 1536                 /* here back off the timer as well, to slow us down */
 1537                 net->RTO <<= 1;
 1538         }
 1539         net->cwnd = net->ssthresh;
 1540         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 1541                 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
 1542         }
 1543 }
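
/*
 * Worked example for the ECN echo handling above, with assumed sample
 * values cwnd = 20 MTUs and beta = 76: ssthresh is recalculated to about
 * 11 MTUs and cwnd is set to match.  Only if the recalculated ssthresh
 * would fall below one MTU is it floored at a single MTU and the RTO
 * doubled to slow the sender further.
 */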
