FreeBSD/Linux Kernel Cross Reference
sys/netinet/tcp_output.c


    1 /*
    2  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
    3  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to The DragonFly Project
    6  * by Jeffrey M. Hsu.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of The DragonFly Project nor the names of its
   17  *    contributors may be used to endorse or promote products derived
   18  *    from this software without specific, prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
   24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   25  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
   26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   31  * SUCH DAMAGE.
   32  */
   33 
   34 /*
   35  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
   36  *      The Regents of the University of California.  All rights reserved.
   37  *
   38  * Redistribution and use in source and binary forms, with or without
   39  * modification, are permitted provided that the following conditions
   40  * are met:
   41  * 1. Redistributions of source code must retain the above copyright
   42  *    notice, this list of conditions and the following disclaimer.
   43  * 2. Redistributions in binary form must reproduce the above copyright
   44  *    notice, this list of conditions and the following disclaimer in the
   45  *    documentation and/or other materials provided with the distribution.
   46  * 3. Neither the name of the University nor the names of its contributors
   47  *    may be used to endorse or promote products derived from this software
   48  *    without specific prior written permission.
   49  *
   50  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   51  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   52  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   53  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   54  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   55  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   56  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   57  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   58  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   59  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   60  * SUCH DAMAGE.
   61  *
   62  *      @(#)tcp_output.c        8.4 (Berkeley) 5/24/95
   63  * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.20 2003/01/29 22:45:36 hsu Exp $
   64  */
   65 
   66 #include "opt_inet.h"
   67 #include "opt_inet6.h"
   68 #include "opt_ipsec.h"
   69 #include "opt_tcpdebug.h"
   70 
   71 #include <sys/param.h>
   72 #include <sys/systm.h>
   73 #include <sys/kernel.h>
   74 #include <sys/sysctl.h>
   75 #include <sys/mbuf.h>
   76 #include <sys/domain.h>
   77 #include <sys/protosw.h>
   78 #include <sys/socket.h>
   79 #include <sys/socketvar.h>
   80 #include <sys/in_cksum.h>
   81 #include <sys/thread.h>
   82 #include <sys/globaldata.h>
   83 
   84 #include <net/if_var.h>
   85 #include <net/route.h>
   86 #include <net/netmsg2.h>
   87 #include <net/netisr2.h>
   88 
   89 #include <netinet/in.h>
   90 #include <netinet/in_systm.h>
   91 #include <netinet/ip.h>
   92 #include <netinet/in_pcb.h>
   93 #include <netinet/ip_var.h>
   94 #include <netinet6/in6_pcb.h>
   95 #include <netinet/ip6.h>
   96 #include <netinet6/ip6_var.h>
   97 #include <netinet/tcp.h>
   98 #define TCPOUTFLAGS
   99 #include <netinet/tcp_fsm.h>
  100 #include <netinet/tcp_seq.h>
  101 #include <netinet/tcp_timer.h>
  102 #include <netinet/tcp_timer2.h>
  103 #include <netinet/tcp_var.h>
  104 #include <netinet/tcpip.h>
  105 #ifdef TCPDEBUG
  106 #include <netinet/tcp_debug.h>
  107 #endif
  108 
  109 #ifdef IPSEC
  110 #include <netinet6/ipsec.h>
  111 #endif /*IPSEC*/
  112 
  113 #ifdef FAST_IPSEC
  114 #include <netproto/ipsec/ipsec.h>
  115 #define IPSEC
  116 #endif /*FAST_IPSEC*/
  117 
  118 #ifdef notyet
  119 extern struct mbuf *m_copypack();
  120 #endif
  121 
  122 int path_mtu_discovery = 0;
  123 SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
  124         &path_mtu_discovery, 1, "Enable Path MTU Discovery");
  125 
  126 static int avoid_pure_win_update = 1;
  127 SYSCTL_INT(_net_inet_tcp, OID_AUTO, avoid_pure_win_update, CTLFLAG_RW,
  128         &avoid_pure_win_update, 1, "Avoid pure window updates when possible");
  129 
  130 int tcp_do_autosndbuf = 1;
  131 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_RW,
  132     &tcp_do_autosndbuf, 0, "Enable automatic send buffer sizing");
  133 
  134 int tcp_autosndbuf_inc = 8*1024;
  135 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_RW,
   136     &tcp_autosndbuf_inc, 0, "Increment step size of automatic send buffer");
  137 
  138 int tcp_autosndbuf_max = 2*1024*1024;
  139 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_RW,
  140     &tcp_autosndbuf_max, 0, "Max size of automatic send buffer");
  141 
  142 int tcp_prio_synack = 1;
  143 SYSCTL_INT(_net_inet_tcp, OID_AUTO, prio_synack, CTLFLAG_RW,
  144     &tcp_prio_synack, 0, "Prioritize SYN, SYN|ACK and pure ACK");
  145 
  146 static int tcp_idle_cwv = 1;
  147 SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_cwv, CTLFLAG_RW,
  148     &tcp_idle_cwv, 0,
  149     "Congestion window validation after idle period (part of RFC2861)");
  150 
  151 static int tcp_idle_restart = 1;
  152 SYSCTL_INT(_net_inet_tcp, OID_AUTO, idle_restart, CTLFLAG_RW,
  153     &tcp_idle_restart, 0, "Reset congestion window after idle period");
  154 
  155 static int tcp_do_tso = 1;
  156 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW,
  157     &tcp_do_tso, 0, "Enable TCP Segmentation Offload (TSO)");
  158 
  159 static int tcp_fairsend = 4;
  160 SYSCTL_INT(_net_inet_tcp, OID_AUTO, fairsend, CTLFLAG_RW,
  161     &tcp_fairsend, 0,
   162     "Number of segments sent before yielding to other senders or receivers");
  163 
  164 static void     tcp_idle_cwnd_validate(struct tcpcb *);
  165 
  166 static int      tcp_tso_getsize(struct tcpcb *tp, u_int *segsz, u_int *hlen);
  167 static void     tcp_output_sched(struct tcpcb *tp);
  168 
  169 /*
  170  * Tcp output routine: figure out what should be sent and send it.
  171  */
  172 int
  173 tcp_output(struct tcpcb *tp)
  174 {
  175         struct inpcb * const inp = tp->t_inpcb;
  176         struct socket *so = inp->inp_socket;
  177         long len, recvwin, sendwin;
  178         int nsacked = 0;
  179         int off, flags, error = 0;
  180 #ifdef TCP_SIGNATURE
  181         int sigoff = 0;
  182 #endif
  183         struct mbuf *m;
  184         struct ip *ip;
  185         struct tcphdr *th;
  186         u_char opt[TCP_MAXOLEN];
  187         unsigned int ipoptlen, optlen, hdrlen;
  188         int idle;
  189         boolean_t sendalot;
  190         struct ip6_hdr *ip6;
  191 #ifdef INET6
  192         const boolean_t isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
  193 #else
  194         const boolean_t isipv6 = FALSE;
  195 #endif
  196         boolean_t can_tso = FALSE, use_tso;
  197         boolean_t report_sack, idle_cwv = FALSE;
  198         u_int segsz, tso_hlen, tso_lenmax = 0;
  199         int segcnt = 0;
  200         boolean_t need_sched = FALSE;
  201 
  202         KKASSERT(so->so_port == &curthread->td_msgport);
  203 
  204         /*
  205          * Determine length of data that should be transmitted,
  206          * and flags that will be used.
  207          * If there is some data or critical controls (SYN, RST)
  208          * to send, then transmit; otherwise, investigate further.
  209          */
  210 
  211         /*
  212          * If we have been idle for a while, the send congestion window
   213          * may no longer be representative of the current state of the
   214          * link; we need to validate the congestion window.  However, we
   215          * should not perform congestion window validation here, since we
   216          * could be asked to send a pure ACK.
  217          */
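               /*
                * Concretely, the test below treats the connection as idle
                * when nothing is outstanding (snd_max == snd_una) and at
                * least one retransmit-timeout interval (t_rxtcur ticks) has
                * passed since the last transmission (snd_last), provided the
                * idle_restart sysctl is enabled.
                */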
  218         if (tp->snd_max == tp->snd_una &&
  219             (ticks - tp->snd_last) >= tp->t_rxtcur && tcp_idle_restart)
  220                 idle_cwv = TRUE;
  221 
  222         /*
  223          * Calculate whether the transmit stream was previously idle 
  224          * and adjust TF_LASTIDLE for the next time.
  225          */
  226         idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
  227         if (idle && (tp->t_flags & TF_MORETOCOME))
  228                 tp->t_flags |= TF_LASTIDLE;
  229         else
  230                 tp->t_flags &= ~TF_LASTIDLE;
  231 
  232         if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
  233             !IN_FASTRECOVERY(tp))
  234                 nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt);
  235 
  236         /*
  237          * Find out whether TSO could be used or not
  238          *
  239          * For TSO capable devices, the following assumptions apply to
  240          * the processing of TCP flags:
  241          * - If FIN is set on the large TCP segment, the device must set
  242          *   FIN on the last segment that it creates from the large TCP
  243          *   segment.
  244          * - If PUSH is set on the large TCP segment, the device must set
  245          *   PUSH on the last segment that it creates from the large TCP
  246          *   segment.
  247          */
  248 #if !defined(IPSEC) && !defined(FAST_IPSEC)
  249         if (tcp_do_tso
  250 #ifdef TCP_SIGNATURE
  251             && (tp->t_flags & TF_SIGNATURE) == 0
  252 #endif
  253         ) {
  254                 if (!isipv6) {
  255                         struct rtentry *rt = inp->inp_route.ro_rt;
  256 
  257                         if (rt != NULL && (rt->rt_flags & RTF_UP) &&
  258                             (rt->rt_ifp->if_hwassist & CSUM_TSO)) {
  259                                 can_tso = TRUE;
  260                                 tso_lenmax = rt->rt_ifp->if_tsolen;
  261                         }
  262                 }
  263         }
  264 #endif  /* !IPSEC && !FAST_IPSEC */
  265 
  266 again:
  267         m = NULL;
  268         ip = NULL;
  269         th = NULL;
  270         ip6 = NULL;
  271 
  272         if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) ==
  273                 TF_SACK_PERMITTED &&
  274             (!TAILQ_EMPTY(&tp->t_segq) ||
  275              tp->reportblk.rblk_start != tp->reportblk.rblk_end))
  276                 report_sack = TRUE;
  277         else
  278                 report_sack = FALSE;
  279 
  280         /* Make use of SACK information when slow-starting after a RTO. */
  281         if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
  282             !IN_FASTRECOVERY(tp)) {
  283                 tcp_seq old_snd_nxt = tp->snd_nxt;
  284 
  285                 tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt);
  286                 nsacked += tp->snd_nxt - old_snd_nxt;
  287         }
  288 
  289         sendalot = FALSE;
  290         off = tp->snd_nxt - tp->snd_una;
  291         sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked);
  292         sendwin = min(sendwin, tp->snd_bwnd);
  293 
  294         flags = tcp_outflags[tp->t_state];
  295         /*
  296          * Get standard flags, and add SYN or FIN if requested by 'hidden'
  297          * state flags.
  298          */
  299         if (tp->t_flags & TF_NEEDFIN)
  300                 flags |= TH_FIN;
  301         if (tp->t_flags & TF_NEEDSYN)
  302                 flags |= TH_SYN;
  303 
  304         /*
  305          * If in persist timeout with window of 0, send 1 byte.
  306          * Otherwise, if window is small but nonzero
  307          * and timer expired, we will send what we can
  308          * and go to transmit state.
  309          */
  310         if (tp->t_flags & TF_FORCE) {
  311                 if (sendwin == 0) {
  312                         /*
  313                          * If we still have some data to send, then
  314                          * clear the FIN bit.  Usually this would
  315                          * happen below when it realizes that we
  316                          * aren't sending all the data.  However,
  317                          * if we have exactly 1 byte of unsent data,
  318                          * then it won't clear the FIN bit below,
  319                          * and if we are in persist state, we wind
  320                          * up sending the packet without recording
  321                          * that we sent the FIN bit.
  322                          *
  323                          * We can't just blindly clear the FIN bit,
  324                          * because if we don't have any more data
  325                          * to send then the probe will be the FIN
  326                          * itself.
  327                          */
  328                         if (off < so->so_snd.ssb_cc)
  329                                 flags &= ~TH_FIN;
  330                         sendwin = 1;
  331                 } else {
  332                         tcp_callout_stop(tp, tp->tt_persist);
  333                         tp->t_rxtshift = 0;
  334                 }
  335         }
  336 
  337         /*
  338          * If snd_nxt == snd_max and we have transmitted a FIN, the
  339          * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in
  340          * a negative length.  This can also occur when TCP opens up
  341          * its congestion window while receiving additional duplicate
  342          * acks after fast-retransmit because TCP will reset snd_nxt
  343          * to snd_max after the fast-retransmit.
  344          *
  345          * A negative length can also occur when we are in the
   346          * TCPS_SYN_RECEIVED state due to a simultaneous connect where
  347          * our SYN has not been acked yet.
  348          *
  349          * In the normal retransmit-FIN-only case, however, snd_nxt will
  350          * be set to snd_una, the offset will be 0, and the length may
  351          * wind up 0.
  352          */
  353         len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off;
  354 
  355         /*
  356          * Lop off SYN bit if it has already been sent.  However, if this
  357          * is SYN-SENT state and if segment contains data, suppress sending
  358          * segment (sending the segment would be an option if we still
  359          * did TAO and the remote host supported it).
  360          */
  361         if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
  362                 flags &= ~TH_SYN;
  363                 off--, len++;
  364                 if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
  365                         tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW);
  366                         return 0;
  367                 }
  368         }
  369 
  370         /*
  371          * Be careful not to send data and/or FIN on SYN segments.
  372          * This measure is needed to prevent interoperability problems
  373          * with not fully conformant TCP implementations.
  374          */
  375         if (flags & TH_SYN) {
  376                 len = 0;
  377                 flags &= ~TH_FIN;
  378         }
  379 
  380         if (len < 0) {
  381                 /*
  382                  * A negative len can occur if our FIN has been sent but not
   383                  * acked, or if we are in a simultaneous connect in the
  384                  * TCPS_SYN_RECEIVED state with our SYN sent but not yet
  385                  * acked.
  386                  *
  387                  * If our window has contracted to 0 in the FIN case
  388                  * (which can only occur if we have NOT been called to
  389                  * retransmit as per code a few paragraphs up) then we
  390                  * want to shift the retransmit timer over to the
  391                  * persist timer.
  392                  *
  393                  * However, if we are in the TCPS_SYN_RECEIVED state
   394                  * (the SYN case) we will be in a simultaneous connect and
  395                  * the window may be zero degeneratively.  In this case we
  396                  * do not want to shift to the persist timer after the SYN
  397                  * or the SYN+ACK transmission.
  398                  */
  399                 len = 0;
  400                 if (sendwin == 0 && tp->t_state != TCPS_SYN_RECEIVED) {
  401                         tcp_callout_stop(tp, tp->tt_rexmt);
  402                         tp->t_rxtshift = 0;
  403                         tp->snd_nxt = tp->snd_una;
  404                         if (!tcp_callout_active(tp, tp->tt_persist))
  405                                 tcp_setpersist(tp);
  406                 }
  407         }
  408 
  409         KASSERT(len >= 0, ("%s: len < 0", __func__));
  410         /*
  411          * Automatic sizing of send socket buffer.  Often the send buffer
  412          * size is not optimally adjusted to the actual network conditions
  413          * at hand (delay bandwidth product).  Setting the buffer size too
  414          * small limits throughput on links with high bandwidth and high
   415          * delay (e.g. trans-continental/oceanic links).  Setting the
  416          * buffer size too big consumes too much real kernel memory,
  417          * especially with many connections on busy servers.
  418          *
  419          * The criteria to step up the send buffer one notch are:
  420          *  1. receive window of remote host is larger than send buffer
  421          *     (with a fudge factor of 5/4th);
  422          *  2. send buffer is filled to 7/8th with data (so we actually
  423          *     have data to make use of it);
  424          *  3. send buffer fill has not hit maximal automatic size;
   425          *  4. our send window (slow start and congestion controlled) is
  426          *     larger than sent but unacknowledged data in send buffer.
  427          *
  428          * The remote host receive window scaling factor may limit the
   429          * growth of the send buffer before it reaches its allowed
  430          * maximum.
  431          *
  432          * It scales directly with slow start or congestion window
  433          * and does at most one step per received ACK.  This fast
  434          * scaling has the drawback of growing the send buffer beyond
  435          * what is strictly necessary to make full use of a given
   436          * delay*bandwidth product.  However, testing has shown this not
   437          * to be much of a problem.  At worst we are trading the waste
   438          * of available bandwidth (the non-use of it) for wasting some
  439          * socket buffer memory.
  440          *
  441          * TODO: Shrink send buffer during idle periods together
  442          * with congestion window.  Requires another timer.  Has to
  443          * wait for upcoming tcp timer rewrite.
  444          */
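               /*
                * Illustrative arithmetic (hypothetical numbers): with
                * ssb_hiwat = 64KB, criterion 1 needs snd_wnd of about 51KB
                * or more (since snd_wnd * 5/4 must reach 64KB) and
                * criterion 2 needs at least 56KB (7/8 of 64KB) of queued
                * data before the buffer is grown by sendbuf_inc, up to
                * sendbuf_max.
                */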
  445         if (tcp_do_autosndbuf && so->so_snd.ssb_flags & SSB_AUTOSIZE) {
  446                 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.ssb_hiwat &&
  447                     so->so_snd.ssb_cc >= (so->so_snd.ssb_hiwat / 8 * 7) &&
  448                     so->so_snd.ssb_cc < tcp_autosndbuf_max &&
  449                     sendwin >= (so->so_snd.ssb_cc - (tp->snd_nxt - tp->snd_una))) {
  450                         u_long newsize;
  451 
  452                         newsize = ulmin(so->so_snd.ssb_hiwat +
  453                                          tcp_autosndbuf_inc,
  454                                         tcp_autosndbuf_max);
  455                         if (!ssb_reserve(&so->so_snd, newsize, so, NULL))
  456                                 atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
  457                         if (newsize >= (TCP_MAXWIN << tp->snd_scale))
  458                                 atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
  459                 }
  460         }
  461 
  462         /*
  463          * Don't use TSO, if:
  464          * - Congestion window needs validation
  465          * - There are SACK blocks to report
   466          * - RST or SYN flag is set
  467          * - URG will be set
  468          *
  469          * XXX
   470          * Checking for SYN|RST may look like overkill, but better safe than sorry
  471          */
  472         use_tso = can_tso;
  473         if (report_sack || idle_cwv || (flags & (TH_RST | TH_SYN)))
  474                 use_tso = FALSE;
  475         if (use_tso) {
  476                 tcp_seq ugr_nxt = tp->snd_nxt;
  477 
  478                 if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) &&
  479                     tp->snd_nxt == tp->snd_max)
  480                         --ugr_nxt;
  481 
  482                 if (SEQ_GT(tp->snd_up, ugr_nxt))
  483                         use_tso = FALSE;
  484         }
  485 
  486         if (use_tso) {
  487                 /*
  488                  * Find out segment size and header length for TSO
  489                  */
  490                 error = tcp_tso_getsize(tp, &segsz, &tso_hlen);
  491                 if (error)
  492                         use_tso = FALSE;
  493         }
  494         if (!use_tso) {
  495                 segsz = tp->t_maxseg;
  496                 tso_hlen = 0; /* not used */
  497         }
  498 
  499         /*
  500          * Truncate to the maximum segment length if not TSO, and ensure that
  501          * FIN is removed if the length no longer contains the last data byte.
  502          */
  503         if (len > segsz) {
  504                 if (!use_tso) {
  505                         len = segsz;
  506                         ++segcnt;
  507                 } else {
  508                         int nsegs;
  509 
  510                         if (__predict_false(tso_lenmax < segsz))
  511                                 tso_lenmax = segsz << 1;
  512 
  513                         /*
  514                          * Truncate TSO transfers to (IP_MAXPACKET - iphlen -
  515                          * thoff), and make sure that we send equal size
  516                          * transfers down the stack (rather than big-small-
  517                          * big-small-...).
  518                          */
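                               /*
                                * A sketch with assumed values: if segsz is
                                * 1448 bytes, tso_hlen 52 bytes and tso_lenmax
                                * 65535, and at least that much data is
                                * eligible, then len is first capped at 65535
                                * and nsegs becomes (65535 - 52) / 1448 = 45,
                                * so 45 * 1448 = 65160 bytes are handed down
                                * as one equally-divisible TSO burst.
                                */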
  519                         len = min(len, tso_lenmax);
  520                         nsegs = min(len, (IP_MAXPACKET - tso_hlen)) / segsz;
  521                         KKASSERT(nsegs > 0);
  522 
  523                         len = nsegs * segsz;
  524 
  525                         if (len <= segsz) {
  526                                 use_tso = FALSE;
  527                                 ++segcnt;
  528                         } else {
  529                                 segcnt += nsegs;
  530                         }
  531                 }
  532                 sendalot = TRUE;
  533         } else {
  534                 use_tso = FALSE;
  535                 if (len > 0)
  536                         ++segcnt;
  537         }
  538         if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc))
  539                 flags &= ~TH_FIN;
  540 
  541         recvwin = ssb_space(&so->so_rcv);
  542 
  543         /*
  544          * Sender silly window avoidance.   We transmit under the following
  545          * conditions when len is non-zero:
  546          *
  547          *      - We have a full segment
  548          *      - This is the last buffer in a write()/send() and we are
  549          *        either idle or running NODELAY
  550          *      - we've timed out (e.g. persist timer)
   551          *      - we have more than 1/2 the maximum send window's worth of
  552          *        data (receiver may be limiting the window size)
  553          *      - we need to retransmit
  554          */
  555         if (len) {
  556                 if (len >= segsz)
  557                         goto send;
  558                 /*
  559                  * NOTE! on localhost connections an 'ack' from the remote
  560                  * end may occur synchronously with the output and cause
  561                  * us to flush a buffer queued with moretocome.  XXX
  562                  *
  563                  * note: the len + off check is almost certainly unnecessary.
  564                  */
  565                 if (!(tp->t_flags & TF_MORETOCOME) &&   /* normal case */
  566                     (idle || (tp->t_flags & TF_NODELAY)) &&
  567                     len + off >= so->so_snd.ssb_cc &&
  568                     !(tp->t_flags & TF_NOPUSH)) {
  569                         goto send;
  570                 }
  571                 if (tp->t_flags & TF_FORCE)             /* typ. timeout case */
  572                         goto send;
  573                 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
  574                         goto send;
  575                 if (SEQ_LT(tp->snd_nxt, tp->snd_max))   /* retransmit case */
  576                         goto send;
  577                 if (tp->t_flags & TF_XMITNOW)
  578                         goto send;
  579         }
  580 
  581         /*
  582          * Compare available window to amount of window
  583          * known to peer (as advertised window less
  584          * next expected input).  If the difference is at least two
  585          * max size segments, or at least 50% of the maximum possible
   586          * window, then we want to send a window update to the peer.
  587          */
  588         if (recvwin > 0) {
  589                 /*
  590                  * "adv" is the amount we can increase the window,
  591                  * taking into account that we are limited by
  592                  * TCP_MAXWIN << tp->rcv_scale.
  593                  */
  594                 long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) -
  595                         (tp->rcv_adv - tp->rcv_nxt);
  596                 long hiwat;
  597 
  598                 /*
  599                  * This ack case typically occurs when the user has drained
   600                  * the TCP socket buffer sufficiently to warrant an ack
  601                  * containing a 'pure window update'... that is, an ack that
  602                  * ONLY updates the tcp window.
  603                  *
  604                  * It is unclear why we would need to do a pure window update
  605                  * past 2 segments if we are going to do one at 1/2 the high
  606                  * water mark anyway, especially since under normal conditions
  607                  * the user program will drain the socket buffer quickly.
  608                  * The 2-segment pure window update will often add a large
  609                  * number of extra, unnecessary acks to the stream.
  610                  *
  611                  * avoid_pure_win_update now defaults to 1.
  612                  */
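               /*
                * A sketch with assumed numbers: with a 1460-byte segsz the
                * first test below fires once the window can be opened by at
                * least 2920 bytes; the hiwat/2 test fires once half of the
                * (scaled) receive buffer, e.g. 32KB of a 64KB buffer, can be
                * reclaimed.  avoid_pure_win_update suppresses the first test
                * unless the receive buffer was just resized (TF_RXRESIZED).
                */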
  613                 if (avoid_pure_win_update == 0 ||
  614                     (tp->t_flags & TF_RXRESIZED)) {
  615                         if (adv >= (long) (2 * segsz)) {
  616                                 goto send;
  617                         }
  618                 }
  619                 hiwat = (long)(TCP_MAXWIN << tp->rcv_scale);
  620                 if (hiwat > (long)so->so_rcv.ssb_hiwat)
  621                         hiwat = (long)so->so_rcv.ssb_hiwat;
  622                 if (adv >= hiwat / 2)
  623                         goto send;
  624         }
  625 
  626         /*
  627          * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
  628          * is also a catch-all for the retransmit timer timeout case.
  629          */
  630         if (tp->t_flags & TF_ACKNOW)
  631                 goto send;
  632         if ((flags & TH_RST) ||
  633             ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN)))
  634                 goto send;
  635         if (SEQ_GT(tp->snd_up, tp->snd_una))
  636                 goto send;
  637         /*
  638          * If our state indicates that FIN should be sent
  639          * and we have not yet done so, then we need to send.
  640          */
  641         if ((flags & TH_FIN) &&
  642             (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
  643                 goto send;
  644 
  645         /*
  646          * TCP window updates are not reliable, rather a polling protocol
   647          * using ``persist'' packets is used to ensure receipt of window
  648          * updates.  The three ``states'' for the output side are:
  649          *      idle                    not doing retransmits or persists
  650          *      persisting              to move a small or zero window
  651          *      (re)transmitting        and thereby not persisting
  652          *
  653          * tcp_callout_active(tp, tp->tt_persist)
  654          *      is true when we are in persist state.
  655          * The TF_FORCE flag in tp->t_flags
  656          *      is set when we are called to send a persist packet.
  657          * tcp_callout_active(tp, tp->tt_rexmt)
  658          *      is set when we are retransmitting
  659          * The output side is idle when both timers are zero.
  660          *
  661          * If send window is too small, there is data to transmit, and no
  662          * retransmit or persist is pending, then go to persist state.
  663          *
  664          * If nothing happens soon, send when timer expires:
  665          * if window is nonzero, transmit what we can, otherwise force out
  666          * a byte.
  667          *
  668          * Don't try to set the persist state if we are in TCPS_SYN_RECEIVED
  669          * with data pending.  This situation can occur during a
   670          * simultaneous connect.
  671          */
  672         if (so->so_snd.ssb_cc > 0 &&
  673             tp->t_state != TCPS_SYN_RECEIVED &&
  674             !tcp_callout_active(tp, tp->tt_rexmt) &&
  675             !tcp_callout_active(tp, tp->tt_persist)) {
  676                 tp->t_rxtshift = 0;
  677                 tcp_setpersist(tp);
  678         }
  679 
  680         /*
  681          * No reason to send a segment, just return.
  682          */
  683         tp->t_flags &= ~TF_XMITNOW;
  684         return (0);
  685 
  686 send:
  687         if (need_sched && len > 0) {
  688                 tcp_output_sched(tp);
  689                 return 0;
  690         }
  691 
  692         /*
  693          * Before ESTABLISHED, force sending of initial options
   694          * unless TCP is set not to do any options.
  695          * NOTE: we assume that the IP/TCP header plus TCP options
  696          * always fit in a single mbuf, leaving room for a maximum
  697          * link header, i.e.
  698          *      max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES
  699          */
  700         optlen = 0;
  701         if (isipv6)
  702                 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
  703         else
  704                 hdrlen = sizeof(struct tcpiphdr);
  705         if (flags & TH_SYN) {
  706                 tp->snd_nxt = tp->iss;
  707                 if (!(tp->t_flags & TF_NOOPT)) {
  708                         u_short mss;
  709 
  710                         opt[0] = TCPOPT_MAXSEG;
  711                         opt[1] = TCPOLEN_MAXSEG;
  712                         mss = htons((u_short) tcp_mssopt(tp));
  713                         memcpy(opt + 2, &mss, sizeof mss);
  714                         optlen = TCPOLEN_MAXSEG;
  715 
  716                         if ((tp->t_flags & TF_REQ_SCALE) &&
  717                             (!(flags & TH_ACK) ||
  718                              (tp->t_flags & TF_RCVD_SCALE))) {
  719                                 *((u_int32_t *)(opt + optlen)) = htonl(
  720                                         TCPOPT_NOP << 24 |
  721                                         TCPOPT_WINDOW << 16 |
  722                                         TCPOLEN_WINDOW << 8 |
  723                                         tp->request_r_scale);
  724                                 optlen += 4;
  725                         }
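                               /*
                                * The 32-bit word written above packs one NOP
                                * pad byte followed by the 3-byte window scale
                                * option (kind, length, shift count), keeping
                                * the option list 32-bit aligned.
                                */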
  726 
  727                         if ((tcp_do_sack && !(flags & TH_ACK)) ||
  728                             tp->t_flags & TF_SACK_PERMITTED) {
  729                                 uint32_t *lp = (uint32_t *)(opt + optlen);
  730 
  731                                 *lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
  732                                 optlen += TCPOLEN_SACK_PERMITTED_ALIGNED;
  733                         }
  734                 }
  735         }
  736 
  737         /*
  738          * Send a timestamp and echo-reply if this is a SYN and our side
  739          * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
  740          * and our peer have sent timestamps in our SYN's.
  741          */
  742         if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
  743             !(flags & TH_RST) &&
  744             (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) {
  745                 u_int32_t *lp = (u_int32_t *)(opt + optlen);
  746 
  747                 /* Form timestamp option as shown in appendix A of RFC 1323. */
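                       /*
                        * TCPOPT_TSTAMP_HDR packs two NOP pad bytes plus the
                        * option kind and length; the two words that follow
                        * carry our clock (TSval) and the peer's most recent
                        * timestamp (TSecr), 12 bytes in total
                        * (TCPOLEN_TSTAMP_APPA).
                        */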
  748                 *lp++ = htonl(TCPOPT_TSTAMP_HDR);
  749                 *lp++ = htonl(ticks);
  750                 *lp   = htonl(tp->ts_recent);
  751                 optlen += TCPOLEN_TSTAMP_APPA;
  752         }
  753 
  754         /* Set receive buffer autosizing timestamp. */
  755         if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE))
  756                 tp->rfbuf_ts = ticks;
  757 
  758         /*
  759          * If this is a SACK connection and we have a block to report,
  760          * fill in the SACK blocks in the TCP options.
  761          */
  762         if (report_sack)
  763                 tcp_sack_fill_report(tp, opt, &optlen);
  764 
  765 #ifdef TCP_SIGNATURE
  766         if (tp->t_flags & TF_SIGNATURE) {
  767                 int i;
  768                 u_char *bp;
  769                 /*
  770                  * Initialize TCP-MD5 option (RFC2385)
  771                  */
  772                 bp = (u_char *)opt + optlen;
  773                 *bp++ = TCPOPT_SIGNATURE;
  774                 *bp++ = TCPOLEN_SIGNATURE;
  775                 sigoff = optlen + 2;
  776                 for (i = 0; i < TCP_SIGLEN; i++)
  777                         *bp++ = 0;
  778                 optlen += TCPOLEN_SIGNATURE;
  779                 /*
  780                  * Terminate options list and maintain 32-bit alignment.
  781                  */
  782                 *bp++ = TCPOPT_NOP;
  783                 *bp++ = TCPOPT_EOL;
  784                 optlen += 2;
  785         }
  786 #endif /* TCP_SIGNATURE */
  787         KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
  788         hdrlen += optlen;
  789 
  790         if (isipv6) {
  791                 ipoptlen = ip6_optlen(inp);
  792         } else {
  793                 if (inp->inp_options) {
  794                         ipoptlen = inp->inp_options->m_len -
  795                             offsetof(struct ipoption, ipopt_list);
  796                 } else {
  797                         ipoptlen = 0;
  798                 }
  799         }
  800 #ifdef IPSEC
  801         ipoptlen += ipsec_hdrsiz_tcp(tp);
  802 #endif
  803 
  804         if (use_tso) {
  805                 /* TSO segment length must be multiple of segment size */
  806                 KASSERT(len >= (2 * segsz) && (len % segsz == 0),
  807                     ("invalid TSO len %ld, segsz %u", len, segsz));
  808         } else {
  809                 KASSERT(len <= segsz,
  810                     ("invalid len %ld, segsz %u", len, segsz));
  811 
  812                 /*
  813                  * Adjust data length if insertion of options will bump
  814                  * the packet length beyond the t_maxopd length.  Clear
  815                  * FIN to prevent premature closure since there is still
  816                  * more data to send after this (now truncated) packet.
  817                  *
  818                  * If just the options do not fit we are in a no-win
  819                  * situation and we treat it as an unreachable host.
  820                  */
  821                 if (len + optlen + ipoptlen > tp->t_maxopd) {
  822                         if (tp->t_maxopd <= optlen + ipoptlen) {
  823                                 static time_t last_optlen_report;
  824 
  825                                 if (last_optlen_report != time_uptime) {
  826                                         last_optlen_report = time_uptime;
  827                                         kprintf("tcpcb %p: MSS (%d) too "
  828                                             "small to hold options!\n",
  829                                             tp, tp->t_maxopd);
  830                                 }
  831                                 error = EHOSTUNREACH;
  832                                 goto out;
  833                         } else {
  834                                 flags &= ~TH_FIN;
  835                                 len = tp->t_maxopd - optlen - ipoptlen;
  836                                 sendalot = TRUE;
  837                         }
  838                 }
  839         }
  840 
  841 #ifdef INET6
  842         KASSERT(max_linkhdr + hdrlen <= MCLBYTES, ("tcphdr too big"));
  843 #else
  844         KASSERT(max_linkhdr + hdrlen <= MHLEN, ("tcphdr too big"));
  845 #endif
  846 
  847         /*
  848          * Grab a header mbuf, attaching a copy of data to
  849          * be transmitted, and initialize the header from
  850          * the template for sends on this connection.
  851          */
  852         if (len) {
  853                 if ((tp->t_flags & TF_FORCE) && len == 1)
  854                         tcpstat.tcps_sndprobe++;
  855                 else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
  856                         if (tp->snd_nxt == tp->snd_una)
  857                                 tp->snd_max_rexmt = tp->snd_max;
  858                         if (nsacked) {
  859                                 tcpstat.tcps_sndsackrtopack++;
  860                                 tcpstat.tcps_sndsackrtobyte += len;
  861                         }
  862                         tcpstat.tcps_sndrexmitpack++;
  863                         tcpstat.tcps_sndrexmitbyte += len;
  864                 } else {
  865                         tcpstat.tcps_sndpack++;
  866                         tcpstat.tcps_sndbyte += len;
  867                 }
  868                 if (idle_cwv) {
  869                         idle_cwv = FALSE;
  870                         tcp_idle_cwnd_validate(tp);
  871                 }
  872                 /* Update last send time after CWV */
  873                 tp->snd_last = ticks;
  874 #ifdef notyet
  875                 if ((m = m_copypack(so->so_snd.ssb_mb, off, (int)len,
  876                     max_linkhdr + hdrlen)) == NULL) {
  877                         error = ENOBUFS;
  878                         goto after_th;
  879                 }
  880                 /*
  881                  * m_copypack left space for our hdr; use it.
  882                  */
  883                 m->m_len += hdrlen;
  884                 m->m_data -= hdrlen;
  885 #else
  886 #ifndef INET6
  887                 m = m_gethdr(MB_DONTWAIT, MT_HEADER);
  888 #else
  889                 m = m_getl(hdrlen + max_linkhdr, MB_DONTWAIT, MT_HEADER,
  890                            M_PKTHDR, NULL);
  891 #endif
  892                 if (m == NULL) {
  893                         error = ENOBUFS;
  894                         goto after_th;
  895                 }
  896                 m->m_data += max_linkhdr;
  897                 m->m_len = hdrlen;
  898                 if (len <= MHLEN - hdrlen - max_linkhdr) {
  899                         m_copydata(so->so_snd.ssb_mb, off, (int) len,
  900                             mtod(m, caddr_t) + hdrlen);
  901                         m->m_len += len;
  902                 } else {
  903                         m->m_next = m_copy(so->so_snd.ssb_mb, off, (int) len);
  904                         if (m->m_next == NULL) {
  905                                 m_free(m);
  906                                 m = NULL;
  907                                 error = ENOBUFS;
  908                                 goto after_th;
  909                         }
  910                 }
  911 #endif
  912                 /*
  913                  * If we're sending everything we've got, set PUSH.
  914                  * (This will keep happy those implementations which only
  915                  * give data to the user when a buffer fills or
  916                  * a PUSH comes in.)
  917                  */
  918                 if (off + len == so->so_snd.ssb_cc)
  919                         flags |= TH_PUSH;
  920         } else {
  921                 if (tp->t_flags & TF_ACKNOW)
  922                         tcpstat.tcps_sndacks++;
  923                 else if (flags & (TH_SYN | TH_FIN | TH_RST))
  924                         tcpstat.tcps_sndctrl++;
  925                 else if (SEQ_GT(tp->snd_up, tp->snd_una))
  926                         tcpstat.tcps_sndurg++;
  927                 else
  928                         tcpstat.tcps_sndwinup++;
  929 
  930                 MGETHDR(m, MB_DONTWAIT, MT_HEADER);
  931                 if (m == NULL) {
  932                         error = ENOBUFS;
  933                         goto after_th;
  934                 }
  935                 if (isipv6 &&
  936                     (hdrlen + max_linkhdr > MHLEN) && hdrlen <= MHLEN)
  937                         MH_ALIGN(m, hdrlen);
  938                 else
  939                         m->m_data += max_linkhdr;
  940                 m->m_len = hdrlen;
  941 
  942                 /*
  943                  * Prioritize SYN, SYN|ACK and pure ACK.
  944                  * Leave FIN and RST as they are.
  945                  */
  946                 if (tcp_prio_synack && (flags & (TH_FIN | TH_RST)) == 0)
  947                         m->m_flags |= M_PRIO;
  948         }
  949         m->m_pkthdr.rcvif = NULL;
  950         if (isipv6) {
  951                 ip6 = mtod(m, struct ip6_hdr *);
  952                 th = (struct tcphdr *)(ip6 + 1);
  953                 tcp_fillheaders(tp, ip6, th, use_tso);
  954         } else {
  955                 ip = mtod(m, struct ip *);
  956                 th = (struct tcphdr *)(ip + 1);
  957                 /* this picks up the pseudo header (w/o the length) */
  958                 tcp_fillheaders(tp, ip, th, use_tso);
  959         }
  960 after_th:
  961         /*
  962          * Fill in fields, remembering maximum advertised
  963          * window for use in delaying messages about window sizes.
  964          * If resending a FIN, be sure not to use a new sequence number.
  965          */
  966         if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
  967             tp->snd_nxt == tp->snd_max)
  968                 tp->snd_nxt--;
  969 
  970         if (th != NULL) {
  971                 /*
  972                  * If we are doing retransmissions, then snd_nxt will
  973                  * not reflect the first unsent octet.  For ACK only
  974                  * packets, we do not want the sequence number of the
  975                  * retransmitted packet, we want the sequence number
  976                  * of the next unsent octet.  So, if there is no data
  977                  * (and no SYN or FIN), use snd_max instead of snd_nxt
  978                  * when filling in ti_seq.  But if we are in persist
  979                  * state, snd_max might reflect one byte beyond the
  980                  * right edge of the window, so use snd_nxt in that
  981                  * case, since we know we aren't doing a retransmission.
  982                  * (retransmit and persist are mutually exclusive...)
  983                  */
  984                 if (len || (flags & (TH_SYN|TH_FIN)) ||
  985                     tcp_callout_active(tp, tp->tt_persist))
  986                         th->th_seq = htonl(tp->snd_nxt);
  987                 else
  988                         th->th_seq = htonl(tp->snd_max);
  989                 th->th_ack = htonl(tp->rcv_nxt);
  990                 if (optlen) {
  991                         bcopy(opt, th + 1, optlen);
  992                         th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
  993                 }
  994                 th->th_flags = flags;
  995         }
  996 
  997         /*
  998          * Calculate receive window.  Don't shrink window, but avoid
  999          * silly window syndrome by sending a 0 window if the actual
  1000          * window is less than one segment.
 1001          */
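       /*
        * Concretely: suppress the advertisement entirely when free space is
        * below both a quarter of the receive buffer and one segment, never
        * advertise less than what the peer has already been told about
        * (rcv_adv - rcv_nxt), and clamp to the largest window representable
        * with the negotiated scale factor.
        */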
 1002         if (recvwin < (long)(so->so_rcv.ssb_hiwat / 4) &&
 1003             recvwin < (long)segsz)
 1004                 recvwin = 0;
 1005         if (recvwin < (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt))
 1006                 recvwin = (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt);
 1007         if (recvwin > (long)TCP_MAXWIN << tp->rcv_scale)
 1008                 recvwin = (long)TCP_MAXWIN << tp->rcv_scale;
 1009 
 1010         /*
 1011          * Adjust the RXWIN0SENT flag - indicate that we have advertised
 1012          * a 0 window.  This may cause the remote transmitter to stall.  This
 1013          * flag tells soreceive() to disable delayed acknowledgements when
 1014          * draining the buffer.  This can occur if the receiver is attempting
  1015          * to read more data than can be buffered prior to transmitting on
 1016          * the connection.
 1017          */
 1018         if (recvwin == 0)
 1019                 tp->t_flags |= TF_RXWIN0SENT;
 1020         else
 1021                 tp->t_flags &= ~TF_RXWIN0SENT;
 1022 
 1023         if (th != NULL)
 1024                 th->th_win = htons((u_short) (recvwin>>tp->rcv_scale));
 1025 
 1026         if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
 1027                 KASSERT(!use_tso, ("URG with TSO"));
 1028                 if (th != NULL) {
 1029                         th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
 1030                         th->th_flags |= TH_URG;
 1031                 }
 1032         } else {
 1033                 /*
 1034                  * If no urgent pointer to send, then we pull
 1035                  * the urgent pointer to the left edge of the send window
 1036                  * so that it doesn't drift into the send window on sequence
 1037                  * number wraparound.
 1038                  */
 1039                 tp->snd_up = tp->snd_una;               /* drag it along */
 1040         }
 1041 
 1042         if (th != NULL) {
 1043 #ifdef TCP_SIGNATURE
 1044                 if (tp->t_flags & TF_SIGNATURE) {
 1045                         tcpsignature_compute(m, len, optlen,
 1046                             (u_char *)(th + 1) + sigoff, IPSEC_DIR_OUTBOUND);
 1047                 }
 1048 #endif /* TCP_SIGNATURE */
 1049 
 1050                 /*
 1051                  * Put TCP length in extended header, and then
 1052                  * checksum extended header and data.
 1053                  */
  1054                 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
 1055                 if (isipv6) {
 1056                         /*
  1057                          * ip6_plen need not be filled in now; it will
  1058                          * be filled in by ip6_output().
 1059                          */
 1060                         th->th_sum = in6_cksum(m, IPPROTO_TCP,
 1061                             sizeof(struct ip6_hdr),
 1062                             sizeof(struct tcphdr) + optlen + len);
 1063                 } else {
 1064                         m->m_pkthdr.csum_thlen = sizeof(struct tcphdr) + optlen;
 1065                         if (use_tso) {
 1066                                 m->m_pkthdr.csum_flags = CSUM_TSO;
 1067                                 m->m_pkthdr.tso_segsz = segsz;
 1068                         } else {
 1069                                 m->m_pkthdr.csum_flags = CSUM_TCP;
 1070                                 m->m_pkthdr.csum_data =
 1071                                     offsetof(struct tcphdr, th_sum);
 1072                                 if (len + optlen) {
 1073                                         th->th_sum = in_addword(th->th_sum,
 1074                                             htons((u_short)(optlen + len)));
 1075                                 }
 1076                         }
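                               /*
                                * In the non-TSO path above, the pseudo-header
                                * sum left in th_sum by tcp_fillheaders() is
                                * extended with the length of the options plus
                                * payload; the offloading NIC (or the software
                                * fallback) then completes the checksum over
                                * the segment.
                                */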
 1077 
 1078                         /*
 1079                          * IP version must be set here for ipv4/ipv6 checking
 1080                          * later
 1081                          */
 1082                         KASSERT(ip->ip_v == IPVERSION,
 1083                             ("%s: IP version incorrect: %d",
 1084                              __func__, ip->ip_v));
 1085                 }
 1086         }
 1087 
 1088         /*
 1089          * In transmit state, time the transmission and arrange for
 1090          * the retransmit.  In persist state, just set snd_max.
 1091          */
 1092         if (!(tp->t_flags & TF_FORCE) ||
 1093             !tcp_callout_active(tp, tp->tt_persist)) {
 1094                 tcp_seq startseq = tp->snd_nxt;
 1095 
 1096                 /*
 1097                  * Advance snd_nxt over sequence space of this segment.
 1098                  */
 1099                 if (flags & (TH_SYN | TH_FIN)) {
 1100                         if (flags & TH_SYN)
 1101                                 tp->snd_nxt++;
 1102                         if (flags & TH_FIN) {
 1103                                 tp->snd_nxt++;
 1104                                 tp->t_flags |= TF_SENTFIN;
 1105                         }
 1106                 }
 1107                 tp->snd_nxt += len;
 1108                 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
 1109                         tp->snd_max = tp->snd_nxt;
 1110                         /*
 1111                          * Time this transmission if not a retransmission and
 1112                          * not currently timing anything.
 1113                          */
 1114                         if (tp->t_rtttime == 0) {
 1115                                 tp->t_rtttime = ticks;
 1116                                 tp->t_rtseq = startseq;
 1117                                 tcpstat.tcps_segstimed++;
 1118                         }
 1119                 }
 1120 
 1121                 /*
 1122                  * Set retransmit timer if not currently set,
 1123                  * and not doing a pure ack or a keep-alive probe.
 1124                  * Initial value for retransmit timer is smoothed
 1125                  * round-trip time + 2 * round-trip time variance.
 1126                  * Initialize shift counter which is used for backoff
 1127                  * of retransmit time.
 1128                  */
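               /*
                * For example (hypothetical values): with a smoothed RTT of
                * 100ms and an RTT variance of 20ms the initial retransmit
                * timeout would be about 140ms; t_rxtcur holds that value in
                * ticks and is backed off via t_rxtshift on each timeout.
                */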
 1129                 if (!tcp_callout_active(tp, tp->tt_rexmt) &&
 1130                     tp->snd_nxt != tp->snd_una) {
 1131                         if (tcp_callout_active(tp, tp->tt_persist)) {
 1132                                 tcp_callout_stop(tp, tp->tt_persist);
 1133                                 tp->t_rxtshift = 0;
 1134                         }
 1135                         tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
 1136                             tcp_timer_rexmt);
 1137                 }
 1138         } else {
 1139                 /*
 1140                  * Persist case, update snd_max but since we are in
 1141                  * persist mode (no window) we do not update snd_nxt.
 1142                  */
 1143                 int xlen = len;
 1144                 if (flags & TH_SYN)
 1145                         panic("tcp_output: persist timer to send SYN");
 1146                 if (flags & TH_FIN) {
 1147                         ++xlen;
 1148                         tp->t_flags |= TF_SENTFIN;
 1149                 }
 1150                 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
 1151                         tp->snd_max = tp->snd_nxt + xlen;
 1152         }
 1153 
 1154         if (th != NULL) {
 1155 #ifdef TCPDEBUG
 1156                 /* Trace. */
 1157                 if (so->so_options & SO_DEBUG) {
 1158                         tcp_trace(TA_OUTPUT, tp->t_state, tp,
 1159                             mtod(m, void *), th, 0);
 1160                 }
 1161 #endif
 1162 
 1163                 /*
 1164                  * Fill in IP length and desired time to live and
 1165                  * send to IP level.  There should be a better way
 1166                  * to handle ttl and tos; we could keep them in
 1167                  * the template, but need a way to checksum without them.
 1168                  */
 1169                 /*
  1170                  * m->m_pkthdr.len should have been set before the cksum
  1171                  * calculation, because in6_cksum() needs it.
 1172                  */
 1173                 if (isipv6) {
 1174                         /*
 1175                          * we separately set hoplimit for every segment,
 1176                          * since the user might want to change the value
 1177                          * via setsockopt.  Also, desired default hop
 1178                          * limit might be changed via Neighbor Discovery.
 1179                          */
 1180                         ip6->ip6_hlim = in6_selecthlim(inp,
 1181                             (inp->in6p_route.ro_rt ?
 1182                              inp->in6p_route.ro_rt->rt_ifp : NULL));
 1183 
 1184                         /* TODO: IPv6 IP6TOS_ECT bit on */
 1185                         error = ip6_output(m, inp->in6p_outputopts,
 1186                             &inp->in6p_route, (so->so_options & SO_DONTROUTE),
 1187                             NULL, NULL, inp);
 1188                 } else {
 1189                         struct rtentry *rt;
 1190                         ip->ip_len = m->m_pkthdr.len;
 1191 #ifdef INET6
 1192                         if (INP_CHECK_SOCKAF(so, AF_INET6))
 1193                                 ip->ip_ttl = in6_selecthlim(inp,
 1194                                     (inp->in6p_route.ro_rt ?
 1195                                      inp->in6p_route.ro_rt->rt_ifp : NULL));
 1196                         else
 1197 #endif
 1198                                 ip->ip_ttl = inp->inp_ip_ttl;   /* XXX */
 1199 
 1200                         ip->ip_tos = inp->inp_ip_tos;   /* XXX */
 1201                         /*
 1202                          * See if we should do MTU discovery.
 1203                          * We do it only if the following are true:
 1204                          *      1) we have a valid route to the destination
 1205                          *      2) the MTU is not locked (if it is,
 1206                          *         then discovery has been disabled)
 1207                          */
 1208                         if (path_mtu_discovery &&
 1209                             (rt = inp->inp_route.ro_rt) &&
 1210                             (rt->rt_flags & RTF_UP) &&
 1211                             !(rt->rt_rmx.rmx_locks & RTV_MTU))
 1212                                 ip->ip_off |= IP_DF;
 1213 
 1214                         error = ip_output(m, inp->inp_options, &inp->inp_route,
 1215                                           (so->so_options & SO_DONTROUTE) |
 1216                                           IP_DEBUGROUTE, NULL, inp);
 1217                 }
 1218         } else {
 1219                 KASSERT(error != 0, ("no error, but th not set"));
 1220         }
 1221         if (error) {
 1222                 tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW);
 1223 
 1224                 /*
 1225                  * We know that the packet was lost, so back out the
 1226                  * sequence number advance, if any.
 1227                  */
 1228                 if (!(tp->t_flags & TF_FORCE) ||
 1229                     !tcp_callout_active(tp, tp->tt_persist)) {
 1230                         /*
 1231                          * No need to check for TH_FIN here because
 1232                          * the TF_SENTFIN flag handles that case.
 1233                          */
 1234                         if (!(flags & TH_SYN))
 1235                                 tp->snd_nxt -= len;
 1236                 }
 1237 
 1238 out:
 1239                 if (error == ENOBUFS) {
 1240                         /*
 1241                          * If we can't send, make sure there is something
 1242                          * to get us going again later.
 1243                          *
 1244                          * The persist timer isn't necessarily allowed in all
 1245                          * states, so use the rexmt timer instead.
 1246                          */
 1247                         if (!tcp_callout_active(tp, tp->tt_rexmt) &&
 1248                             !tcp_callout_active(tp, tp->tt_persist)) {
 1249                                 tcp_callout_reset(tp, tp->tt_rexmt,
 1250                                                   tp->t_rxtcur,
 1251                                                   tcp_timer_rexmt);
 1252 #if 0
 1253                                 tp->t_rxtshift = 0;
 1254                                 tcp_setpersist(tp);
 1255 #endif
 1256                         }
 1257                         tcp_quench(inp, 0);
 1258                         return (0);
 1259                 }
 1260                 if (error == EMSGSIZE) {
 1261                         /*
 1262                          * ip_output() will have already fixed the route
 1263                          * for us.  tcp_mtudisc() will, as its last action,
 1264                          * initiate retransmission, so it is important to
 1265                          * not do so here.
 1266                          */
 1267                         tcp_mtudisc(inp, 0);
 1268                         return (0);
 1269                 }
 1270                 if ((error == EHOSTUNREACH || error == ENETDOWN) &&
 1271                     TCPS_HAVERCVDSYN(tp->t_state)) {
 1272                         tp->t_softerror = error;
 1273                         return (0);
 1274                 }
 1275                 return (error);
 1276         }
 1277         tcpstat.tcps_sndtotal++;
 1278 
 1279         /*
 1280          * Data sent (as far as we can tell).
 1281          *
 1282          * If this advertises a larger window than any other segment,
 1283          * then remember the size of the advertised window.
 1284          *
 1285          * Any pending ACK has now been sent.
 1286          */
 1287         if (recvwin > 0 && SEQ_GT(tp->rcv_nxt + recvwin, tp->rcv_adv)) {
 1288                 tp->rcv_adv = tp->rcv_nxt + recvwin;
 1289                 tp->t_flags &= ~TF_RXRESIZED;
 1290         }
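        /*
         * Illustrative example (not part of the original source): if
         * rcv_nxt is 1000 and recvwin is 65535, the segment just sent
         * advertised space up to sequence 66535; rcv_adv records that
         * right edge so later checks can tell whether a new segment
         * would advertise a larger window than any sent before.
         */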
 1291         tp->last_ack_sent = tp->rcv_nxt;
 1292         tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW);
 1293         if (tcp_delack_enabled)
 1294                 tcp_callout_stop(tp, tp->tt_delack);
 1295         if (sendalot) {
 1296                 if (tcp_fairsend > 0 && (tp->t_flags & TF_FAIRSEND) &&
 1297                     segcnt >= tcp_fairsend)
 1298                         need_sched = TRUE;
 1299                 goto again;
 1300         }
 1301         return (0);
 1302 }
 1303 
 1304 void
 1305 tcp_setpersist(struct tcpcb *tp)
 1306 {
 1307         int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
 1308         int tt;
 1309 
 1310         if (tp->t_state == TCPS_SYN_SENT ||
 1311             tp->t_state == TCPS_SYN_RECEIVED) {
 1312                 panic("tcp_setpersist: not established yet, current %s",
 1313                       tp->t_state == TCPS_SYN_SENT ?
 1314                       "SYN_SENT" : "SYN_RECEIVED");
 1315         }
 1316 
 1317         if (tcp_callout_active(tp, tp->tt_rexmt))
 1318                 panic("tcp_setpersist: retransmit pending");
 1319         /*
 1320          * Start/restart the persistence timer.
 1321          */
 1322         TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], TCPTV_PERSMIN,
 1323                       TCPTV_PERSMAX);
 1324         tcp_callout_reset(tp, tp->tt_persist, tt, tcp_timer_persist);
 1325         if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
 1326                 tp->t_rxtshift++;
 1327 }
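
/*
 * Illustrative sketch (not part of the original source): a standalone model
 * of the persist interval computed by tcp_setpersist() above.  The doubling
 * backoff capped at 64 is an assumption about the shape of tcp_backoff[],
 * and persmin/persmax stand in for TCPTV_PERSMIN/TCPTV_PERSMAX; in the real
 * code TCPT_RANGESET() performs the clamping.
 */
#if 0
static int
persist_interval_example(int t, int rxtshift, int persmin, int persmax)
{
	/* Assumed backoff table: 1, 2, 4, ... capped at 64 */
	int backoff = 1 << (rxtshift < 6 ? rxtshift : 6);
	int tt = t * backoff;

	if (tt < persmin)
		tt = persmin;
	else if (tt > persmax)
		tt = persmax;
	return (tt);
}
#endif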
 1328 
 1329 static void
 1330 tcp_idle_cwnd_validate(struct tcpcb *tp)
 1331 {
 1332         u_long initial_cwnd = tcp_initial_window(tp);
 1333         u_long min_cwnd;
 1334 
 1335         tcpstat.tcps_sndidle++;
 1336 
 1337         /* According to RFC5681: RW=min(IW,cwnd) */
 1338         min_cwnd = min(tp->snd_cwnd, initial_cwnd);
 1339 
 1340         if (tcp_idle_cwv) {
 1341                 u_long idle_time, decay_cwnd;
 1342 
 1343                 /*
 1344                  * RFC2861, but only after an idle period.
 1345                  */
 1346 
 1347                 /*
 1348                  * Before the congestion window is reduced, ssthresh
 1349                  * is set to the maximum of its current value and 3/4
 1350                  * cwnd.  If the sender then has more data to send
 1351                  * than the decayed cwnd allows, the TCP will slow-
 1352                  * start (perform exponential increase) at least
 1353                  * half-way back up to the old value of cwnd.
 1354                  */
 1355                 tp->snd_ssthresh = max(tp->snd_ssthresh,
 1356                     (3 * tp->snd_cwnd) / 4);
 1357 
 1358                 /*
 1359                  * Decay the congestion window by half for every RTT
 1360                  * that the flow remains inactive.
 1361                  *
 1362                  * The difference between our implementation and
 1363                  * RFC2861 is that we don't allow cwnd to go below
 1364                  * the value allowed by RFC5681 (min_cwnd).
 1365                  */
 1366                 idle_time = ticks - tp->snd_last;
 1367                 decay_cwnd = tp->snd_cwnd;
 1368                 while (idle_time >= tp->t_rxtcur &&
 1369                     decay_cwnd > min_cwnd) {
 1370                         decay_cwnd >>= 1;
 1371                         idle_time -= tp->t_rxtcur;
 1372                 }
 1373                 tp->snd_cwnd = max(decay_cwnd, min_cwnd);
 1374         } else {
 1375                 /*
 1376                  * Slow-start from scratch to re-determine the send
 1377                  * congestion window.
 1378                  */
 1379                 tp->snd_cwnd = min_cwnd;
 1380         }
 1381 
 1382         /* Restart ABC counting during congestion avoidance */
 1383         tp->snd_wacked = 0;
 1384 }
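
/*
 * Illustrative sketch (not part of the original source): a standalone model
 * of the idle-decay loop above; all parameter names are hypothetical.  For
 * example, with cwnd = 64000 bytes, min_cwnd = 16000 bytes, rxtcur covering
 * 200 ms and 500 ms of idle time, the window is halved twice
 * (64000 -> 32000 -> 16000), ending at min_cwnd, while ssthresh has already
 * been raised to at least 3 * 64000 / 4 = 48000.
 */
#if 0
static u_long
idle_cwnd_decay_example(u_long cwnd, u_long min_cwnd, u_long idle_time,
    u_long rxtcur)
{
	u_long decay_cwnd = cwnd;

	/* Halve the window once for each retransmit interval spent idle. */
	while (idle_time >= rxtcur && decay_cwnd > min_cwnd) {
		decay_cwnd >>= 1;
		idle_time -= rxtcur;
	}
	return (decay_cwnd > min_cwnd ? decay_cwnd : min_cwnd);
}
#endif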
 1385 
 1386 static int
 1387 tcp_tso_getsize(struct tcpcb *tp, u_int *segsz, u_int *hlen0)
 1388 {
 1389         struct inpcb * const inp = tp->t_inpcb;
 1390 #ifdef INET6
 1391         const boolean_t isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
 1392 #else
 1393         const boolean_t isipv6 = FALSE;
 1394 #endif
 1395         unsigned int ipoptlen, optlen;
 1396         u_int hlen;
 1397 
 1398         hlen = sizeof(struct ip) + sizeof(struct tcphdr);
 1399 
 1400         if (isipv6) {
 1401                 ipoptlen = ip6_optlen(inp);
 1402         } else {
 1403                 if (inp->inp_options) {
 1404                         ipoptlen = inp->inp_options->m_len -
 1405                             offsetof(struct ipoption, ipopt_list);
 1406                 } else {
 1407                         ipoptlen = 0;
 1408                 }
 1409         }
 1410 #ifdef IPSEC
 1411         ipoptlen += ipsec_hdrsiz_tcp(tp);
 1412 #endif
 1413         hlen += ipoptlen;
 1414 
 1415         optlen = 0;
 1416         if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
 1417             (tp->t_flags & TF_RCVD_TSTMP))
 1418                 optlen += TCPOLEN_TSTAMP_APPA;
 1419         hlen += optlen;
 1420 
 1421         if (tp->t_maxopd <= optlen + ipoptlen)
 1422                 return EHOSTUNREACH;
 1423 
 1424         *segsz = tp->t_maxopd - optlen - ipoptlen;
 1425         *hlen0 = hlen;
 1426         return 0;
 1427 }
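
/*
 * Illustrative example (not part of the original source): for a plain IPv4
 * connection with no IP options and RFC 1323 timestamps in use, the header
 * estimate above is 20 (IP) + 20 (TCP) + 12 (TCPOLEN_TSTAMP_APPA) = 52
 * bytes, and with an assumed t_maxopd of 1460 the per-segment payload for
 * TSO becomes 1460 - 12 = 1448 bytes.
 */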
 1428 
 1429 static void
 1430 tcp_output_sched_handler(netmsg_t nmsg)
 1431 {
 1432         struct tcpcb *tp = nmsg->lmsg.u.ms_resultp;
 1433 
 1434         /* Reply ASAP */
 1435         crit_enter();
 1436         lwkt_replymsg(&nmsg->lmsg, 0);
 1437         crit_exit();
 1438 
 1439         tcp_output_fair(tp);
 1440 }
 1441 
 1442 void
 1443 tcp_output_init(struct tcpcb *tp)
 1444 {
 1445         netmsg_init(tp->tt_sndmore, NULL, &netisr_adone_rport, MSGF_DROPABLE,
 1446             tcp_output_sched_handler);
 1447         tp->tt_sndmore->lmsg.u.ms_resultp = tp;
 1448 }
 1449 
 1450 void
 1451 tcp_output_cancel(struct tcpcb *tp)
 1452 {
 1453         /*
 1454          * If this message is still pending to be processed, drop it;
 1455          * the MSGF_DONE check avoids dropping an already-replied message.
 1456          */
 1457         crit_enter();
 1458         if ((tp->tt_sndmore->lmsg.ms_flags & MSGF_DONE) == 0) {
 1459                 lwkt_dropmsg(&tp->tt_sndmore->lmsg);
 1460         }
 1461         crit_exit();
 1462 }
 1463 
 1464 boolean_t
 1465 tcp_output_pending(struct tcpcb *tp)
 1466 {
 1467         return ((tp->tt_sndmore->lmsg.ms_flags & MSGF_DONE) == 0);
 1471 }
 1472 
 1473 static void
 1474 tcp_output_sched(struct tcpcb *tp)
 1475 {
 1476         crit_enter();
 1477         if (tp->tt_sndmore->lmsg.ms_flags & MSGF_DONE)
 1478                 lwkt_sendmsg(netisr_cpuport(mycpuid), &tp->tt_sndmore->lmsg);
 1479         crit_exit();
 1480 }
 1481 
 1482 /*
 1483  * Fairsend
 1484  *
 1485  * Yield to other senders or receivers on the same netisr if the current
 1486  * TCP stream has sent tcp_fairsend segments and is going to burst more
 1487  * segments.  Bursting a large number of segments from a single TCP stream
 1488  * can significantly delay other senders' segments and receivers' ACKs
 1489  * when those segments and ACKs are queued onto the same hardware transmit
 1490  * queue; this causes unfairness between senders and degrades receive
 1491  * performance.
 1492  *
 1493  * Fairsend should be performed only at places that do not affect segment
 1494  * sending during congestion control, e.g.
 1495  * - User-requested output
 1496  * - ACK-input-triggered output
 1497  *
 1498  * NOTE:
 1499  * On TSO-capable devices, the TSO aggregation size limit may affect
 1500  * fairsend.
 1501  */
 1502 int
 1503 tcp_output_fair(struct tcpcb *tp)
 1504 {
 1505         int ret;
 1506 
 1507         tp->t_flags |= TF_FAIRSEND;
 1508         ret = tcp_output(tp);
 1509         tp->t_flags &= ~TF_FAIRSEND;
 1510 
 1511         return ret;
 1512 }
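
/*
 * Illustrative note (not part of the original source): callers subject to
 * fairsend, e.g. user-requested output or ACK-triggered output, are expected
 * to enter through tcp_output_fair() rather than tcp_output() directly.
 * With TF_FAIRSEND set, the send loop above counts segments and, once
 * tcp_fairsend of them have been queued in one pass, presumably defers the
 * rest via tcp_output_sched(); the scheduled netmsg is handled by
 * tcp_output_sched_handler(), which re-enters tcp_output_fair() on the
 * owning netisr so that other streams get a chance to transmit in between.
 */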
