/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, path_mtu_discovery) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(path_mtu_discovery), 1,
	"Enable Path MTU Discovery");

VNET_DEFINE(int, tcp_do_tso) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_tso), 0,
	"Enable TCP Segmentation Offload");

VNET_DEFINE(int, tcp_sendspace) = 1024*32;
#define	V_tcp_sendspace	VNET(tcp_sendspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size");

VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_autosndbuf), 0,
	"Enable automatic send buffer sizing");

VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_inc), 0,
	"Increment step size of automatic send buffer");

VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_max), 0,
	"Max size of automatic send buffer");

VNET_DEFINE(int, tcp_sendbuf_auto_lowat) = 0;
#define	V_tcp_sendbuf_auto_lowat	VNET(tcp_sendbuf_auto_lowat)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto_lowat, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendbuf_auto_lowat), 0,
	"Modify threshold for auto send buffer growth to account for SO_SNDLOWAT");

/*
 * Make sure that either retransmit or persist timer is set for SYN, FIN and
 * non-ACK.
 */
#define TCP_XMIT_TIMER_ASSERT(tp, len, th_flags)			\
	KASSERT(((len) == 0 && ((th_flags) & (TH_SYN | TH_FIN)) == 0) ||\
	    tcp_timer_active((tp), TT_REXMT) ||				\
	    tcp_timer_active((tp), TT_PERSIST),				\
	    ("neither rexmt nor persist timer is set"))

static void inline	cc_after_idle(struct tcpcb *tp);

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established output helper hook.
 */
void
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, uint32_t len, int tso)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;
		hhook_data.len = len;
		hhook_data.tso = tso;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data,
		    tp->osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
static void inline
cc_after_idle(struct tcpcb *tp)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(tp->ccv);
}

/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	int32_t len;
	uint32_t recwin, sendwin;
	int off, flags, error = 0;	/* Keep compiler happy */
	u_int if_hw_tsomaxsegcount = 0;
	u_int if_hw_tsomaxsegsize;
	struct mbuf *m;
	struct ip *ip = NULL;
#ifdef TCPDEBUG
	struct ipovly *ipov = NULL;
#endif
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	unsigned ipsec_optlen = 0;
#endif
	int idle, sendalot, curticks;
	int sack_rxmit, sack_bytes_rxmt;
	struct sackhole *p;
	int tso, mtu;
	struct tcpopt to;
	unsigned int wanted_cookie = 0;
	unsigned int dont_sendalot = 0;
#if 0
	int maxburst = TCP_MAXBURST;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;

	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return (tcp_offload_output(tp));
#endif

	/*
	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
	 * only allow the initial SYN or SYN|ACK and those sent
	 * by the retransmit timer.
	 */
	if (IS_FASTOPEN(tp->t_flags) &&
	    ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED)) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&	/* initial SYN or SYN|ACK sent */
	    (tp->snd_nxt != tp->snd_una))	/* not a retransmit */
		return (0);
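
	/*
	 * Illustrative example (numbers are not from this code): a
	 * client that writes 10 kB right after initiating a TFO
	 * connect may only fit the first ~1.4 kB into the SYN; the
	 * check above keeps the remainder queued in the send buffer
	 * until the SYN|ACK arrives or the retransmit timer fires,
	 * rather than streaming it prematurely.
	 */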

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (((ticks - tp->t_rcvtime) >= tp->t_rxtcur) ||
	    (tp->t_sndtime && ((ticks - tp->t_sndtime) >= tp->t_rxtcur))))
		cc_after_idle(tp);
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
again:
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt.  There may be SACK information that allows us to avoid
	 * resending already delivered data.  Adjust snd_nxt accordingly.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_LT(tp->snd_nxt, tp->snd_max))
		tcp_sack_adjust(tp);
	sendalot = 0;
	tso = 0;
	mtu = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Send any SACK-generated retransmissions.  If we're explicitly trying
	 * to send out new data (when sendalot is 1), bypass this function.
	 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
	 * we're replacing a (future) new transmission with a retransmission
	 * now, and we previously incremented snd_cwnd in tcp_input().
	 */
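	/*
	 * Illustrative example: if the peer's SACK blocks show that,
	 * say, 8 kB in the middle of the send window never arrived,
	 * tcp_sack_output() returns the corresponding scoreboard hole
	 * below and we retransmit starting at p->rxmit instead of
	 * sending new data from snd_nxt.
	 */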
	/*
	 * Still in SACK recovery, reset the rxmit flag to zero.
	 */
	sack_rxmit = 0;
	sack_bytes_rxmt = 0;
	len = 0;
	p = NULL;
	if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		uint32_t cwin;

		cwin =
		    imax(min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt, 0);
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover.  Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				goto after_sack_rexmit;
			} else {
				/* Can rexmit part of the current hole */
				len = ((int32_t)ulmin(cwin,
				    SEQ_SUB(tp->snd_recover, p->rxmit)));
			}
		} else {
			len = ((int32_t)ulmin(cwin,
			    SEQ_SUB(p->end, p->rxmit)));
		}
		if (len > 0) {
			off = SEQ_SUB(p->rxmit, tp->snd_una);
			KASSERT(off >= 0, ("%s: sack block to the left of una: %d",
			    __func__, off));
			sack_rxmit = 1;
			sendalot = 1;
			TCPSTAT_INC(tcps_sack_rexmits);
			TCPSTAT_ADD(tcps_sack_rexmit_bytes,
			    min(len, tcp_maxseg(tp)));
		}
	}
after_sack_rexmit:
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	SOCKBUF_LOCK(&so->so_snd);
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCEDATA) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < sbused(&so->so_snd))
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_timer_activate(tp, TT_PERSIST, 0);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if (sack_rxmit == 0) {
		if (sack_bytes_rxmt == 0)
			len = ((int32_t)min(sbavail(&so->so_snd), sendwin) -
			    off);
		else {
			int32_t cwin;

			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = ((int32_t)min(sbavail(&so->so_snd), tp->snd_wnd) -
			    off);
			/*
			 * Don't remove this (len > 0) check!
			 * We explicitly check for len > 0 here (although it
			 * isn't really necessary), to work around a gcc
			 * optimization issue - to force gcc to compute
			 * len above.  Without this check, the computation
			 * of len is bungled by the optimizer.
			 */
			if (len > 0) {
				cwin = tp->snd_cwnd - imax(0, (int32_t)
				    (tp->snd_nxt - tp->sack_newdata)) -
				    sack_bytes_rxmt;
				if (cwin < 0)
					cwin = 0;
				len = imin(len, cwin);
			}
		}
	}

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED)
			flags &= ~TH_SYN;
		/*
		 * When sending additional segments following a TFO SYN|ACK,
		 * do not include the SYN bit.
		 */
		if (IS_FASTOPEN(tp->t_flags) &&
		    (tp->t_state == TCPS_SYN_RECEIVED))
			flags &= ~TH_SYN;
		off--, len++;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	/*
	 * On TFO sockets, ensure no data is sent in the following cases:
	 *
	 *  - When retransmitting SYN|ACK on a passively-created socket
	 *
	 *  - When retransmitting SYN on an actively created socket
	 *
	 *  - When sending a zero-length cookie (cookie request) on an
	 *    actively created socket
	 *
	 *  - When the socket is in the CLOSED state (RST is being sent)
	 */
	if (IS_FASTOPEN(tp->t_flags) &&
	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
	     ((tp->t_state == TCPS_SYN_SENT) &&
	      (tp->t_tfo_client_cookie_len == 0)) ||
	     (flags & TH_RST)))
		len = 0;
	if (len <= 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 *
		 * We also do a general check here to ensure that
		 * we will set the persist timer when we have data
		 * to send, but a 0-byte window.  This makes sure
		 * the persist timer is set even if the packet
		 * hits one of the "goto send" lines below.
		 */
		len = 0;
		if ((sendwin == 0) && (TCPS_HAVEESTABLISHED(tp->t_state)) &&
		    (off < (int) sbavail(&so->so_snd))) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_timer_active(tp, TT_PERSIST))
				tcp_setpersist(tp);
		}
	}

	/* len will be >= 0 after this point. */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	tcp_sndbuf_autoscale(tp, so, sendwin);

	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state.  The
	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and
	 * IP options prevent using TSO.  With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets.  This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 *
	 * IPv4 handling has a clear separation of ip options and ip header
	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
	 * the right thing below to provide length of just ip options and thus
	 * checking for ipoptlen is enough to decide if ip options are present.
	 */
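	/*
	 * Illustrative example: a 64 kB run of new bulk data with a
	 * 1448 byte MSS would otherwise be carved into ~45 separate
	 * segments here; with TSO we build one large packet and let
	 * the NIC perform the segmentation.
	 */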
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6))
		ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4))
		ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
#endif /* INET */
#endif /* IPSEC */
#ifdef INET6
	if (isipv6)
		ipoptlen = ip6_optlen(tp->t_inpcb);
	else
#endif
	if (tp->t_inpcb->inp_options)
		ipoptlen = tp->t_inpcb->inp_options->m_len -
		    offsetof(struct ipoption, ipopt_list);
	else
		ipoptlen = 0;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	ipoptlen += ipsec_optlen;
#endif

	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
	    ipoptlen == 0 && !(flags & TH_SYN))
		tso = 1;

	if (sack_rxmit) {
		if (SEQ_LT(p->rxmit + len, tp->snd_una + sbused(&so->so_snd)))
			flags &= ~TH_FIN;
	} else {
		if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
		    sbused(&so->so_snd)))
			flags &= ~TH_FIN;
	}

	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
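	/*
	 * Illustrative example: with t_maxseg = 1460, a lone 512 byte
	 * write on an idle connection goes out immediately under the
	 * "last buffer and idle or NODELAY" rule below, while the same
	 * 512 bytes in the middle of a larger pending stream are held
	 * until a full segment accumulates or an ACK opens the window.
	 */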
	if (len) {
		if (len >= tp->t_maxseg)
			goto send;
		/*
		 * As the TCP header options are now
		 * considered when setting up the initial
		 * window, we would not send the last segment
		 * if we skip considering the option length here.
		 * Note: this may not work when tcp headers change
		 * very dynamically in the future.
		 */
		if ((((tp->t_flags & TF_SIGNATURE) ?
		    PADTCPOLEN(TCPOLEN_SIGNATURE) : 0) +
		    ((tp->t_flags & TF_RCVD_TSTMP) ?
		    PADTCPOLEN(TCPOLEN_TIMESTAMP) : 0) +
		    len) >= tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) &&
		    (tp->t_flags & TF_NOPUSH) == 0) {
			goto send;
		}
		if (tp->t_flags & TF_FORCEDATA)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (sack_rxmit)
			goto send;
	}

	/*
	 * Sending of standalone window updates.
	 *
	 * Window updates are important when we close our window due to a
	 * full socket buffer and are opening it again after the application
	 * reads data from it.  Once the window has opened again and the
	 * remote end starts to send again the ACK clock takes over and
	 * provides the most current window information.
	 *
	 * We must avoid the silly window syndrome whereby every read
	 * from the receive buffer, no matter how small, causes a window
	 * update to be sent.  We also should avoid sending a flurry of
	 * window updates when the socket buffer had queued a lot of data
	 * and the application is doing small reads.
	 *
	 * Prevent a flurry of pointless window updates by only sending
	 * an update when we can increase the advertised window by more
	 * than 1/4th of the socket buffer capacity.  When the buffer is
	 * getting full or is very small be more aggressive and send an
	 * update whenever we can increase by two mss sized segments.
	 * In all other situations the ACK's to new incoming data will
	 * carry further window increases.
	 *
	 * Don't send an independent window update if a delayed
	 * ACK is pending (it will get piggy-backed on it) or the
	 * remote side already has done a half-close and won't send
	 * more data.  Skip this if the connection is in T/TCP
	 * half-open state.
	 */
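	/*
	 * Illustrative example: with a 64 kB receive buffer an update
	 * is sent once the window can grow by at least 16 kB (1/4th of
	 * the capacity), or by just two maximum sized segments when
	 * the remaining window is small (recwin <= 8 kB) or the buffer
	 * itself holds fewer than eight segments.
	 */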
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * "adv" is the amount we could increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		int32_t adv;
		int oldwin;

		adv = recwin;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			adv -= oldwin;
		} else
			oldwin = 0;

		/*
		 * If the new window size ends up being the same as or less
		 * than the old size when it is scaled, then don't force
		 * a window update.
		 */
		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
			goto dontupdate;

		if (adv >= (int32_t)(2 * tp->t_maxseg) &&
		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
		     recwin <= (so->so_rcv.sb_hiwat / 8) ||
		     so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg ||
		     adv >= TCP_MAXWIN << tp->rcv_scale))
			goto send;
		if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat)
			goto send;
	}
dontupdate:

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off.  Make sure
	 * that the retransmission timer is set.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_timer_active(tp, TT_PERSIST)
	 *	is true when we are in persist state.
	 * (tp->t_flags & TF_FORCEDATA)
	 *	is set when we are called to send a persist packet.
	 * tcp_timer_active(tp, TT_REXMT)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 *	if window is nonzero, transmit what we can,
	 *	otherwise force out a byte.
	 */
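	/*
	 * Illustrative example: if the peer advertised a zero window
	 * while data is still queued, the persist timer armed here
	 * will later force a one byte probe out through the
	 * TF_FORCEDATA path above, so a lost window update cannot
	 * deadlock the connection.
	 */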
	if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
just_return:
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);

send:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	if (len > 0) {
		if (len >= tp->t_maxseg)
			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
	}
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
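	/*
	 * In round (illustrative) numbers for IPv4: 40 bytes of
	 * tcpiphdr plus at most TCP_MAXOLEN (40) bytes of options
	 * plus a typical link header stays far below MCLBYTES
	 * (commonly 2 kB), so the single-mbuf assumption above holds
	 * on common configurations.
	 */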
	optlen = 0;
#ifdef INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);

	/*
	 * Compute options for segment.
	 * We only have to care about SYN and established connection
	 * segments.  Options for SYN-ACK segments are handled in TCP
	 * syncache.
	 */
	to.to_flags = 0;
	if ((tp->t_flags & TF_NOOPT) == 0) {
		/* Maximum segment size. */
		if (flags & TH_SYN) {
			tp->snd_nxt = tp->iss;
			to.to_mss = tcp_mssopt(&tp->t_inpcb->inp_inc);
			to.to_flags |= TOF_MSS;

			/*
			 * On SYN or SYN|ACK transmits on TFO connections,
			 * only include the TFO option if it is not a
			 * retransmit, as the presence of the TFO option may
			 * have caused the original SYN or SYN|ACK to have
			 * been dropped by a middlebox.
			 */
			if (IS_FASTOPEN(tp->t_flags) &&
			    (tp->t_rxtshift == 0)) {
				if (tp->t_state == TCPS_SYN_RECEIVED) {
					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
					to.to_tfo_cookie =
					    (u_int8_t *)&tp->t_tfo_cookie.server;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
				} else if (tp->t_state == TCPS_SYN_SENT) {
					to.to_tfo_len =
					    tp->t_tfo_client_cookie_len;
					to.to_tfo_cookie =
					    tp->t_tfo_cookie.client;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
					/*
					 * If we wind up having more data to
					 * send with the SYN than can fit in
					 * one segment, don't send any more
					 * until the SYN|ACK comes back from
					 * the other end.
					 */
					dont_sendalot = 1;
				}
			}
		}
		/* Window scaling. */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		/* Timestamps. */
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			curticks = tcp_ts_getticks();
			to.to_tsval = curticks + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
			if (tp->t_rxtshift == 1)
				tp->t_badrxtwin = curticks;
		}

		/* Set receive buffer autosizing timestamp. */
		if (tp->rfbuf_ts == 0 &&
		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
			tp->rfbuf_ts = tcp_ts_getticks();

		/* Selective ACK's. */
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (flags & TH_SYN)
				to.to_flags |= TOF_SACKPERM;
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    tp->rcv_numsacks > 0) {
				to.to_flags |= TOF_SACK;
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		/*
		 * Check that TCP_MD5SIG is enabled in tcpcb to
		 * account for the size needed to set this TCP option.
		 */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif /* TCP_SIGNATURE */

		/* Processing the options. */
		hdrlen += optlen = tcp_addoptions(&to, opt);
		/*
		 * If we wanted a TFO option to be added, but it was unable
		 * to fit, ensure no data is sent.
		 */
		if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
		    !(to.to_flags & TOF_FASTOPEN))
			len = 0;
	}

	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxseg length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxseg) {
		flags &= ~TH_FIN;

		if (tso) {
			u_int if_hw_tsomax;
			u_int moff;
			int max_len;

			/* extract TSO information */
			if_hw_tsomax = tp->t_tsomax;
			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;

			/*
			 * Limit a TSO burst to prevent it from
			 * overflowing or exceeding the maximum length
			 * allowed by the network interface:
			 */
			KASSERT(ipoptlen == 0,
			    ("%s: TSO can't do IP options", __func__));

			/*
			 * Check if we should limit by maximum payload
			 * length:
			 */
			if (if_hw_tsomax != 0) {
				/* compute maximum TSO length */
				max_len = (if_hw_tsomax - hdrlen -
				    max_linkhdr);
				if (max_len <= 0) {
					len = 0;
				} else if (len > max_len) {
					sendalot = 1;
					len = max_len;
				}
			}
			/*
			 * Prevent the last segment from being
			 * fractional unless the send sockbuf can be
			 * emptied:
			 */
			max_len = (tp->t_maxseg - optlen);
			if (((uint32_t)off + (uint32_t)len) <
			    sbavail(&so->so_snd)) {
				moff = len % max_len;
				if (moff != 0) {
					len -= moff;
					sendalot = 1;
				}
			}

			/*
			 * In case there are too many small fragments
			 * don't use TSO:
			 */
			if (len <= max_len) {
				len = max_len;
				sendalot = 1;
				tso = 0;
			}

			/*
			 * Send the FIN in a separate segment
			 * after the bulk sending is done.
			 * We don't trust the TSO implementations
			 * to clear the FIN flag on all but the
			 * last segment.
			 */
			if (tp->t_flags & TF_NEEDFIN)
				sendalot = 1;
		} else {
			if (optlen + ipoptlen >= tp->t_maxseg) {
				/*
				 * Since we don't have enough space to put
				 * the IP header chain and the TCP header in
				 * one packet as required by RFC 7112, don't
				 * send it.  Also ensure that at least one
				 * byte of the payload can be put into the
				 * TCP segment.
				 */
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EMSGSIZE;
				sack_rxmit = 0;
				goto out;
			}
			len = tp->t_maxseg - optlen - ipoptlen;
			sendalot = 1;
			if (dont_sendalot)
				sendalot = 0;
		}
	} else
		tso = 0;

	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
	    ("%s: len > IP_MAXPACKET", __func__));

/*#ifdef DIAGNOSTIC*/
#ifdef INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
#else
	if (max_linkhdr + hdrlen > MHLEN)
#endif
		panic("tcphdr too big");
/*#endif*/

	/*
	 * This KASSERT is here to catch edge cases at a well defined place.
	 * Before, those had triggered (random) panic conditions further down.
	 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		struct mbuf *mb;
		struct sockbuf *msb;
		u_int moff;

		if ((tp->t_flags & TF_FORCEDATA) && len == 1)
			TCPSTAT_INC(tcps_sndprobe);
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
			tp->t_sndrexmitpack++;
			TCPSTAT_INC(tcps_sndrexmitpack);
			TCPSTAT_ADD(tcps_sndrexmitbyte, len);
		} else {
			TCPSTAT_INC(tcps_sndpack);
			TCPSTAT_ADD(tcps_sndbyte, len);
		}
#ifdef INET6
		if (MHLEN < hdrlen + max_linkhdr)
			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		else
#endif
			m = m_gethdr(M_NOWAIT, MT_DATA);

		if (m == NULL) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}

		m->m_data += max_linkhdr;
		m->m_len = hdrlen;

		/*
		 * Start the m_copy functions from the closest mbuf
		 * to the offset in the socket buffer chain.
		 */
		mb = sbsndptr_noadv(&so->so_snd, off, &moff);
		if (len <= MHLEN - hdrlen - max_linkhdr) {
			m_copydata(mb, moff, len,
			    mtod(m, caddr_t) + hdrlen);
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				sbsndptr_adv(&so->so_snd, mb, len);
			m->m_len += len;
		} else {
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				msb = NULL;
			else
				msb = &so->so_snd;
			m->m_next = tcp_m_copym(mb, moff,
			    &len, if_hw_tsomaxsegcount,
			    if_hw_tsomaxsegsize, msb);
			if (len <= (tp->t_maxseg - optlen)) {
				/*
				 * We must have run out of mbufs for the
				 * copy; the length was shortened so that
				 * TSO is no longer needed.  Don't set
				 * sendalot, since we are low on mbufs.
				 */
				tso = 0;
			}
			if (m->m_next == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				(void) m_free(m);
				error = ENOBUFS;
				sack_rxmit = 0;
				goto out;
			}
		}

		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) &&
		    !(flags & TH_SYN))
			flags |= TH_PUSH;
		SOCKBUF_UNLOCK(&so->so_snd);
	} else {
		SOCKBUF_UNLOCK(&so->so_snd);
		if (tp->t_flags & TF_ACKNOW)
			TCPSTAT_INC(tcps_sndacks);
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			TCPSTAT_INC(tcps_sndctrl);
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			TCPSTAT_INC(tcps_sndurg);
		else
			TCPSTAT_INC(tcps_sndwinup);

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}
#ifdef INET6
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			M_ALIGN(m, hdrlen);
		} else
#endif
			m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
	mac_inpcb_create_mbuf(tp->t_inpcb, m);
#endif
#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(tp->t_inpcb, ip6, th);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
#ifdef TCPDEBUG
		ipov = (struct ipovly *)ip;
#endif
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(tp->t_inpcb, ip, th);
	}

	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are starting a connection, send ECN setup
	 * SYN packet.  If we are on a retransmit, we may
	 * resend those bits a number of times as per
	 * RFC 3168.
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
		if (tp->t_rxtshift >= 1) {
			if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
				flags |= TH_ECE|TH_CWR;
		} else
			flags |= TH_ECE|TH_CWR;
	}

	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (tp->t_flags & TF_ECN_PERMIT)) {
		/*
		 * If the peer has ECN, mark data packets with
		 * ECN capable transmission (ECT).
		 * Ignore pure ack packets, retransmissions and window probes.
		 */
		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
		    (sack_rxmit == 0) &&
		    !((tp->t_flags & TF_FORCEDATA) && len == 1 &&
		    SEQ_LT(tp->snd_una, tp->snd_max))) {
#ifdef INET6
			if (isipv6)
				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
			else
#endif
				ip->ip_tos |= IPTOS_ECN_ECT0;
			TCPSTAT_INC(tcps_ecn_ect0);
			/*
			 * Reply with proper ECN notifications.
			 * Only set CWR on new data segments.
			 */
			if (tp->t_flags & TF_ECN_SND_CWR) {
				flags |= TH_CWR;
				tp->t_flags &= ~TF_ECN_SND_CWR;
			}
		}
		if (tp->t_flags & TF_ECN_SND_ECE)
			flags |= TH_ECE;
	}

	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (sack_rxmit == 0) {
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_timer_active(tp, TT_PERSIST))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
	} else {
		th->th_seq = htonl(p->rxmit);
		p->rxmit += len;
		tp->sackhint.sack_bytes_rexmit += len;
	}
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
	th->th_flags = flags;
	/*
	 * Calculate receive window.  Don't shrink window,
	 * but avoid silly window syndrome.
	 * If a RST segment is sent, advertise a window of zero.
	 */
	if (flags & TH_RST) {
		recwin = 0;
	} else {
		if (recwin < (so->so_rcv.sb_hiwat / 4) &&
		    recwin < tp->t_maxseg)
			recwin = 0;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
		    recwin < (tp->rcv_adv - tp->rcv_nxt))
			recwin = (tp->rcv_adv - tp->rcv_nxt);
	}
	/*
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK>
	 * case is handled in syncache.
	 */
	if (flags & TH_SYN)
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	else {
		/* Avoid shrinking window with window scaling. */
		recwin = roundup2(recwin, 1 << tp->rcv_scale);
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
	}
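
	/*
	 * Illustrative example: with rcv_scale = 7 the 16-bit window
	 * field carries recwin / 128, so rounding recwin up to a
	 * multiple of 128 above prevents the truncation itself from
	 * advertising a smaller window than the peer already has.
	 */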

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
	} else
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */

	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt))) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			m_freem(m);
			goto out;
		}
	}
#endif
#ifdef INET6
	if (isipv6) {
		/*
		 * There is no need to fill in ip6_plen right now.
		 * It will be filled later by ip6_output.
		 */
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) +
		    optlen + len, IPPROTO_TCP, 0);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP + len + optlen));

		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif

	/*
	 * Enable TSO and specify the size of the segments.
	 * The TCP pseudo header checksum is always provided.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	}

	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif

#ifdef TCPDEBUG
	/*
	 * Trace.
	 */
	if (so->so_options & SO_DEBUG) {
		u_short save = 0;
#ifdef INET6
		if (!isipv6)
#endif
		{
			save = ipov->ih_len;
			ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + (th->th_off << 2) */);
		}
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
#ifdef INET6
		if (!isipv6)
#endif
			ipov->ih_len = save;
	}
#endif /* TCPDEBUG */
	TCP_PROBE3(debug__output, tp, th, m);

	/* We're getting ready to send; log now. */
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
	    len, NULL, false);

	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
#ifdef INET6
	if (isipv6) {
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt.
		 * Also, desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, NULL);

		/*
		 * Set the packet size here for the benefit of DTrace probes.
		 * ip6_output() will set it properly; it's supposed to include
		 * the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);

		TCP_PROBE5(send, NULL, tp, ip6, tp, th);

#ifdef TCPPCAP
		/* Save packet, if requested. */
		tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, tp->t_inpcb->in6p_outputopts,
		    &tp->t_inpcb->inp_route6,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, NULL, tp->t_inpcb);

		if (error == EMSGSIZE && tp->t_inpcb->inp_route6.ro_rt != NULL)
			mtu = tp->t_inpcb->inp_route6.ro_rt->rt_mtu;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (tp->t_inpcb->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL);
#endif /* INET6 */
		/*
		 * If we do path MTU discovery, then we set DF on every packet.
		 * This might not be the best thing to do according to RFC3390
		 * Section 2.  However the tcp hostcache mitigates the problem
		 * so it affects only the first tcp connection with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			ip->ip_off |= htons(IP_DF);
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

#ifdef TCPPCAP
		/* Save packet, if requested. */
		tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

		error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
		    tp->t_inpcb);

		if (error == EMSGSIZE && tp->t_inpcb->inp_route.ro_rt != NULL)
			mtu = tp->t_inpcb->inp_route.ro_rt->rt_mtu;
	}
#endif /* INET */

out:
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		if (sack_rxmit)
			goto timer;
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			tp->t_sndtime = ticks;
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				TCPSTAT_INC(tcps_segstimed);
			}
		}

		/*
		 * Set retransmit timer if not currently set,
		 * and not doing a pure ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
timer:
		if (!tcp_timer_active(tp, TT_REXMT) &&
		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
		    (tp->snd_nxt != tp->snd_una))) {
			if (tcp_timer_active(tp, TT_PERSIST)) {
				tcp_timer_activate(tp, TT_PERSIST, 0);
				tp->t_rxtshift = 0;
			}
			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
		} else if (len == 0 && sbavail(&so->so_snd) &&
		    !tcp_timer_active(tp, TT_REXMT) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			/*
			 * Avoid a situation where we do not set persist timer
			 * after a zero window condition.  For example:
			 * 1) A -> B: packet with enough data to fill the window
			 * 2) B -> A: ACK for #1 + new data (0 window
			 *    advertisement)
			 * 3) A -> B: ACK for #2, 0 len packet
			 *
			 * In this case, A will not activate the persist timer,
			 * because it chose to send a packet.  Unless tcp_output
			 * is called for some other reason (delayed ack timer,
			 * another input packet from B, socket syscall), A will
			 * not send zero window probes.
			 *
			 * So, if you send a 0-length packet, but there is data
			 * in the socket buffer, and neither the rexmt or
			 * persist timer is already set, then activate the
			 * persist timer.
			 */
			tp->t_rxtshift = 0;
			tcp_setpersist(tp);
		}
	} else {
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		int xlen = len;
		if (flags & TH_SYN)
			++xlen;
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + xlen;
	}
	if ((error == 0) &&
	    (TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (tp->t_flags & TF_SACK_PERMIT) &&
	    tp->rcv_numsacks > 0)) {
		/* Clean up any DSACK's sent */
		tcp_clean_dsack_blocks(tp);
	}
	if (error) {
		/* Record the error. */
		TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_OUT,
		    error, 0, NULL, false);

		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 *
		 * If the error is EPERM the packet got blocked by the
		 * local firewall.  Normally we should terminate the
		 * connection but the blocking may have been spurious
		 * due to a firewall reconfiguration cycle.  So we treat
		 * it like a packet loss and let the retransmit timer and
		 * timeouts do their work over time.
		 * XXX: It is a POLA question whether calling tcp_drop right
		 * away would be the really correct behavior instead.
		 */
		if (((tp->t_flags & TF_FORCEDATA) == 0 ||
		    !tcp_timer_active(tp, TT_PERSIST)) &&
		    ((flags & TH_SYN) == 0) &&
		    (error != EPERM)) {
			if (sack_rxmit) {
				p->rxmit -= len;
				tp->sackhint.sack_bytes_rexmit -= len;
				KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
				    ("sackhint bytes rtx >= 0"));
			} else
				tp->snd_nxt -= len;
		}
		SOCKBUF_UNLOCK_ASSERT(&so->so_snd);	/* Check gotos. */
		switch (error) {
		case EACCES:
		case EPERM:
			tp->t_softerror = error;
			return (error);
		case ENOBUFS:
			TCP_XMIT_TIMER_ASSERT(tp, len, flags);
			tp->snd_cwnd = tp->t_maxseg;
			return (0);
		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another or lowered
			 * its MTU.
			 * If TSO was active we either got an interface
			 * without TSO capabilities or TSO was turned off.
			 * If we obtained mtu from ip_output() then update
			 * it and try again.
			 */
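			/*
			 * Illustrative example: a route change from a
			 * 9000 byte to a 1500 byte MTU path surfaces
			 * here as EMSGSIZE; tcp_mss_update() shrinks
			 * t_maxseg and the goto below re-runs the send
			 * logic with smaller segments.
			 */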
			if (tso)
				tp->t_flags &= ~TF_TSO;
			if (mtu != 0) {
				tcp_mss_update(tp, -1, mtu, NULL, NULL);
				goto again;
			}
			return (error);
		case EHOSTDOWN:
		case EHOSTUNREACH:
		case ENETDOWN:
		case ENETUNREACH:
			if (TCPS_HAVERCVDSYN(tp->t_state)) {
				tp->t_softerror = error;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			return (error);
		}
	}
	TCPSTAT_INC(tcps_sndtotal);

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	if (tcp_timer_active(tp, TT_DELACK))
		tcp_timer_activate(tp, TT_DELACK, 0);
#if 0
	/*
	 * This completely breaks TCP if newreno is turned on.  What happens
	 * is that if delayed-acks are turned on on the receiver, this code
	 * on the transmitter effectively destroys the TCP window, forcing
	 * it to four packets (1.5Kx4 = 6K window).
	 */
	if (sendalot && --maxburst)
		goto again;
#endif
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
	int tt;

	tp->t_flags &= ~TF_PREVVALID;
	if (tcp_timer_active(tp, TT_REXMT))
		panic("tcp_setpersist: retransmit pending");
	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
	    tcp_persmin, tcp_persmax);
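	/*
	 * Illustrative example: with a smoothed RTT estimate on the
	 * order of 200 ms, the first probe fires after roughly that
	 * long and later probes back off through tcp_backoff[]
	 * (2x, 4x, ...), always clamped to [tcp_persmin, tcp_persmax].
	 */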
	tcp_timer_activate(tp, TT_PERSIST, tt);
	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
		tp->t_rxtshift++;
}

/*
 * Insert TCP options according to the supplied parameters to the place
 * optp in a consistent way.  Can handle unaligned destinations.
 *
 * The order of the option processing is crucial for optimal packing and
 * alignment for the scarce option space.
 *
 * The optimal order for a SYN/SYN-ACK segment is:
 *   MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) +
 *   Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40.
 *
 * The SACK options should be last.  SACK blocks consume 8*n+2 bytes.
 * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks).
 * At minimum we need 10 bytes (to generate 1 SACK block).  If both
 * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present,
 * we only have 10 bytes for SACK options (40 - (12 + 18)).
 */
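/*
 * As a worked illustration of the packing rules: a SYN carrying MSS,
 * window scale, SACK permitted and timestamps lays out as
 * MSS(4) + NOP(1) + WScale(3) + SACKperm(2) + TS(10) = 20 bytes of the
 * 40 available, with the single NOP chosen so that the two 32-bit
 * timestamp values start on a 4-byte boundary.
 */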
int
tcp_addoptions(struct tcpopt *to, u_char *optp)
{
	u_int32_t mask, optlen = 0;

	for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) {
		if ((to->to_flags & mask) != mask)
			continue;
		if (optlen == TCP_MAXOLEN)
			break;
		switch (to->to_flags & mask) {
		case TOF_MSS:
			while (optlen % 4) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG)
				continue;
			optlen += TCPOLEN_MAXSEG;
			*optp++ = TCPOPT_MAXSEG;
			*optp++ = TCPOLEN_MAXSEG;
			to->to_mss = htons(to->to_mss);
			bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss));
			optp += sizeof(to->to_mss);
			break;
		case TOF_SCALE:
			while (!optlen || optlen % 2 != 1) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW)
				continue;
			optlen += TCPOLEN_WINDOW;
			*optp++ = TCPOPT_WINDOW;
			*optp++ = TCPOLEN_WINDOW;
			*optp++ = to->to_wscale;
			break;
		case TOF_SACKPERM:
			while (optlen % 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED)
				continue;
			optlen += TCPOLEN_SACK_PERMITTED;
			*optp++ = TCPOPT_SACK_PERMITTED;
			*optp++ = TCPOLEN_SACK_PERMITTED;
			break;
		case TOF_TS:
			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP)
				continue;
			optlen += TCPOLEN_TIMESTAMP;
			*optp++ = TCPOPT_TIMESTAMP;
			*optp++ = TCPOLEN_TIMESTAMP;
			to->to_tsval = htonl(to->to_tsval);
			to->to_tsecr = htonl(to->to_tsecr);
			bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval));
			optp += sizeof(to->to_tsval);
			bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr));
			optp += sizeof(to->to_tsecr);
			break;
		case TOF_SIGNATURE:
			{
			int siglen = TCPOLEN_SIGNATURE - 2;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE) {
				to->to_flags &= ~TOF_SIGNATURE;
				continue;
			}
			optlen += TCPOLEN_SIGNATURE;
			*optp++ = TCPOPT_SIGNATURE;
			*optp++ = TCPOLEN_SIGNATURE;
			to->to_signature = optp;
			while (siglen--)
				*optp++ = 0;
			break;
			}
		case TOF_SACK:
			{
			int sackblks = 0;
			struct sackblk *sack = (struct sackblk *)to->to_sacks;
			tcp_seq sack_seq;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK)
				continue;
			optlen += TCPOLEN_SACKHDR;
			*optp++ = TCPOPT_SACK;
			sackblks = min(to->to_nsacks,
			    (TCP_MAXOLEN - optlen) / TCPOLEN_SACK);
			*optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK;
			while (sackblks--) {
				sack_seq = htonl(sack->start);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				sack_seq = htonl(sack->end);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				optlen += TCPOLEN_SACK;
				sack++;
			}
			TCPSTAT_INC(tcps_sack_send_blocks);
			break;
			}
		case TOF_FASTOPEN:
			{
			int total_len;

			/* XXX is there any point to aligning this option? */
			total_len = TCPOLEN_FAST_OPEN_EMPTY + to->to_tfo_len;
			if (TCP_MAXOLEN - optlen < total_len) {
				to->to_flags &= ~TOF_FASTOPEN;
				continue;
			}
			*optp++ = TCPOPT_FAST_OPEN;
			*optp++ = total_len;
			if (to->to_tfo_len > 0) {
				bcopy(to->to_tfo_cookie, optp, to->to_tfo_len);
				optp += to->to_tfo_len;
			}
			optlen += total_len;
			break;
			}
		default:
			panic("%s: unknown TCP option type", __func__);
			break;
		}
	}

	/* Terminate and pad TCP options to a 4 byte boundary. */
	if (optlen % 4) {
		optlen += TCPOLEN_EOL;
		*optp++ = TCPOPT_EOL;
	}
	/*
	 * According to RFC 793 (STD0007):
	 *   "The content of the header beyond the End-of-Option option
	 *    must be header padding (i.e., zero)."
	 * and later: "The padding is composed of zeros."
	 */
	while (optlen % 4) {
		optlen += TCPOLEN_PAD;
		*optp++ = TCPOPT_PAD;
	}

	KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
	return (optlen);
}

/*
 * This is a copy of m_copym(), taking the TSO segment size/limit
 * constraints into account, and advancing the sndptr as it goes.
 */
struct mbuf *
tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen,
    int32_t seglimit, int32_t segsize, struct sockbuf *sb)
{
	struct mbuf *n, **np;
	struct mbuf *top;
	int32_t off = off0;
	int32_t len = *plen;
	int32_t fragsize;
	int32_t len_cp = 0;
	int32_t *pkthdrlen;
	uint32_t mlen, frags;
	bool copyhdr;

	KASSERT(off >= 0, ("tcp_m_copym, negative off %d", off));
	KASSERT(len >= 0, ("tcp_m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = true;
	else
		copyhdr = false;
	while (off > 0) {
		KASSERT(m != NULL, ("tcp_m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		if ((sb) && (m == sb->sb_sndptr)) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	pkthdrlen = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("tcp_m_copym, length > size of mbuf chain"));
			*plen = len_cp;
			if (pkthdrlen != NULL)
				*pkthdrlen = len_cp;
			break;
		}
		mlen = min(len, m->m_len - off);
		if (seglimit) {
			/*
			 * For M_NOMAP mbufs, add 3 segments
			 * + 1 in case we are crossing page boundaries
			 * + 2 in case the TLS hdr/trailer are used
			 * It is cheaper to just add the segments
			 * than it is to take the cache miss to look
			 * at the mbuf ext_pgs state in detail.
			 */
			if (m->m_flags & M_NOMAP) {
				fragsize = min(segsize, PAGE_SIZE);
				frags = 3;
			} else {
				fragsize = segsize;
				frags = 0;
			}

			/* Break if we really can't fit anymore. */
			if ((frags + 1) >= seglimit) {
				*plen = len_cp;
				if (pkthdrlen != NULL)
					*pkthdrlen = len_cp;
				break;
			}

			/*
			 * Reduce size if you can't copy the whole
			 * mbuf.  If we can't copy the whole mbuf, also
			 * adjust len so the loop will end after this
			 * mbuf.
			 */
			if ((frags + howmany(mlen, fragsize)) >= seglimit) {
				mlen = (seglimit - frags - 1) * fragsize;
				len = mlen;
				*plen = len_cp + len;
				if (pkthdrlen != NULL)
					*pkthdrlen = *plen;
			}
			frags += howmany(mlen, fragsize);
			if (frags == 0)
				frags++;
			seglimit -= frags;
			KASSERT(seglimit > 0,
			    ("%s: seglimit went too low", __func__));
		}
		if (copyhdr)
			n = m_gethdr(M_NOWAIT, m->m_type);
		else
			n = m_get(M_NOWAIT, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, M_NOWAIT))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			pkthdrlen = &n->m_pkthdr.len;
			copyhdr = false;
		}
		n->m_len = mlen;
		len_cp += n->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);

		if (sb && (sb->sb_sndptr == m) &&
		    ((n->m_len + off) >= m->m_len) && m->m_next) {
			sb->sb_sndptroff += m->m_len;
			sb->sb_sndptr = m->m_next;
		}
		off = 0;
		if (len != M_COPYALL) {
			len -= n->m_len;
		}
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

void
tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin)
{

	/*
	 * Automatic sizing of send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (eg. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However testing has shown this not
	 * to be much of a problem.  At worst we are trading wasting
	 * of available bandwidth (the non-use of it) for wasting some
	 * socket buffer memory.
	 *
	 * TODO: Shrink send buffer during idle periods together
	 * with congestion window.  Requires another timer.  Has to
	 * wait for upcoming tcp timer rewrite.
	 *
	 * XXXGL: should sbused() or sbavail() be used here?
	 */
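	/*
	 * Illustrative example with the defaults defined at the top of
	 * this file: the buffer grows in sendbuf_inc (8 kB) steps, at
	 * most one step per received ACK, until it reaches sendbuf_max
	 * (2 MB) or the peer's receive window no longer justifies
	 * further growth.
	 */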
	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
		int lowat;

		lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0;
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat &&
		    sbused(&so->so_snd) >=
		    (so->so_snd.sb_hiwat / 8 * 7) - lowat &&
		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
		    sendwin >= (sbused(&so->so_snd) -
		    (tp->snd_nxt - tp->snd_una))) {
			if (!sbreserve_locked(&so->so_snd,
			    min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max), so, curthread))
				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
		}
	}
}