1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
5 * The Regents of the University of California. All rights reserved.
6 * Copyright (c) 2007-2008,2010
7 * Swinburne University of Technology, Melbourne, Australia.
8 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
9 * Copyright (c) 2010 The FreeBSD Foundation
10 * Copyright (c) 2010-2011 Juniper Networks, Inc.
11 * All rights reserved.
12 *
13 * Portions of this software were developed at the Centre for Advanced Internet
14 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
15 * James Healy and David Hayes, made possible in part by a grant from the Cisco
16 * University Research Program Fund at Community Foundation Silicon Valley.
17 *
18 * Portions of this software were developed at the Centre for Advanced
19 * Internet Architectures, Swinburne University of Technology, Melbourne,
20 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
21 *
22 * Portions of this software were developed by Robert N. M. Watson under
23 * contract to Juniper Networks, Inc.
24 *
25 * Redistribution and use in source and binary forms, with or without
26 * modification, are permitted provided that the following conditions
27 * are met:
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. Neither the name of the University nor the names of its contributors
34 * may be used to endorse or promote products derived from this software
35 * without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
38 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
46 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
47 * SUCH DAMAGE.
48 *
49 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
50 */
51
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54
55 #include "opt_inet.h"
56 #include "opt_inet6.h"
57 #include "opt_ipsec.h"
58
59 #include <sys/param.h>
60 #include <sys/arb.h>
61 #include <sys/kernel.h>
62 #ifdef TCP_HHOOK
63 #include <sys/hhook.h>
64 #endif
65 #include <sys/malloc.h>
66 #include <sys/mbuf.h>
67 #include <sys/proc.h> /* for proc0 declaration */
68 #include <sys/protosw.h>
69 #include <sys/qmath.h>
70 #include <sys/sdt.h>
71 #include <sys/signalvar.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/sysctl.h>
75 #include <sys/syslog.h>
76 #include <sys/systm.h>
77 #include <sys/stats.h>
78
79 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
80
81 #include <vm/uma.h>
82
83 #include <net/if.h>
84 #include <net/if_var.h>
85 #include <net/route.h>
86 #include <net/vnet.h>
87
88 #define TCPSTATES /* for logging */
89
90 #include <netinet/in.h>
91 #include <netinet/in_kdtrace.h>
92 #include <netinet/in_pcb.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
96 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
97 #include <netinet/ip_var.h>
98 #include <netinet/ip_options.h>
99 #include <netinet/ip6.h>
100 #include <netinet/icmp6.h>
101 #include <netinet6/in6_pcb.h>
102 #include <netinet6/in6_var.h>
103 #include <netinet6/ip6_var.h>
104 #include <netinet6/nd6.h>
105 #include <netinet/tcp.h>
106 #include <netinet/tcp_fsm.h>
107 #include <netinet/tcp_log_buf.h>
108 #include <netinet/tcp_seq.h>
109 #include <netinet/tcp_timer.h>
110 #include <netinet/tcp_var.h>
111 #include <netinet6/tcp6_var.h>
112 #include <netinet/tcpip.h>
113 #include <netinet/cc/cc.h>
114 #include <netinet/tcp_fastopen.h>
115 #ifdef TCPPCAP
116 #include <netinet/tcp_pcap.h>
117 #endif
118 #include <netinet/tcp_syncache.h>
119 #ifdef TCP_OFFLOAD
120 #include <netinet/tcp_offload.h>
121 #endif
122 #include <netinet/tcp_ecn.h>
123 #include <netinet/udp.h>
124
125 #include <netipsec/ipsec_support.h>
126
127 #include <machine/in_cksum.h>
128
129 #include <security/mac/mac_framework.h>
130
131 const int tcprexmtthresh = 3;
132
133 VNET_DEFINE(int, tcp_log_in_vain) = 0;
134 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW,
135 &VNET_NAME(tcp_log_in_vain), 0,
136 "Log all incoming TCP segments to closed ports");
137
138 VNET_DEFINE(int, blackhole) = 0;
139 #define V_blackhole VNET(blackhole)
140 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
141 &VNET_NAME(blackhole), 0,
142 "Do not send RST on segments to closed ports");
143
144 VNET_DEFINE(bool, blackhole_local) = false;
145 #define V_blackhole_local VNET(blackhole_local)
146 SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, blackhole_local, CTLFLAG_VNET |
147 CTLFLAG_RW, &VNET_NAME(blackhole_local), false,
148 "Enforce net.inet.tcp.blackhole for locally originated packets");
149
150 VNET_DEFINE(int, tcp_delack_enabled) = 1;
151 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
152 &VNET_NAME(tcp_delack_enabled), 0,
153 "Delay ACK to try and piggyback it onto a data packet");
154
155 VNET_DEFINE(int, drop_synfin) = 0;
156 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
157 &VNET_NAME(drop_synfin), 0,
158 "Drop TCP packets with SYN+FIN set");
159
160 VNET_DEFINE(int, tcp_do_prr_conservative) = 0;
161 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr_conservative, CTLFLAG_VNET | CTLFLAG_RW,
162 &VNET_NAME(tcp_do_prr_conservative), 0,
163 "Do conservative Proportional Rate Reduction");
164
165 VNET_DEFINE(int, tcp_do_prr) = 1;
166 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr, CTLFLAG_VNET | CTLFLAG_RW,
167 &VNET_NAME(tcp_do_prr), 1,
168 "Enable Proportional Rate Reduction per RFC 6937");
169
170 VNET_DEFINE(int, tcp_do_lrd) = 0;
171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_lrd, CTLFLAG_VNET | CTLFLAG_RW,
172 &VNET_NAME(tcp_do_lrd), 1,
173 "Perform Lost Retransmission Detection");
174
175 VNET_DEFINE(int, tcp_do_newcwv) = 0;
176 SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW,
177 &VNET_NAME(tcp_do_newcwv), 0,
178 "Enable New Congestion Window Validation per RFC7661");
179
180 VNET_DEFINE(int, tcp_do_rfc3042) = 1;
181 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
182 &VNET_NAME(tcp_do_rfc3042), 0,
183 "Enable RFC 3042 (Limited Transmit)");
184
185 VNET_DEFINE(int, tcp_do_rfc3390) = 1;
186 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
187 &VNET_NAME(tcp_do_rfc3390), 0,
188 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
189
190 VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
191 SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
192 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
193 "Slow-start flight size (initial congestion window) in number of segments");
194
195 VNET_DEFINE(int, tcp_do_rfc3465) = 1;
196 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
197 &VNET_NAME(tcp_do_rfc3465), 0,
198 "Enable RFC 3465 (Appropriate Byte Counting)");
199
200 VNET_DEFINE(int, tcp_abc_l_var) = 2;
201 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
202 &VNET_NAME(tcp_abc_l_var), 2,
203 "Cap the max cwnd increment during slow-start to this number of segments");
204
205 VNET_DEFINE(int, tcp_insecure_syn) = 0;
206 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
207 &VNET_NAME(tcp_insecure_syn), 0,
208 "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");
209
210 VNET_DEFINE(int, tcp_insecure_rst) = 0;
211 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
212 &VNET_NAME(tcp_insecure_rst), 0,
213 "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");
214
215 VNET_DEFINE(int, tcp_recvspace) = 1024*64;
216 #define V_tcp_recvspace VNET(tcp_recvspace)
217 SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
218 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
219
220 VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
221 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
222 &VNET_NAME(tcp_do_autorcvbuf), 0,
223 "Enable automatic receive buffer sizing");
224
225 VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
226 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
227 &VNET_NAME(tcp_autorcvbuf_max), 0,
228 "Max size of automatic receive buffer");
229
230 VNET_DEFINE(struct inpcbinfo, tcbinfo);
231
232 /*
233 * TCP statistics are stored in an array of counter(9)s whose size matches
234 * that of struct tcpstat. The TCP running connection count is a regular array.
235 */
236 VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
237 SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
238 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
239 VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
240 SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
241 CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
242 "TCP connection counts by TCP state");
243
244 /*
245 * Kernel module interface for updating tcpstat. The first argument is an index
246 * into tcpstat treated as an array.
247 */
248 void
249 kmod_tcpstat_add(int statnum, int val)
250 {
251
252 counter_u64_add(VNET(tcpstat)[statnum], val);
253 }
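/*
 * Minimal usage sketch (assuming the KMOD_TCPSTAT_INC() convenience macro
 * from tcp_var.h, which maps a struct tcpstat field name to the index
 * expected here):
 *
 *	KMOD_TCPSTAT_INC(tcps_rcvtotal);
 *
 * bumps the tcps_rcvtotal counter by one from a kernel module.
 */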
254
255 /*
256 * Make sure that we only start a SACK loss recovery when
257 * receiving a duplicate ACK with a SACK block, and also
258 * complete SACK loss recovery in case the other end
259 * reneges.
260 */
261 static bool inline
262 tcp_is_sack_recovery(struct tcpcb *tp, struct tcpopt *to)
263 {
264 return ((tp->t_flags & TF_SACK_PERMIT) &&
265 ((to->to_flags & TOF_SACK) ||
266 (!TAILQ_EMPTY(&tp->snd_holes))));
267 }
268
269 #ifdef TCP_HHOOK
270 /*
271 * Wrapper for the TCP established input helper hook.
272 */
273 void
274 hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
275 {
276 struct tcp_hhook_data hhook_data;
277
278 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
279 hhook_data.tp = tp;
280 hhook_data.th = th;
281 hhook_data.to = to;
282
283 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
284 &tp->t_osd);
285 }
286 }
287 #endif
288
289 /*
290 * CC wrapper hook functions
291 */
292 void
293 cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
294 uint16_t type)
295 {
296 #ifdef STATS
297 int32_t gput;
298 #endif
299
300 INP_WLOCK_ASSERT(tptoinpcb(tp));
301
302 tp->t_ccv.nsegs = nsegs;
303 tp->t_ccv.bytes_this_ack = BYTES_THIS_ACK(tp, th);
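/*
 * Determine whether the sender is cwnd-limited: without RFC 7661 style
 * validation (newcwv) we are cwnd-limited whenever cwnd does not exceed
 * the peer's advertised window; with newcwv we additionally require
 * cwnd to be below twice the amount of data estimated to be in flight.
 */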
304 if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) ||
305 (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) &&
306 (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2))))
307 tp->t_ccv.flags |= CCF_CWND_LIMITED;
308 else
309 tp->t_ccv.flags &= ~CCF_CWND_LIMITED;
310
311 if (type == CC_ACK) {
312 #ifdef STATS
313 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
314 ((int32_t)tp->snd_cwnd) - tp->snd_wnd);
315 if (!IN_RECOVERY(tp->t_flags))
316 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN,
317 tp->t_ccv.bytes_this_ack / (tcp_maxseg(tp) * nsegs));
318 if ((tp->t_flags & TF_GPUTINPROG) &&
319 SEQ_GEQ(th->th_ack, tp->gput_ack)) {
320 /*
321 * Compute goodput in bits per millisecond.
322 */
323 gput = (((int64_t)SEQ_SUB(th->th_ack, tp->gput_seq)) << 3) /
324 max(1, tcp_ts_getticks() - tp->gput_ts);
325 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
326 gput);
327 /*
328 * XXXLAS: This is a temporary hack, and should be
329 * chained off VOI_TCP_GPUT when stats(9) grows an API
330 * to deal with chained VOIs.
331 */
332 if (tp->t_stats_gput_prev > 0)
333 stats_voi_update_abs_s32(tp->t_stats,
334 VOI_TCP_GPUT_ND,
335 ((gput - tp->t_stats_gput_prev) * 100) /
336 tp->t_stats_gput_prev);
337 tp->t_flags &= ~TF_GPUTINPROG;
338 tp->t_stats_gput_prev = gput;
339 }
340 #endif /* STATS */
341 if (tp->snd_cwnd > tp->snd_ssthresh) {
342 tp->t_bytes_acked += tp->t_ccv.bytes_this_ack;
343 if (tp->t_bytes_acked >= tp->snd_cwnd) {
344 tp->t_bytes_acked -= tp->snd_cwnd;
345 tp->t_ccv.flags |= CCF_ABC_SENTAWND;
346 }
347 } else {
348 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
349 tp->t_bytes_acked = 0;
350 }
351 }
352
353 if (CC_ALGO(tp)->ack_received != NULL) {
354 /* XXXLAS: Find a way to live without this */
355 tp->t_ccv.curack = th->th_ack;
356 CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
357 }
358 #ifdef STATS
359 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
360 #endif
361 }
362
363 void
364 cc_conn_init(struct tcpcb *tp)
365 {
366 struct hc_metrics_lite metrics;
367 struct inpcb *inp = tptoinpcb(tp);
368 u_int maxseg;
369 int rtt;
370
371 INP_WLOCK_ASSERT(inp);
372
373 tcp_hc_get(&inp->inp_inc, &metrics);
374 maxseg = tcp_maxseg(tp);
375
376 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
377 tp->t_srtt = rtt;
378 TCPSTAT_INC(tcps_usedrtt);
379 if (metrics.rmx_rttvar) {
380 tp->t_rttvar = metrics.rmx_rttvar;
381 TCPSTAT_INC(tcps_usedrttvar);
382 } else {
383 /* default variation is +- 1 rtt */
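/*
 * (With TCP_RTTVAR_SCALE being half of TCP_RTT_SCALE, storing
 * t_srtt / 2 in rttvar units corresponds to one full smoothed
 * RTT, hence the +- 1 rtt above.)
 */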
384 tp->t_rttvar =
385 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
386 }
387 TCPT_RANGESET(tp->t_rxtcur,
388 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
389 tp->t_rttmin, TCPTV_REXMTMAX);
390 }
391 if (metrics.rmx_ssthresh) {
392 /*
393 * There's some sort of gateway or interface
394 * buffer limit on the path. Use this to set
395 * the slow start threshold, but set the
396 * threshold to no less than 2*mss.
397 */
398 tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
399 TCPSTAT_INC(tcps_usedssthresh);
400 }
401
402 /*
403 * Set the initial slow-start flight size.
404 *
405 * If a SYN or SYN/ACK was lost and retransmitted, we have to
406 * reduce the initial CWND to one segment, as congestion is
407 * likely, requiring us to be cautious.
408 */
409 if (tp->snd_cwnd == 1)
410 tp->snd_cwnd = maxseg; /* SYN(-ACK) lost */
411 else
412 tp->snd_cwnd = tcp_compute_initwnd(maxseg);
413
414 if (CC_ALGO(tp)->conn_init != NULL)
415 CC_ALGO(tp)->conn_init(&tp->t_ccv);
416 }
417
418 void inline
419 cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
420 {
421 INP_WLOCK_ASSERT(tptoinpcb(tp));
422
423 #ifdef STATS
424 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
425 #endif
426
427 switch(type) {
428 case CC_NDUPACK:
429 if (!IN_FASTRECOVERY(tp->t_flags)) {
430 tp->snd_recover = tp->snd_max;
431 if (tp->t_flags2 & TF2_ECN_PERMIT)
432 tp->t_flags2 |= TF2_ECN_SND_CWR;
433 }
434 break;
435 case CC_ECN:
436 if (!IN_CONGRECOVERY(tp->t_flags) ||
437 /*
438 * Allow ECN reaction on ACK to CWR, if
439 * that data segment was also CE marked.
440 */
441 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
442 EXIT_CONGRECOVERY(tp->t_flags);
443 TCPSTAT_INC(tcps_ecn_rcwnd);
444 tp->snd_recover = tp->snd_max + 1;
445 if (tp->t_flags2 & TF2_ECN_PERMIT)
446 tp->t_flags2 |= TF2_ECN_SND_CWR;
447 }
448 break;
449 case CC_RTO:
450 tp->t_dupacks = 0;
451 tp->t_bytes_acked = 0;
452 EXIT_RECOVERY(tp->t_flags);
453 if (tp->t_flags2 & TF2_ECN_PERMIT)
454 tp->t_flags2 |= TF2_ECN_SND_CWR;
455 break;
456 case CC_RTO_ERR:
457 TCPSTAT_INC(tcps_sndrexmitbad);
458 /* RTO was unnecessary, so reset everything. */
459 tp->snd_cwnd = tp->snd_cwnd_prev;
460 tp->snd_ssthresh = tp->snd_ssthresh_prev;
461 tp->snd_recover = tp->snd_recover_prev;
462 if (tp->t_flags & TF_WASFRECOVERY)
463 ENTER_FASTRECOVERY(tp->t_flags);
464 if (tp->t_flags & TF_WASCRECOVERY)
465 ENTER_CONGRECOVERY(tp->t_flags);
466 tp->snd_nxt = tp->snd_max;
467 tp->t_flags &= ~TF_PREVVALID;
468 tp->t_badrxtwin = 0;
469 break;
470 }
471
472 if (CC_ALGO(tp)->cong_signal != NULL) {
473 if (th != NULL)
474 tp->t_ccv.curack = th->th_ack;
475 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
476 }
477 }
478
479 void inline
480 cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
481 {
482 INP_WLOCK_ASSERT(tptoinpcb(tp));
483
484 /* XXXLAS: KASSERT that we're in recovery? */
485
486 if (CC_ALGO(tp)->post_recovery != NULL) {
487 tp->t_ccv.curack = th->th_ack;
488 CC_ALGO(tp)->post_recovery(&tp->t_ccv);
489 }
490 /* XXXLAS: EXIT_RECOVERY ? */
491 tp->t_bytes_acked = 0;
492 tp->sackhint.delivered_data = 0;
493 tp->sackhint.prr_out = 0;
494 }
495
496 /*
497 * Indicate whether this ack should be delayed. We can delay the ack if the
498 * following conditions are met:
499 * - There is no delayed ack timer in progress.
500 * - Our last ack wasn't a 0-sized window. We never want to delay
501 * the ack that opens up a 0-sized window.
502 * - LRO wasn't used for this segment. We make sure by checking that the
503 * segment size is not larger than the MSS.
504 */
505 #define DELAY_ACK(tp, tlen) \
506 ((!tcp_timer_active(tp, TT_DELACK) && \
507 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
508 (tlen <= tp->t_maxseg) && \
509 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
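/*
 * Typical use at the segment-processing call sites, a sketch of the
 * pattern found later in tcp_do_segment():
 *
 *	if (DELAY_ACK(tp, tlen))
 *		tp->t_flags |= TF_DELACK;
 *	else
 *		tp->t_flags |= TF_ACKNOW;
 *
 * TF_DELACK arms the delayed-ACK timer on the way out, while TF_ACKNOW
 * makes tcp_output() send an ACK immediately.
 */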
510
511 void inline
512 cc_ecnpkt_handler_flags(struct tcpcb *tp, uint16_t flags, uint8_t iptos)
513 {
514 INP_WLOCK_ASSERT(tptoinpcb(tp));
515
516 if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
517 switch (iptos & IPTOS_ECN_MASK) {
518 case IPTOS_ECN_CE:
519 tp->t_ccv.flags |= CCF_IPHDR_CE;
520 break;
521 case IPTOS_ECN_ECT0:
522 /* FALLTHROUGH */
523 case IPTOS_ECN_ECT1:
524 /* FALLTHROUGH */
525 case IPTOS_ECN_NOTECT:
526 tp->t_ccv.flags &= ~CCF_IPHDR_CE;
527 break;
528 }
529
530 if (flags & TH_CWR)
531 tp->t_ccv.flags |= CCF_TCPHDR_CWR;
532 else
533 tp->t_ccv.flags &= ~CCF_TCPHDR_CWR;
534
535 CC_ALGO(tp)->ecnpkt_handler(&tp->t_ccv);
536
537 if (tp->t_ccv.flags & CCF_ACKNOW) {
538 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
539 tp->t_flags |= TF_ACKNOW;
540 }
541 }
542 }
543
544 void inline
545 cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
546 {
547 cc_ecnpkt_handler_flags(tp, tcp_get_flags(th), iptos);
548 }
549
550 /*
551 * TCP input handling is split into multiple parts:
552 * tcp6_input is a thin wrapper around tcp_input for the extended
553 * ip6_protox[] call format in ip6_input
554 * tcp_input handles primary segment validation, inpcb lookup and
555 * SYN processing on listen sockets
556 * tcp_do_segment processes the ACK and text of the segment for
557 * establishing, established and closing connections
558 */
559 #ifdef INET6
560 int
561 tcp6_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
562 {
563 struct mbuf *m;
564 struct in6_ifaddr *ia6;
565 struct ip6_hdr *ip6;
566
567 m = *mp;
568 if (m->m_len < *offp + sizeof(struct tcphdr)) {
569 m = m_pullup(m, *offp + sizeof(struct tcphdr));
570 if (m == NULL) {
571 *mp = m;
572 TCPSTAT_INC(tcps_rcvshort);
573 return (IPPROTO_DONE);
574 }
575 }
576
577 /*
578 * draft-itojun-ipv6-tcp-to-anycast
579 * Is there a better place to put this?
580 */
581 ip6 = mtod(m, struct ip6_hdr *);
582 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
583 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
584 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
585 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
586 *mp = NULL;
587 return (IPPROTO_DONE);
588 }
589
590 *mp = m;
591 return (tcp_input_with_port(mp, offp, proto, port));
592 }
593
594 int
595 tcp6_input(struct mbuf **mp, int *offp, int proto)
596 {
597
598 return(tcp6_input_with_port(mp, offp, proto, 0));
599 }
600 #endif /* INET6 */
601
602 int
603 tcp_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
604 {
605 struct mbuf *m = *mp;
606 struct tcphdr *th = NULL;
607 struct ip *ip = NULL;
608 struct inpcb *inp = NULL;
609 struct tcpcb *tp = NULL;
610 struct socket *so = NULL;
611 u_char *optp = NULL;
612 int off0;
613 int optlen = 0;
614 #ifdef INET
615 int len;
616 uint8_t ipttl;
617 #endif
618 int tlen = 0, off;
619 int drop_hdrlen;
620 int thflags;
621 int rstreason = 0; /* For badport_bandlim accounting purposes */
622 int lookupflag;
623 uint8_t iptos;
624 struct m_tag *fwd_tag = NULL;
625 #ifdef INET6
626 struct ip6_hdr *ip6 = NULL;
627 int isipv6;
628 #else
629 const void *ip6 = NULL;
630 #endif /* INET6 */
631 struct tcpopt to; /* options in this segment */
632 char *s = NULL; /* address and port logging */
633
634 NET_EPOCH_ASSERT();
635
636 #ifdef INET6
637 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
638 #endif
639
640 off0 = *offp;
641 m = *mp;
642 *mp = NULL;
643 to.to_flags = 0;
644 TCPSTAT_INC(tcps_rcvtotal);
645
646 #ifdef INET6
647 if (isipv6) {
648 ip6 = mtod(m, struct ip6_hdr *);
649 th = (struct tcphdr *)((caddr_t)ip6 + off0);
650 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
651 if (port)
652 goto skip6_csum;
653 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
654 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
655 th->th_sum = m->m_pkthdr.csum_data;
656 else
657 th->th_sum = in6_cksum_pseudo(ip6, tlen,
658 IPPROTO_TCP, m->m_pkthdr.csum_data);
659 th->th_sum ^= 0xffff;
660 } else
661 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
662 if (th->th_sum) {
663 TCPSTAT_INC(tcps_rcvbadsum);
664 goto drop;
665 }
666 skip6_csum:
667 /*
668 * Be proactive about an unspecified IPv6 source address.
669 * As we use the all-zero address to indicate an unbound/unconnected
670 * pcb, an unspecified IPv6 source address can be used to confuse us.
671 *
672 * Note that packets with an unspecified IPv6 destination are
673 * already dropped in ip6_input.
674 */
675 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
676 /* XXX stat */
677 goto drop;
678 }
679 iptos = IPV6_TRAFFIC_CLASS(ip6);
680 }
681 #endif
682 #if defined(INET) && defined(INET6)
683 else
684 #endif
685 #ifdef INET
686 {
687 /*
688 * Get IP and TCP header together in first mbuf.
689 * Note: IP leaves IP header in first mbuf.
690 */
691 if (off0 > sizeof (struct ip)) {
692 ip_stripoptions(m);
693 off0 = sizeof(struct ip);
694 }
695 if (m->m_len < sizeof (struct tcpiphdr)) {
696 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
697 == NULL) {
698 TCPSTAT_INC(tcps_rcvshort);
699 return (IPPROTO_DONE);
700 }
701 }
702 ip = mtod(m, struct ip *);
703 th = (struct tcphdr *)((caddr_t)ip + off0);
704 tlen = ntohs(ip->ip_len) - off0;
705
706 iptos = ip->ip_tos;
707 if (port)
708 goto skip_csum;
709 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
710 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
711 th->th_sum = m->m_pkthdr.csum_data;
712 else
713 th->th_sum = in_pseudo(ip->ip_src.s_addr,
714 ip->ip_dst.s_addr,
715 htonl(m->m_pkthdr.csum_data + tlen +
716 IPPROTO_TCP));
717 th->th_sum ^= 0xffff;
718 } else {
719 struct ipovly *ipov = (struct ipovly *)ip;
720
721 /*
722 * Checksum extended TCP header and data.
723 */
724 len = off0 + tlen;
725 ipttl = ip->ip_ttl;
726 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
727 ipov->ih_len = htons(tlen);
728 th->th_sum = in_cksum(m, len);
729 /* Reset length for SDT probes. */
730 ip->ip_len = htons(len);
731 /* Reset TOS bits */
732 ip->ip_tos = iptos;
733 /* Re-initialization for later version check */
734 ip->ip_ttl = ipttl;
735 ip->ip_v = IPVERSION;
736 ip->ip_hl = off0 >> 2;
737 }
738 skip_csum:
739 if (th->th_sum && (port == 0)) {
740 TCPSTAT_INC(tcps_rcvbadsum);
741 goto drop;
742 }
743 }
744 #endif /* INET */
745
746 /*
747 * Check that TCP offset makes sense,
748 * pull out TCP options and adjust length. XXX
749 */
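/*
 * (th_off is in 32-bit words: 5 means a bare 20-byte header; the
 * maximum of 15 allows for up to 40 bytes of options.)
 */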
750 off = th->th_off << 2;
751 if (off < sizeof (struct tcphdr) || off > tlen) {
752 TCPSTAT_INC(tcps_rcvbadoff);
753 goto drop;
754 }
755 tlen -= off; /* tlen is used instead of ti->ti_len */
756 if (off > sizeof (struct tcphdr)) {
757 #ifdef INET6
758 if (isipv6) {
759 if (m->m_len < off0 + off) {
760 m = m_pullup(m, off0 + off);
761 if (m == NULL) {
762 TCPSTAT_INC(tcps_rcvshort);
763 return (IPPROTO_DONE);
764 }
765 }
766 ip6 = mtod(m, struct ip6_hdr *);
767 th = (struct tcphdr *)((caddr_t)ip6 + off0);
768 }
769 #endif
770 #if defined(INET) && defined(INET6)
771 else
772 #endif
773 #ifdef INET
774 {
775 if (m->m_len < sizeof(struct ip) + off) {
776 if ((m = m_pullup(m, sizeof (struct ip) + off))
777 == NULL) {
778 TCPSTAT_INC(tcps_rcvshort);
779 return (IPPROTO_DONE);
780 }
781 ip = mtod(m, struct ip *);
782 th = (struct tcphdr *)((caddr_t)ip + off0);
783 }
784 }
785 #endif
786 optlen = off - sizeof (struct tcphdr);
787 optp = (u_char *)(th + 1);
788 }
789 thflags = tcp_get_flags(th);
790
791 /*
792 * Convert TCP protocol specific fields to host format.
793 */
794 tcp_fields_to_host(th);
795
796 /*
797 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
798 */
799 drop_hdrlen = off0 + off;
800
801 /*
802 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
803 */
804 if (
805 #ifdef INET6
806 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
807 #ifdef INET
808 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
809 #endif
810 #endif
811 #if defined(INET) && !defined(INET6)
812 (m->m_flags & M_IP_NEXTHOP)
813 #endif
814 )
815 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
816
817 /*
818 * For initial SYN packets we don't need a write lock on the matching
819 * PCB, be it a listening one or a synchronized one. The packet
820 * shall not modify its state.
821 */
822 lookupflag = INPLOOKUP_WILDCARD |
823 ((thflags & (TH_ACK|TH_SYN)) == TH_SYN ?
824 INPLOOKUP_RLOCKPCB : INPLOOKUP_WLOCKPCB);
825 findpcb:
826 #ifdef INET6
827 if (isipv6 && fwd_tag != NULL) {
828 struct sockaddr_in6 *next_hop6;
829
830 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
831 /*
832 * Transparently forwarded. Pretend to be the destination.
833 * Already got one like this?
834 */
835 inp = in6_pcblookup_mbuf(&V_tcbinfo,
836 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
837 lookupflag & ~INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif, m);
838 if (!inp) {
839 /*
840 * It's new. Try to find the ambushing socket.
841 * Because we've rewritten the destination address,
842 * any hardware-generated hash is ignored.
843 */
844 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
845 th->th_sport, &next_hop6->sin6_addr,
846 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
847 th->th_dport, lookupflag, m->m_pkthdr.rcvif);
848 }
849 } else if (isipv6) {
850 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
851 th->th_sport, &ip6->ip6_dst, th->th_dport, lookupflag,
852 m->m_pkthdr.rcvif, m);
853 }
854 #endif /* INET6 */
855 #if defined(INET6) && defined(INET)
856 else
857 #endif
858 #ifdef INET
859 if (fwd_tag != NULL) {
860 struct sockaddr_in *next_hop;
861
862 next_hop = (struct sockaddr_in *)(fwd_tag+1);
863 /*
864 * Transparently forwarded. Pretend to be the destination.
865 * Already got one like this?
866 */
867 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
868 ip->ip_dst, th->th_dport, lookupflag & ~INPLOOKUP_WILDCARD,
869 m->m_pkthdr.rcvif, m);
870 if (!inp) {
871 /*
872 * It's new. Try to find the ambushing socket.
873 * Because we've rewritten the destination address,
874 * any hardware-generated hash is ignored.
875 */
876 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
877 th->th_sport, next_hop->sin_addr,
878 next_hop->sin_port ? ntohs(next_hop->sin_port) :
879 th->th_dport, lookupflag, m->m_pkthdr.rcvif);
880 }
881 } else
882 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
883 th->th_sport, ip->ip_dst, th->th_dport, lookupflag,
884 m->m_pkthdr.rcvif, m);
885 #endif /* INET */
886
887 /*
888 * If the INPCB does not exist then all data in the incoming
889 * segment is discarded and an appropriate RST is sent back.
890 * XXX MRT Send RST using which routing table?
891 */
892 if (inp == NULL) {
893 if (rstreason != 0) {
894 /* We came here after second (safety) lookup. */
895 MPASS((lookupflag & INPLOOKUP_WILDCARD) == 0);
896 goto dropwithreset;
897 }
898 /*
899 * Log communication attempts to ports that are not
900 * in use.
901 */
902 if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
903 V_tcp_log_in_vain == 2) {
904 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
905 log(LOG_INFO, "%s; %s: Connection attempt "
906 "to closed port\n", s, __func__);
907 }
908 /*
909 * When blackholing, do not respond with a RST but
910 * completely ignore the segment and drop it.
911 */
912 if (((V_blackhole == 1 && (thflags & TH_SYN)) ||
913 V_blackhole == 2) && (V_blackhole_local || (
914 #ifdef INET6
915 isipv6 ? !in6_localaddr(&ip6->ip6_src) :
916 #endif
917 #ifdef INET
918 !in_localip(ip->ip_src)
919 #else
920 true
921 #endif
922 )))
923 goto dropunlock;
924
925 rstreason = BANDLIM_RST_CLOSEDPORT;
926 goto dropwithreset;
927 }
928 INP_LOCK_ASSERT(inp);
929
930 if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
931 (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
932 !SOLISTENING(inp->inp_socket)) {
933 inp->inp_flowid = m->m_pkthdr.flowid;
934 inp->inp_flowtype = M_HASHTYPE_GET(m);
935 }
936 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
937 #ifdef INET6
938 if (isipv6 && IPSEC_ENABLED(ipv6) &&
939 IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
940 goto dropunlock;
941 }
942 #ifdef INET
943 else
944 #endif
945 #endif /* INET6 */
946 #ifdef INET
947 if (IPSEC_ENABLED(ipv4) &&
948 IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
949 goto dropunlock;
950 }
951 #endif /* INET */
952 #endif /* IPSEC */
953
954 /*
955 * Check the minimum TTL for the socket.
956 */
957 if (inp->inp_ip_minttl != 0) {
958 #ifdef INET6
959 if (isipv6) {
960 if (inp->inp_ip_minttl > ip6->ip6_hlim)
961 goto dropunlock;
962 } else
963 #endif
964 if (inp->inp_ip_minttl > ip->ip_ttl)
965 goto dropunlock;
966 }
967
968 tp = intotcpcb(inp);
969 switch (tp->t_state) {
970 case TCPS_TIME_WAIT:
971 /*
972 * A previous connection in TIMEWAIT state is supposed to catch
973 * stray or duplicate segments arriving late. If this segment
974 * was a legitimate new connection attempt, the old INPCB gets
975 * removed and we can try again to find a listening socket.
976 */
977 tcp_dooptions(&to, optp, optlen,
978 (thflags & TH_SYN) ? TO_SYN : 0);
979 /*
980 * tcp_twcheck always unlocks the inp and frees the mbuf if it fails.
981 */
982 if (tcp_twcheck(inp, &to, th, m, tlen))
983 goto findpcb;
984 return (IPPROTO_DONE);
985 case TCPS_CLOSED:
986 /*
987 * The TCPCB may no longer exist if the connection is winding
988 * down or it is in the CLOSED state. Either way we drop the
989 * segment and send an appropriate response.
990 */
991 rstreason = BANDLIM_RST_CLOSEDPORT;
992 goto dropwithreset;
993 }
994
995 if ((tp->t_port != port) && (tp->t_state > TCPS_LISTEN)) {
996 rstreason = BANDLIM_RST_CLOSEDPORT;
997 goto dropwithreset;
998 }
999
1000 #ifdef TCP_OFFLOAD
1001 if (tp->t_flags & TF_TOE) {
1002 tcp_offload_input(tp, m);
1003 m = NULL; /* consumed by the TOE driver */
1004 goto dropunlock;
1005 }
1006 #endif
1007
1008 #ifdef MAC
1009 if (mac_inpcb_check_deliver(inp, m))
1010 goto dropunlock;
1011 #endif
1012 so = inp->inp_socket;
1013 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1014 /*
1015 * When the socket is accepting connections (the INPCB is in LISTEN
1016 * state) we look into the SYN cache if this is a new connection
1017 * attempt or the completion of a previous one.
1018 */
1019 KASSERT(tp->t_state == TCPS_LISTEN || !SOLISTENING(so),
1020 ("%s: so accepting but tp %p not listening", __func__, tp));
1021 if (tp->t_state == TCPS_LISTEN && SOLISTENING(so)) {
1022 struct in_conninfo inc;
1023
1024 bzero(&inc, sizeof(inc));
1025 #ifdef INET6
1026 if (isipv6) {
1027 inc.inc_flags |= INC_ISIPV6;
1028 if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
1029 inc.inc_flags |= INC_IPV6MINMTU;
1030 inc.inc6_faddr = ip6->ip6_src;
1031 inc.inc6_laddr = ip6->ip6_dst;
1032 } else
1033 #endif
1034 {
1035 inc.inc_faddr = ip->ip_src;
1036 inc.inc_laddr = ip->ip_dst;
1037 }
1038 inc.inc_fport = th->th_sport;
1039 inc.inc_lport = th->th_dport;
1040 inc.inc_fibnum = so->so_fibnum;
1041
1042 /*
1043 * Check for an existing connection attempt in the syncache if
1044 * the only flag set is ACK. A successful lookup creates a new
1045 * socket appended to the listen queue in SYN_RECEIVED state.
1046 */
1047 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1048 /*
1049 * Parse the TCP options here because
1050 * syncookies need access to the reflected
1051 * timestamp.
1052 */
1053 tcp_dooptions(&to, optp, optlen, 0);
1054 /*
1055 * NB: syncache_expand() doesn't unlock inp.
1056 */
1057 rstreason = syncache_expand(&inc, &to, th, &so, m, port);
1058 if (rstreason < 0) {
1059 /*
1060 * A failing TCP MD5 signature comparison
1061 * must result in the segment being dropped
1062 * and must not produce any response back
1063 * to the sender.
1064 */
1065 goto dropunlock;
1066 } else if (rstreason == 0) {
1067 /*
1068 * No syncache entry, or ACK was not for our
1069 * SYN/ACK. Do our protection against double
1070 * ACK. If peer sent us 2 ACKs, then for the
1071 * first one syncache_expand() successfully
1072 * converted syncache entry into a socket,
1073 * while we were waiting on the inpcb lock. We
1074 * don't want to send a RST for the second ACK,
1075 * so we perform a second lookup without wildcard
1076 * match, hoping to find the new socket. If
1077 * the ACK is indeed stray, rstreason will
1078 * hint to the code above that the lookup was a
1079 * second attempt.
1080 *
1081 * NB: syncache did its own logging
1082 * of the failure cause.
1083 */
1084 INP_WUNLOCK(inp);
1085 rstreason = BANDLIM_RST_OPENPORT;
1086 lookupflag &= ~INPLOOKUP_WILDCARD;
1087 goto findpcb;
1088 }
1089 tfo_socket_result:
1090 if (so == NULL) {
1091 /*
1092 * We completed the 3-way handshake
1093 * but could not allocate a socket
1094 * either due to memory shortage,
1095 * listen queue length limits or
1096 * global socket limits. Send RST
1097 * or wait and have the remote end
1098 * retransmit the ACK for another
1099 * try.
1100 */
1101 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1102 log(LOG_DEBUG, "%s; %s: Listen socket: "
1103 "Socket allocation failed due to "
1104 "limits or memory shortage, %s\n",
1105 s, __func__,
1106 V_tcp_sc_rst_sock_fail ?
1107 "sending RST" : "try again");
1108 if (V_tcp_sc_rst_sock_fail) {
1109 rstreason = BANDLIM_UNLIMITED;
1110 goto dropwithreset;
1111 } else
1112 goto dropunlock;
1113 }
1114 /*
1115 * Socket is created in state SYN_RECEIVED.
1116 * Unlock the listen socket, lock the newly
1117 * created socket and update the tp variable.
1118 * If we came here via the jump to tfo_socket_result,
1119 * then the listening socket is read-locked.
1120 */
1121 INP_UNLOCK(inp); /* listen socket */
1122 inp = sotoinpcb(so);
1123 /*
1124 * New connection inpcb is already locked by
1125 * syncache_expand().
1126 */
1127 INP_WLOCK_ASSERT(inp);
1128 tp = intotcpcb(inp);
1129 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1130 ("%s: new socket not in SYN_RECEIVED state", __func__));
1131 /*
1132 * Process the segment and the data it
1133 * contains. tcp_do_segment() consumes
1134 * the mbuf chain and unlocks the inpcb.
1135 */
1136 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1137 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1138 iptos);
1139 return (IPPROTO_DONE);
1140 }
1141 /*
1142 * Segment flag validation for new connection attempts:
1143 *
1144 * Our (SYN|ACK) response was rejected.
1145 * Check with syncache and remove entry to prevent
1146 * retransmits.
1147 *
1148 * NB: syncache_chkrst does its own logging of failure
1149 * causes.
1150 */
1151 if (thflags & TH_RST) {
1152 syncache_chkrst(&inc, th, m, port);
1153 goto dropunlock;
1154 }
1155 /*
1156 * We can't do anything without SYN.
1157 */
1158 if ((thflags & TH_SYN) == 0) {
1159 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1160 log(LOG_DEBUG, "%s; %s: Listen socket: "
1161 "SYN is missing, segment ignored\n",
1162 s, __func__);
1163 TCPSTAT_INC(tcps_badsyn);
1164 goto dropunlock;
1165 }
1166 /*
1167 * (SYN|ACK) is bogus on a listen socket.
1168 */
1169 if (thflags & TH_ACK) {
1170 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1171 log(LOG_DEBUG, "%s; %s: Listen socket: "
1172 "SYN|ACK invalid, segment rejected\n",
1173 s, __func__);
1174 syncache_badack(&inc, port); /* XXX: Not needed! */
1175 TCPSTAT_INC(tcps_badsyn);
1176 rstreason = BANDLIM_RST_OPENPORT;
1177 goto dropwithreset;
1178 }
1179 /*
1180 * If the drop_synfin option is enabled, drop all
1181 * segments with both the SYN and FIN bits set.
1182 * This prevents e.g. nmap from identifying the
1183 * TCP/IP stack.
1184 * XXX: Poor reasoning. nmap has other methods
1185 * and is constantly refining its stack detection
1186 * strategies.
1187 * XXX: This is a violation of the TCP specification
1188 * and was used by RFC1644.
1189 */
1190 if ((thflags & TH_FIN) && V_drop_synfin) {
1191 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1192 log(LOG_DEBUG, "%s; %s: Listen socket: "
1193 "SYN|FIN segment ignored (based on "
1194 "sysctl setting)\n", s, __func__);
1195 TCPSTAT_INC(tcps_badsyn);
1196 goto dropunlock;
1197 }
1198 /*
1199 * Segment's flags are (SYN) or (SYN|FIN).
1200 *
1201 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1202 * as they do not affect the state of the TCP FSM.
1203 * The data pointed to by TH_URG and th_urp is ignored.
1204 */
1205 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1206 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1207 KASSERT(thflags & (TH_SYN),
1208 ("%s: Listen socket: TH_SYN not set", __func__));
1209 INP_RLOCK_ASSERT(inp);
1210 #ifdef INET6
1211 /*
1212 * If deprecated addresses are forbidden, we do not
1213 * accept a SYN to a deprecated interface address, to
1214 * prevent any new inbound connection from getting
1215 * established.
1216 * When we do not accept the SYN, we send a TCP RST
1217 * from the deprecated source address (instead of
1218 * dropping the segment). This is a compromise: it is
1219 * much better for the peer to receive a RST, and the
1220 * RST will be the final packet of the exchange.
1221 *
1222 * If we do not forbid deprecated addresses, we accept
1223 * the SYN packet. RFC2462 does not suggest dropping a
1224 * SYN in this case.
1225 * Reading RFC2462 5.5.4 closely, it says:
1226 * 1. use of a deprecated addr with existing
1227 * communication is okay - "SHOULD continue to be
1228 * used"
1229 * 2. use of it with new communication:
1230 * (2a) "SHOULD NOT be used if alternate address
1231 * with sufficient scope is available"
1232 * (2b) nothing mentioned otherwise.
1233 * Here we fall into case (2b), as we have no choice in
1234 * our source address selection - we must obey the peer.
1235 *
1236 * The wording in RFC2462 is confusing, and there are
1237 * multiple descriptions of deprecated address
1238 * handling - worse, they are not exactly the same.
1239 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1240 */
1241 if (isipv6 && !V_ip6_use_deprecated) {
1242 struct in6_ifaddr *ia6;
1243
1244 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
1245 if (ia6 != NULL &&
1246 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1247 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1248 log(LOG_DEBUG, "%s; %s: Listen socket: "
1249 "Connection attempt to deprecated "
1250 "IPv6 address rejected\n",
1251 s, __func__);
1252 rstreason = BANDLIM_RST_OPENPORT;
1253 goto dropwithreset;
1254 }
1255 }
1256 #endif /* INET6 */
1257 /*
1258 * Basic sanity checks on incoming SYN requests:
1259 * Don't respond if the destination is a link layer
1260 * broadcast according to RFC1122 4.2.3.10, p. 104.
1261 * If it is from this socket it must be forged.
1262 * Don't respond if the source or destination is a
1263 * global or subnet broad- or multicast address.
1264 * Note that it is quite possible to receive unicast
1265 * link-layer packets with a broadcast IP address. Use
1266 * in_broadcast() to find them.
1267 */
1268 if (m->m_flags & (M_BCAST|M_MCAST)) {
1269 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1270 log(LOG_DEBUG, "%s; %s: Listen socket: "
1271 "Connection attempt from broad- or multicast "
1272 "link layer address ignored\n", s, __func__);
1273 goto dropunlock;
1274 }
1275 #ifdef INET6
1276 if (isipv6) {
1277 if (th->th_dport == th->th_sport &&
1278 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1279 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1280 log(LOG_DEBUG, "%s; %s: Listen socket: "
1281 "Connection attempt to/from self "
1282 "ignored\n", s, __func__);
1283 goto dropunlock;
1284 }
1285 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1286 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1287 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1288 log(LOG_DEBUG, "%s; %s: Listen socket: "
1289 "Connection attempt from/to multicast "
1290 "address ignored\n", s, __func__);
1291 goto dropunlock;
1292 }
1293 }
1294 #endif
1295 #if defined(INET) && defined(INET6)
1296 else
1297 #endif
1298 #ifdef INET
1299 {
1300 if (th->th_dport == th->th_sport &&
1301 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1302 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1303 log(LOG_DEBUG, "%s; %s: Listen socket: "
1304 "Connection attempt from/to self "
1305 "ignored\n", s, __func__);
1306 goto dropunlock;
1307 }
1308 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1309 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1310 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1311 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1312 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1313 log(LOG_DEBUG, "%s; %s: Listen socket: "
1314 "Connection attempt from/to broad- "
1315 "or multicast address ignored\n",
1316 s, __func__);
1317 goto dropunlock;
1318 }
1319 }
1320 #endif
1321 /*
1322 * SYN appears to be valid. Create compressed TCP state
1323 * for syncache.
1324 */
1325 TCP_PROBE3(debug__input, tp, th, m);
1326 tcp_dooptions(&to, optp, optlen, TO_SYN);
1327 if ((so = syncache_add(&inc, &to, th, inp, so, m, NULL, NULL,
1328 iptos, port)) != NULL)
1329 goto tfo_socket_result;
1330
1331 /*
1332 * Entry added to syncache and mbuf consumed.
1333 * Only the listen socket is unlocked by syncache_add().
1334 */
1335 return (IPPROTO_DONE);
1336 } else if (tp->t_state == TCPS_LISTEN) {
1337 /*
1338 * When a listen socket is torn down the SO_ACCEPTCONN
1339 * flag is removed first while connections are drained
1340 * from the accept queue in an unlock/lock cycle of the
1341 * ACCEPT_LOCK, opening a race condition that allows a SYN
1342 * attempt to go through unhandled.
1343 */
1344 goto dropunlock;
1345 }
1346 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1347 if (tp->t_flags & TF_SIGNATURE) {
1348 tcp_dooptions(&to, optp, optlen, thflags);
1349 if ((to.to_flags & TOF_SIGNATURE) == 0) {
1350 TCPSTAT_INC(tcps_sig_err_nosigopt);
1351 goto dropunlock;
1352 }
1353 if (!TCPMD5_ENABLED() ||
1354 TCPMD5_INPUT(m, th, to.to_signature) != 0)
1355 goto dropunlock;
1356 }
1357 #endif
1358 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1359
1360 /*
1361 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1362 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1363 * the inpcb, and unlocks pcbinfo.
1364 *
1365 * XXXGL: in case of a pure SYN arriving on an existing connection,
1366 * TCP stacks won't need to modify the PCB; they would either drop
1367 * the segment silently, or send a challenge ACK. However, we try
1368 * to upgrade the lock, because calling convention for stacks is
1369 * write-lock on PCB. If upgrade fails, drop the SYN.
1370 */
1371 if ((lookupflag & INPLOOKUP_RLOCKPCB) && INP_TRY_UPGRADE(inp) == 0)
1372 goto dropunlock;
1373
1374 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
1375 return (IPPROTO_DONE);
1376
1377 dropwithreset:
1378 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1379
1380 if (inp != NULL) {
1381 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1382 INP_UNLOCK(inp);
1383 } else
1384 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1385 m = NULL; /* mbuf chain got consumed. */
1386 goto drop;
1387
1388 dropunlock:
1389 if (m != NULL)
1390 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1391
1392 if (inp != NULL)
1393 INP_UNLOCK(inp);
1394
1395 drop:
1396 if (s != NULL)
1397 free(s, M_TCPLOG);
1398 if (m != NULL)
1399 m_freem(m);
1400 return (IPPROTO_DONE);
1401 }
1402
1403 /*
1404 * Automatic sizing of receive socket buffer. Often the send
1405 * buffer size is not optimally adjusted to the actual network
1406 * conditions at hand (delay bandwidth product). Setting the
1407 * buffer size too small limits throughput on links with high
1408 * bandwidth and high delay (e.g. trans-continental/oceanic links).
1409 *
1410 * On the receive side the socket buffer memory is only rarely
1411 * used to any significant extent. This allows us to be much
1412 * more aggressive in scaling the receive socket buffer. For
1413 * the case that the buffer space is actually used to a large
1414 * extent and we run out of kernel memory we can simply drop
1415 * the new segments; TCP on the sender will just retransmit them
1416 * later. Setting the buffer size too big may only consume too
1417 * much kernel memory if the application doesn't read() from
1418 * the socket or packet loss or reordering makes use of the
1419 * reassembly queue.
1420 *
1421 * The criteria to step up the receive buffer one notch are:
1422 * 1. Application has not set receive buffer size with
1423 * SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
1424 * 2. The number of bytes received during 1/2 of an sRTT
1425 * is at least 3/8 of the current socket buffer size.
1426 * 3. The receive buffer size has not hit the maximal automatic size.
1427 *
1428 * If all of the criteria are met we increase the socket buffer
1429 * by half (bounded by the max). This allows us to keep ahead
1430 * of slow-start but also ensures our peer never gets limited
1431 * by our rwnd, which we would then have to open up, causing a burst.
1432 *
1433 * This algorithm does two steps per RTT at most and only if
1434 * we receive a bulk stream w/o packet losses or reorderings.
1435 * Shrinking the buffer during idle times is not necessary as
1436 * it doesn't consume any memory when idle.
1437 *
1438 * TODO: Only step up if the application is actually serving
1439 * the buffer to better manage the socket buffer resources.
1440 */
1441 int
1442 tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
1443 struct tcpcb *tp, int tlen)
1444 {
1445 int newsize = 0;
1446
1447 if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
1448 tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
1449 TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
1450 ((tp->t_srtt >> TCP_RTT_SHIFT)/2)) {
1451 if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2)/ 4 * 3) &&
1452 so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
1453 newsize = min((so->so_rcv.sb_hiwat + (so->so_rcv.sb_hiwat/2)), V_tcp_autorcvbuf_max);
1454 }
1455 TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);
1456
1457 /* Start over with next RTT. */
1458 tp->rfbuf_ts = 0;
1459 tp->rfbuf_cnt = 0;
1460 } else {
1461 tp->rfbuf_cnt += tlen; /* add up */
1462 }
1463 return (newsize);
1464 }
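/*
 * Worked example (illustrative numbers): with sb_hiwat at 64 KB the
 * step-up threshold is (65536 / 2) / 4 * 3 = 24576 bytes received within
 * half a smoothed RTT; once crossed, newsize becomes
 * min(65536 + 32768, V_tcp_autorcvbuf_max), i.e. 96 KB by default, and
 * the caller resizes the socket buffer accordingly.
 */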
1465
1466 int
1467 tcp_input(struct mbuf **mp, int *offp, int proto)
1468 {
1469 return(tcp_input_with_port(mp, offp, proto, 0));
1470 }
1471
1472 static void
1473 tcp_handle_wakeup(struct tcpcb *tp)
1474 {
1475
1476 INP_WLOCK_ASSERT(tptoinpcb(tp));
1477
1478 if (tp->t_flags & TF_WAKESOR) {
1479 struct socket *so = tptosocket(tp);
1480
1481 tp->t_flags &= ~TF_WAKESOR;
1482 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1483 sorwakeup_locked(so);
1484 }
1485 }
1486
1487 void
1488 tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
1489 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos)
1490 {
1491 uint16_t thflags;
1492 int acked, ourfinisacked, needoutput = 0, sack_changed;
1493 int rstreason, todrop, win, incforsyn = 0;
1494 uint32_t tiwin;
1495 uint16_t nsegs;
1496 char *s;
1497 struct inpcb *inp = tptoinpcb(tp);
1498 struct in_conninfo *inc = &inp->inp_inc;
1499 struct mbuf *mfree;
1500 struct tcpopt to;
1501 int tfo_syn;
1502 u_int maxseg;
1503
1504 thflags = tcp_get_flags(th);
1505 tp->sackhint.last_sack_ack = 0;
1506 sack_changed = 0;
1507 nsegs = max(1, m->m_pkthdr.lro_nsegs);
1508
1509 NET_EPOCH_ASSERT();
1510 INP_WLOCK_ASSERT(inp);
1511 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1512 __func__));
1513 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1514 __func__));
1515
1516 #ifdef TCPPCAP
1517 /* Save segment, if requested. */
1518 tcp_pcap_add(th, m, &(tp->t_inpkts));
1519 #endif
1520 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
1521 tlen, NULL, true);
1522
1523 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
1524 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1525 log(LOG_DEBUG, "%s; %s: "
1526 "SYN|FIN segment ignored (based on "
1527 "sysctl setting)\n", s, __func__);
1528 free(s, M_TCPLOG);
1529 }
1530 goto drop;
1531 }
1532
1533 /*
1534 * If a segment with the ACK-bit set arrives in the SYN-SENT state,
1535 * check SEQ.ACK first.
1536 */
1537 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
1538 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
1539 rstreason = BANDLIM_UNLIMITED;
1540 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
1541 goto dropwithreset;
1542 }
1543
1544 /*
1545 * Segment received on connection.
1546 * Reset idle time and keep-alive timer.
1547 * XXX: This should be done after segment
1548 * validation to ignore broken/spoofed segs.
1549 */
1550 if (tp->t_idle_reduce &&
1551 (tp->snd_max == tp->snd_una) &&
1552 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
1553 cc_after_idle(tp);
1554 tp->t_rcvtime = ticks;
1555
1556 if (thflags & TH_FIN)
1557 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
1558 /*
1559 * Scale up the window into a 32-bit value.
1560 * For the SYN_SENT state the scale is zero.
1561 */
1562 tiwin = th->th_win << tp->snd_scale;
1563 #ifdef STATS
1564 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
1565 #endif
1566
1567 /*
1568 * TCP ECN processing.
1569 */
1570 if (tcp_ecn_input_segment(tp, thflags, tlen,
1571 tcp_packets_this_ack(tp, th->th_ack),
1572 iptos))
1573 cc_cong_signal(tp, th, CC_ECN);
1574
1575 /*
1576 * Parse options on any incoming segment.
1577 */
1578 tcp_dooptions(&to, (u_char *)(th + 1),
1579 (th->th_off << 2) - sizeof(struct tcphdr),
1580 (thflags & TH_SYN) ? TO_SYN : 0);
1581
1582 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1583 if ((tp->t_flags & TF_SIGNATURE) != 0 &&
1584 (to.to_flags & TOF_SIGNATURE) == 0) {
1585 TCPSTAT_INC(tcps_sig_err_sigopt);
1586 /* XXX: should drop? */
1587 }
1588 #endif
1589 /*
1590 * If the echoed timestamp is later than the current time,
1591 * fall back to non-RFC1323 RTT calculation. Normalize the
1592 * timestamp if syncookies were used when this connection
1593 * was established.
1594 */
1595 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1596 to.to_tsecr -= tp->ts_offset;
1597 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
1598 to.to_tsecr = 0;
1599 else if (tp->t_rxtshift == 1 &&
1600 tp->t_flags & TF_PREVVALID &&
1601 tp->t_badrxtwin != 0 &&
1602 TSTMP_LT(to.to_tsecr, tp->t_badrxtwin))
1603 cc_cong_signal(tp, th, CC_RTO_ERR);
1604 }
1605 /*
1606 * Process options only when we get SYN/ACK back. The SYN case
1607 * for incoming connections is handled in tcp_syncache.
1608 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
1609 * or <SYN,ACK>) segment itself is never scaled.
1610 * XXX this is traditional behavior, may need to be cleaned up.
1611 */
1612 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1613 /* Handle parallel SYN for ECN */
1614 tcp_ecn_input_parallel_syn(tp, thflags, iptos);
1615 if ((to.to_flags & TOF_SCALE) &&
1616 (tp->t_flags & TF_REQ_SCALE) &&
1617 !(tp->t_flags & TF_NOOPT)) {
1618 tp->t_flags |= TF_RCVD_SCALE;
1619 tp->snd_scale = to.to_wscale;
1620 } else
1621 tp->t_flags &= ~TF_REQ_SCALE;
1622 /*
1623 * Initial send window. It will be updated with
1624 * the next incoming segment to the scaled value.
1625 */
1626 tp->snd_wnd = th->th_win;
1627 if ((to.to_flags & TOF_TS) &&
1628 (tp->t_flags & TF_REQ_TSTMP) &&
1629 !(tp->t_flags & TF_NOOPT)) {
1630 tp->t_flags |= TF_RCVD_TSTMP;
1631 tp->ts_recent = to.to_tsval;
1632 tp->ts_recent_age = tcp_ts_getticks();
1633 } else
1634 tp->t_flags &= ~TF_REQ_TSTMP;
1635 if (to.to_flags & TOF_MSS)
1636 tcp_mss(tp, to.to_mss);
1637 if ((tp->t_flags & TF_SACK_PERMIT) &&
1638 (!(to.to_flags & TOF_SACKPERM) ||
1639 (tp->t_flags & TF_NOOPT)))
1640 tp->t_flags &= ~TF_SACK_PERMIT;
1641 if (IS_FASTOPEN(tp->t_flags)) {
1642 if ((to.to_flags & TOF_FASTOPEN) &&
1643 !(tp->t_flags & TF_NOOPT)) {
1644 uint16_t mss;
1645
1646 if (to.to_flags & TOF_MSS)
1647 mss = to.to_mss;
1648 else
1649 if ((inp->inp_vflag & INP_IPV6) != 0)
1650 mss = TCP6_MSS;
1651 else
1652 mss = TCP_MSS;
1653 tcp_fastopen_update_cache(tp, mss,
1654 to.to_tfo_len, to.to_tfo_cookie);
1655 } else
1656 tcp_fastopen_disable_path(tp);
1657 }
1658 }
1659
1660 /*
1661 * If timestamps were negotiated during SYN/ACK and a
1662 * segment without a timestamp is received, silently drop
1663 * the segment, unless it is a RST segment or missing timestamps are
1664 * tolerated.
1665 * See section 3.2 of RFC 7323.
1666 */
1667 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
1668 if (((thflags & TH_RST) != 0) || V_tcp_tolerate_missing_ts) {
1669 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1670 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1671 "segment processed normally\n",
1672 s, __func__);
1673 free(s, M_TCPLOG);
1674 }
1675 } else {
1676 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1677 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1678 "segment silently dropped\n", s, __func__);
1679 free(s, M_TCPLOG);
1680 }
1681 goto drop;
1682 }
1683 }
1684 /*
1685 * If timestamps were not negotiated during SYN/ACK and a
1686 * segment with a timestamp is received, ignore the
1687 * timestamp and process the packet normally.
1688 * See section 3.2 of RFC 7323.
1689 */
1690 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
1691 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1692 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1693 "segment processed normally\n", s, __func__);
1694 free(s, M_TCPLOG);
1695 }
1696 }
1697
1698 /*
1699 * Header prediction: check for the two common cases
1700 * of a uni-directional data xfer. If the packet has
1701 * no control flags, is in-sequence, the window didn't
1702 * change and we're not retransmitting, it's a
1703 * candidate. If the length is zero and the ack moved
1704 * forward, we're the sender side of the xfer. Just
1705 * free the data acked & wake any higher level process
1706 * that was blocked waiting for space. If the length
1707 * is non-zero and the ack didn't move, we're the
1708 * receiver side. If we're getting packets in-order
1709 * (the reassembly queue is empty), add the data to
1710 * the socket buffer and note that we need a delayed ack.
1711 * Make sure that the hidden state-flags are also off.
1712 * Since we check for TCPS_ESTABLISHED first, it can only
1713 * be TF_NEEDSYN.
1714 */
1715 if (tp->t_state == TCPS_ESTABLISHED &&
1716 th->th_seq == tp->rcv_nxt &&
1717 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1718 tp->snd_nxt == tp->snd_max &&
1719 tiwin && tiwin == tp->snd_wnd &&
1720 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1721 SEGQ_EMPTY(tp) &&
1722 ((to.to_flags & TOF_TS) == 0 ||
1723 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1724 /*
1725 * If last ACK falls within this segment's sequence numbers,
1726 * record the timestamp.
1727 * NOTE that the test is modified according to the latest
1728 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1729 */
1730 if ((to.to_flags & TOF_TS) != 0 &&
1731 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1732 tp->ts_recent_age = tcp_ts_getticks();
1733 tp->ts_recent = to.to_tsval;
1734 }
1735
1736 if (tlen == 0) {
1737 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1738 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1739 !IN_RECOVERY(tp->t_flags) &&
1740 (to.to_flags & TOF_SACK) == 0 &&
1741 TAILQ_EMPTY(&tp->snd_holes)) {
1742 /*
1743 * This is a pure ack for outstanding data.
1744 */
1745 TCPSTAT_INC(tcps_predack);
1746
1747 /*
1748 * "bad retransmit" recovery without timestamps.
1749 */
1750 if ((to.to_flags & TOF_TS) == 0 &&
1751 tp->t_rxtshift == 1 &&
1752 tp->t_flags & TF_PREVVALID &&
1753 tp->t_badrxtwin != 0 &&
1754 TSTMP_LT(ticks, tp->t_badrxtwin)) {
1755 cc_cong_signal(tp, th, CC_RTO_ERR);
1756 }
1757
1758 /*
1759 * Recalculate the transmit timer / rtt.
1760 *
1761 * Some boxes send broken timestamp replies
1762 * during the SYN+ACK phase; ignore
1763 * timestamps of 0, or we could calculate a
1764 * huge RTT and blow up the retransmit timer.
1765 */
1766 if ((to.to_flags & TOF_TS) != 0 &&
1767 to.to_tsecr) {
1768 uint32_t t;
1769
1770 t = tcp_ts_getticks() - to.to_tsecr;
1771 if (!tp->t_rttlow || tp->t_rttlow > t)
1772 tp->t_rttlow = t;
1773 tcp_xmit_timer(tp,
1774 TCP_TS_TO_TICKS(t) + 1);
1775 } else if (tp->t_rtttime &&
1776 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1777 if (!tp->t_rttlow ||
1778 tp->t_rttlow > ticks - tp->t_rtttime)
1779 tp->t_rttlow = ticks - tp->t_rtttime;
1780 tcp_xmit_timer(tp,
1781 ticks - tp->t_rtttime);
1782 }
1783 acked = BYTES_THIS_ACK(tp, th);
1784
1785 #ifdef TCP_HHOOK
1786 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1787 hhook_run_tcp_est_in(tp, th, &to);
1788 #endif
1789
1790 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
1791 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1792 sbdrop(&so->so_snd, acked);
1793 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1794 SEQ_LEQ(th->th_ack, tp->snd_recover))
1795 tp->snd_recover = th->th_ack - 1;
1796
1797 /*
1798 * Let the congestion control algorithm update
1799 * congestion control related information. This
1800 * typically means increasing the congestion
1801 * window.
1802 */
1803 cc_ack_received(tp, th, nsegs, CC_ACK);
1804
1805 tp->snd_una = th->th_ack;
1806 /*
1807 * Pull snd_wl2 up to prevent seq wrap relative
1808 * to th_ack.
1809 */
1810 tp->snd_wl2 = th->th_ack;
1811 tp->t_dupacks = 0;
1812 m_freem(m);
1813
1814 /*
1815 * If all outstanding data are acked, stop
1816 * retransmit timer, otherwise restart timer
1817 * using current (possibly backed-off) value.
1818 * If process is waiting for space,
1819 * wakeup/selwakeup/signal. If data
1820 * are ready to send, let tcp_output
1821 * decide between more output or persist.
1822 */
1823 TCP_PROBE3(debug__input, tp, th, m);
1824 /*
1825 * Clear t_acktime if remote side has ACKd
1826 * all data in the socket buffer.
1827 * Otherwise, update t_acktime if we received
1828 * a sufficiently large ACK.
1829 */
1830 if (sbavail(&so->so_snd) == 0)
1831 tp->t_acktime = 0;
1832 else if (acked > 1)
1833 tp->t_acktime = ticks;
1834 if (tp->snd_una == tp->snd_max)
1835 tcp_timer_activate(tp, TT_REXMT, 0);
1836 else if (!tcp_timer_active(tp, TT_PERSIST))
1837 tcp_timer_activate(tp, TT_REXMT,
1838 TP_RXTCUR(tp));
1839 sowwakeup(so);
1840 if (sbavail(&so->so_snd))
1841 (void) tcp_output(tp);
1842 goto check_delack;
1843 }
1844 } else if (th->th_ack == tp->snd_una &&
1845 tlen <= sbspace(&so->so_rcv)) {
1846 int newsize = 0; /* automatic sockbuf scaling */
1847
1848 /*
1849 * This is a pure, in-sequence data packet with
1850 * nothing on the reassembly queue and we have enough
1851 * buffer space to take it.
1852 */
1853 /* Clean receiver SACK report if present */
1854 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1855 tcp_clean_sackreport(tp);
1856 TCPSTAT_INC(tcps_preddat);
1857 tp->rcv_nxt += tlen;
1858 if (tlen &&
1859 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
1860 (tp->t_fbyte_in == 0)) {
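/*
 * Record the time the first byte was received; 0 means
 * "not seen yet", so if ticks happens to be 0 substitute 1.
 */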
1861 tp->t_fbyte_in = ticks;
1862 if (tp->t_fbyte_in == 0)
1863 tp->t_fbyte_in = 1;
1864 if (tp->t_fbyte_out && tp->t_fbyte_in)
1865 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
1866 }
1867 /*
1868 * Pull snd_wl1 up to prevent seq wrap relative to
1869 * th_seq.
1870 */
1871 tp->snd_wl1 = th->th_seq;
1872 /*
1873 * Pull rcv_up up to prevent seq wrap relative to
1874 * rcv_nxt.
1875 */
1876 tp->rcv_up = tp->rcv_nxt;
1877 TCPSTAT_ADD(tcps_rcvpack, nsegs);
1878 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1879 TCP_PROBE3(debug__input, tp, th, m);
1880
1881 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
1882
1883 /* Add data to socket buffer. */
1884 SOCKBUF_LOCK(&so->so_rcv);
1885 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1886 m_freem(m);
1887 } else {
1888 /*
1889 * Set new socket buffer size.
1890 * Give up when limit is reached.
1891 */
1892 if (newsize)
1893 if (!sbreserve_locked(so, SO_RCV,
1894 newsize, NULL))
1895 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1896 m_adj(m, drop_hdrlen); /* delayed header drop */
1897 sbappendstream_locked(&so->so_rcv, m, 0);
1898 }
1899 /* NB: sorwakeup_locked() does an implicit unlock. */
1900 sorwakeup_locked(so);
1901 if (DELAY_ACK(tp, tlen)) {
1902 tp->t_flags |= TF_DELACK;
1903 } else {
1904 tp->t_flags |= TF_ACKNOW;
1905 tcp_output(tp);
1906 }
1907 goto check_delack;
1908 }
1909 }
1910
1911 /*
1912 * Calculate amount of space in receive window,
1913 * and then do TCP input processing.
1914 * Receive window is amount of space in rcv queue,
1915 * but not less than advertised window.
1916 */
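/*
 * I.e., rcv_wnd = max(socket buffer space, rcv_adv - rcv_nxt):
 * the window may never shrink below what we have already
 * advertised, since the peer is entitled to send into that
 * range.
 */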
1917 win = sbspace(&so->so_rcv);
1918 if (win < 0)
1919 win = 0;
1920 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1921
1922 switch (tp->t_state) {
1923 /*
1924 * If the state is SYN_RECEIVED:
1925 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1926 */
1927 case TCPS_SYN_RECEIVED:
1928 if ((thflags & TH_ACK) &&
1929 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1930 SEQ_GT(th->th_ack, tp->snd_max))) {
1931 rstreason = BANDLIM_RST_OPENPORT;
1932 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
1933 goto dropwithreset;
1934 }
1935 if (IS_FASTOPEN(tp->t_flags)) {
1936 /*
1937 * When a TFO connection is in SYN_RECEIVED, the
1938 * only valid packets are the initial SYN, a
1939 * retransmit/copy of the initial SYN (possibly with
1940 * a subset of the original data), a valid ACK, a
1941 * FIN, or a RST.
1942 */
1943 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
1944 rstreason = BANDLIM_RST_OPENPORT;
1945 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
1946 goto dropwithreset;
1947 } else if (thflags & TH_SYN) {
1948 /* non-initial SYN is ignored */
1949 if ((tcp_timer_active(tp, TT_DELACK) ||
1950 tcp_timer_active(tp, TT_REXMT)))
1951 goto drop;
1952 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
1953 goto drop;
1954 }
1955 }
1956 break;
1957
1958 /*
1959 * If the state is SYN_SENT:
1960 * if seg contains a RST with valid ACK (SEQ.ACK has already
1961 * been verified), then drop the connection.
1962 * if seg contains a RST without an ACK, drop the seg.
1963 * if seg does not contain SYN, then drop the seg.
1964 * Otherwise this is an acceptable SYN segment
1965 * initialize tp->rcv_nxt and tp->irs
1966 * if seg contains ack then advance tp->snd_una
1967 * if seg contains an ECE and ECN support is enabled, the stream
1968 * is ECN capable.
1969 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1970 * arrange for segment to be acked (eventually)
1971 * continue processing rest of data/controls, beginning with URG
1972 */
1973 case TCPS_SYN_SENT:
1974 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
1975 TCP_PROBE5(connect__refused, NULL, tp,
1976 m, tp, th);
1977 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
1978 tp = tcp_drop(tp, ECONNREFUSED);
1979 }
1980 if (thflags & TH_RST)
1981 goto drop;
1982 if (!(thflags & TH_SYN))
1983 goto drop;
1984
1985 tp->irs = th->th_seq;
1986 tcp_rcvseqinit(tp);
1987 if (thflags & TH_ACK) {
1988 int tfo_partial_ack = 0;
1989
1990 TCPSTAT_INC(tcps_connects);
1991 soisconnected(so);
1992 #ifdef MAC
1993 mac_socketpeer_set_from_mbuf(m, so);
1994 #endif
1995 /* Do window scaling on this connection? */
1996 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1997 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1998 tp->rcv_scale = tp->request_r_scale;
1999 }
2000 tp->rcv_adv += min(tp->rcv_wnd,
2001 TCP_MAXWIN << tp->rcv_scale);
2002 tp->snd_una++; /* SYN is acked */
2003 /*
2004 * If not all the data that was sent in the TFO SYN
2005 * has been acked, resend the remainder right away.
2006 */
2007 if (IS_FASTOPEN(tp->t_flags) &&
2008 (tp->snd_una != tp->snd_max)) {
2009 tp->snd_nxt = th->th_ack;
2010 tfo_partial_ack = 1;
2011 }
2012 /*
2013 * If there's data, delay ACK; if there's also a FIN
2014 * ACKNOW will be turned on later.
2015 */
2016 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack)
2017 tcp_timer_activate(tp, TT_DELACK,
2018 tcp_delacktime);
2019 else
2020 tp->t_flags |= TF_ACKNOW;
2021
2022 tcp_ecn_input_syn_sent(tp, thflags, iptos);
2023
2024 /*
2025 * Received <SYN,ACK> in SYN_SENT[*] state.
2026 * Transitions:
2027 * SYN_SENT --> ESTABLISHED
2028 * SYN_SENT* --> FIN_WAIT_1
2029 */
2030 tp->t_starttime = ticks;
2031 if (tp->t_flags & TF_NEEDFIN) {
2032 tp->t_acktime = ticks;
2033 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2034 tp->t_flags &= ~TF_NEEDFIN;
2035 thflags &= ~TH_SYN;
2036 } else {
2037 tcp_state_change(tp, TCPS_ESTABLISHED);
2038 TCP_PROBE5(connect__established, NULL, tp,
2039 m, tp, th);
2040 cc_conn_init(tp);
2041 tcp_timer_activate(tp, TT_KEEP,
2042 TP_KEEPIDLE(tp));
2043 }
2044 } else {
2045 /*
2046 * Received initial SYN in SYN-SENT[*] state =>
2047 * simultaneous open.
* If it succeeds, the connection is half-synchronized.
2049 * Otherwise, do 3-way handshake:
2050 * SYN-SENT -> SYN-RECEIVED
2051 * SYN-SENT* -> SYN-RECEIVED*
2052 */
2053 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
2054 tcp_timer_activate(tp, TT_REXMT, 0);
2055 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2056 }
2057
2058 /*
2059 * Advance th->th_seq to correspond to first data byte.
2060 * If data, trim to stay within window,
2061 * dropping FIN if necessary.
2062 */
2063 th->th_seq++;
2064 if (tlen > tp->rcv_wnd) {
2065 todrop = tlen - tp->rcv_wnd;
2066 m_adj(m, -todrop);
2067 tlen = tp->rcv_wnd;
2068 thflags &= ~TH_FIN;
2069 TCPSTAT_INC(tcps_rcvpackafterwin);
2070 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2071 }
2072 tp->snd_wl1 = th->th_seq - 1;
2073 tp->rcv_up = th->th_seq;
2074 /*
2075 * Client side of transaction: already sent SYN and data.
2076 * If the remote host used T/TCP to validate the SYN,
2077 * our data will be ACK'd; if so, enter normal data segment
2078 * processing in the middle of step 5, ack processing.
2079 * Otherwise, goto step 6.
2080 */
2081 if (thflags & TH_ACK)
2082 goto process_ACK;
2083
2084 goto step6;
2085 }
2086
2087 /*
2088 * States other than LISTEN or SYN_SENT.
2089 * First check the RST flag and sequence number since reset segments
2090 * are exempt from the timestamp and connection count tests. This
2091 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2092 * below which allowed reset segments in half the sequence space
* to fall through and be processed (which gives forged reset
2094 * segments with a random sequence number a 50 percent chance of
2095 * killing a connection).
2096 * Then check timestamp, if present.
2097 * Then check the connection count, if present.
2098 * Then check that at least some bytes of segment are within
2099 * receive window. If segment begins before rcv_nxt,
2100 * drop leading data (and SYN); if nothing left, just ack.
2101 */
2102 if (thflags & TH_RST) {
2103 /*
2104 * RFC5961 Section 3.2
2105 *
2106 * - RST drops connection only if SEG.SEQ == RCV.NXT.
2107 * - If RST is in window, we send challenge ACK.
2108 *
2109 * Note: to take into account delayed ACKs, we should
2110 * test against last_ack_sent instead of rcv_nxt.
2111 * Note 2: we handle special case of closed window, not
2112 * covered by the RFC.
2113 */
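/*
 * For example, with last_ack_sent = 1000 and rcv_wnd = 5000:
 * a RST at seq 1000 tears the connection down, a RST at seq
 * 3000 only elicits a challenge ACK (unless
 * V_tcp_insecure_rst is set), and a RST at seq 7000 is
 * silently discarded.
 */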
2114 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2115 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2116 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
2117 KASSERT(tp->t_state != TCPS_SYN_SENT,
2118 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
2119 __func__, th, tp));
2120
2121 if (V_tcp_insecure_rst ||
2122 tp->last_ack_sent == th->th_seq) {
2123 TCPSTAT_INC(tcps_drops);
2124 /* Drop the connection. */
2125 switch (tp->t_state) {
2126 case TCPS_SYN_RECEIVED:
2127 so->so_error = ECONNREFUSED;
2128 goto close;
2129 case TCPS_ESTABLISHED:
2130 case TCPS_FIN_WAIT_1:
2131 case TCPS_FIN_WAIT_2:
2132 case TCPS_CLOSE_WAIT:
2133 case TCPS_CLOSING:
2134 case TCPS_LAST_ACK:
2135 so->so_error = ECONNRESET;
2136 close:
2137 /* FALLTHROUGH */
2138 default:
2139 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_RST);
2140 tp = tcp_close(tp);
2141 }
2142 } else {
2143 TCPSTAT_INC(tcps_badrst);
2144 /* Send challenge ACK. */
2145 tcp_respond(tp, mtod(m, void *), th, m,
2146 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
2147 tp->last_ack_sent = tp->rcv_nxt;
2148 m = NULL;
2149 }
2150 }
2151 goto drop;
2152 }
2153
2154 /*
2155 * RFC5961 Section 4.2
2156 * Send challenge ACK for any SYN in synchronized state.
2157 */
2158 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT &&
2159 tp->t_state != TCPS_SYN_RECEIVED) {
2160 TCPSTAT_INC(tcps_badsyn);
2161 if (V_tcp_insecure_syn &&
2162 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2163 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2164 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
2165 tp = tcp_drop(tp, ECONNRESET);
2166 rstreason = BANDLIM_UNLIMITED;
2167 } else {
2168 tcp_ecn_input_syn_sent(tp, thflags, iptos);
2169 /* Send challenge ACK. */
2170 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
2171 tp->snd_nxt, TH_ACK);
2172 tp->last_ack_sent = tp->rcv_nxt;
2173 m = NULL;
2174 }
2175 goto drop;
2176 }
2177
2178 /*
2179 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2180 * and it's less than ts_recent, drop it.
2181 */
2182 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2183 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2184 /* Check to see if ts_recent is over 24 days old. */
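/*
 * The 24 days stem from the RFC 7323 PAWS analysis: with the
 * millisecond timestamp clock used by tcp_ts_getticks(), the
 * 31-bit comparison space wraps after 2^31 ms, roughly 24.8
 * days, so an older ts_recent can no longer be compared
 * reliably and must be invalidated.
 */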
2185 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2186 /*
2187 * Invalidate ts_recent. If this segment updates
2188 * ts_recent, the age will be reset later and ts_recent
2189 * will get a valid value. If it does not, setting
2190 * ts_recent to zero will at least satisfy the
2191 * requirement that zero be placed in the timestamp
2192 * echo reply when ts_recent isn't valid. The
2193 * age isn't reset until we get a valid ts_recent
2194 * because we don't want out-of-order segments to be
2195 * dropped when ts_recent is old.
2196 */
2197 tp->ts_recent = 0;
2198 } else {
2199 TCPSTAT_INC(tcps_rcvduppack);
2200 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2201 TCPSTAT_INC(tcps_pawsdrop);
2202 if (tlen)
2203 goto dropafterack;
2204 goto drop;
2205 }
2206 }
2207
2208 /*
2209 * In the SYN-RECEIVED state, validate that the packet belongs to
2210 * this connection before trimming the data to fit the receive
2211 * window. Check the sequence number versus IRS since we know
2212 * the sequence numbers haven't wrapped. This is a partial fix
2213 * for the "LAND" DoS attack.
2214 */
2215 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2216 rstreason = BANDLIM_RST_OPENPORT;
2217 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
2218 goto dropwithreset;
2219 }
2220
2221 todrop = tp->rcv_nxt - th->th_seq;
2222 if (todrop > 0) {
2223 if (thflags & TH_SYN) {
2224 thflags &= ~TH_SYN;
2225 th->th_seq++;
2226 if (th->th_urp > 1)
2227 th->th_urp--;
2228 else
2229 thflags &= ~TH_URG;
2230 todrop--;
2231 }
2232 /*
* The following if statement is from Stevens, vol. 2, p. 960.
2234 */
2235 if (todrop > tlen
2236 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2237 /*
2238 * Any valid FIN must be to the left of the window.
2239 * At this point the FIN must be a duplicate or out
2240 * of sequence; drop it.
2241 */
2242 thflags &= ~TH_FIN;
2243
2244 /*
2245 * Send an ACK to resynchronize and drop any data.
2246 * But keep on processing for RST or ACK.
2247 */
2248 tp->t_flags |= TF_ACKNOW;
2249 todrop = tlen;
2250 TCPSTAT_INC(tcps_rcvduppack);
2251 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2252 } else {
2253 TCPSTAT_INC(tcps_rcvpartduppack);
2254 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2255 }
2256 /*
2257 * DSACK - add SACK block for dropped range
2258 */
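/*
 * For example, if the peer retransmits [1000, 2000) although
 * we have already ACKed up to 2000, reporting the duplicate
 * range back as a DSACK block (RFC 2883) lets the sender
 * recognize the retransmission as spurious.
 */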
2259 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) {
2260 tcp_update_sack_list(tp, th->th_seq,
2261 th->th_seq + todrop);
2262 /*
2263 * ACK now, as the next in-sequence segment
2264 * will clear the DSACK block again
2265 */
2266 tp->t_flags |= TF_ACKNOW;
2267 }
2268 drop_hdrlen += todrop; /* drop from the top afterwards */
2269 th->th_seq += todrop;
2270 tlen -= todrop;
2271 if (th->th_urp > todrop)
2272 th->th_urp -= todrop;
2273 else {
2274 thflags &= ~TH_URG;
2275 th->th_urp = 0;
2276 }
2277 }
2278
2279 /*
2280 * If new data are received on a connection after the
2281 * user processes are gone, then RST the other end.
2282 */
2283 if ((tp->t_flags & TF_CLOSED) && tlen) {
2284 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2285 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2286 "after socket was closed, "
2287 "sending RST and removing tcpcb\n",
2288 s, __func__, tcpstates[tp->t_state], tlen);
2289 free(s, M_TCPLOG);
2290 }
2291 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
/* tcp_close() will kill the inp, so pre-log the reset. */
2293 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
2294 tp = tcp_close(tp);
2295 TCPSTAT_INC(tcps_rcvafterclose);
2296 rstreason = BANDLIM_UNLIMITED;
2297 goto dropwithreset;
2298 }
2299
2300 /*
2301 * If segment ends after window, drop trailing data
2302 * (and PUSH and FIN); if nothing left, just ACK.
2303 */
2304 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2305 if (todrop > 0) {
2306 TCPSTAT_INC(tcps_rcvpackafterwin);
2307 if (todrop >= tlen) {
2308 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2309 /*
2310 * If window is closed can only take segments at
2311 * window edge, and have to drop data and PUSH from
2312 * incoming segments. Continue processing, but
2313 * remember to ack. Otherwise, drop segment
2314 * and ack.
2315 */
2316 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2317 tp->t_flags |= TF_ACKNOW;
2318 TCPSTAT_INC(tcps_rcvwinprobe);
2319 } else
2320 goto dropafterack;
2321 } else
2322 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2323 m_adj(m, -todrop);
2324 tlen -= todrop;
2325 thflags &= ~(TH_PUSH|TH_FIN);
2326 }
2327
2328 /*
2329 * If last ACK falls within this segment's sequence numbers,
2330 * record its timestamp.
2331 * NOTE:
2332 * 1) That the test incorporates suggestions from the latest
2333 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2334 * 2) That updating only on newer timestamps interferes with
2335 * our earlier PAWS tests, so this check should be solely
2336 * predicated on the sequence space of this segment.
2337 * 3) That we modify the segment boundary check to be
2338 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2339 * instead of RFC1323's
* Last.ACK.Sent < SEG.SEQ + SEG.Len.
* This modified check allows us to overcome RFC1323's
2342 * limitations as described in Stevens TCP/IP Illustrated
2343 * Vol. 2 p.869. In such cases, we can still calculate the
2344 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2345 */
2346 if ((to.to_flags & TOF_TS) != 0 &&
2347 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2348 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2349 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2350 tp->ts_recent_age = tcp_ts_getticks();
2351 tp->ts_recent = to.to_tsval;
2352 }
2353
2354 /*
2355 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2356 * flag is on (half-synchronized state), then queue data for
2357 * later processing; else drop segment and return.
2358 */
2359 if ((thflags & TH_ACK) == 0) {
2360 if (tp->t_state == TCPS_SYN_RECEIVED ||
2361 (tp->t_flags & TF_NEEDSYN)) {
2362 if (tp->t_state == TCPS_SYN_RECEIVED &&
2363 IS_FASTOPEN(tp->t_flags)) {
2364 tp->snd_wnd = tiwin;
2365 cc_conn_init(tp);
2366 }
2367 goto step6;
2368 } else if (tp->t_flags & TF_ACKNOW)
2369 goto dropafterack;
2370 else
2371 goto drop;
2372 }
2373
2374 /*
2375 * Ack processing.
2376 */
2377 switch (tp->t_state) {
2378 /*
2379 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2380 * ESTABLISHED state and continue processing.
2381 * The ACK was checked above.
2382 */
2383 case TCPS_SYN_RECEIVED:
2384
2385 TCPSTAT_INC(tcps_connects);
2386 if (tp->t_flags & TF_SONOTCONN) {
2387 /*
2388 * Usually SYN_RECEIVED had been created from a LISTEN,
2389 * and solisten_enqueue() has already marked the socket
2390 * layer as connected. If it didn't, which can happen
2391 * only with an accept_filter(9), then the tp is marked
2392 * with TF_SONOTCONN. The other reason for this mark
2393 * to be set is a simultaneous open, a SYN_RECEIVED
2394 * that had been created from SYN_SENT.
2395 */
2396 tp->t_flags &= ~TF_SONOTCONN;
2397 soisconnected(so);
2398 }
2399 /* Do window scaling? */
2400 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2401 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2402 tp->rcv_scale = tp->request_r_scale;
2403 }
2404 tp->snd_wnd = tiwin;
2405 /*
2406 * Make transitions:
2407 * SYN-RECEIVED -> ESTABLISHED
2408 * SYN-RECEIVED* -> FIN-WAIT-1
2409 */
2410 tp->t_starttime = ticks;
2411 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
2412 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2413 tp->t_tfo_pending = NULL;
2414 }
2415 if (tp->t_flags & TF_NEEDFIN) {
2416 tp->t_acktime = ticks;
2417 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2418 tp->t_flags &= ~TF_NEEDFIN;
2419 } else {
2420 tcp_state_change(tp, TCPS_ESTABLISHED);
2421 TCP_PROBE5(accept__established, NULL, tp,
2422 m, tp, th);
2423 /*
2424 * TFO connections call cc_conn_init() during SYN
2425 * processing. Calling it again here for such
2426 * connections is not harmless as it would undo the
2427 * snd_cwnd reduction that occurs when a TFO SYN|ACK
2428 * is retransmitted.
2429 */
2430 if (!IS_FASTOPEN(tp->t_flags))
2431 cc_conn_init(tp);
2432 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2433 }
2434 /*
2435 * Account for the ACK of our SYN prior to
2436 * regular ACK processing below, except for
2437 * simultaneous SYN, which is handled later.
2438 */
2439 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
2440 incforsyn = 1;
2441 /*
* If segment contains data or FIN, will call tcp_reass()
2443 * later; if not, do so now to pass queued data to user.
2444 */
2445 if (tlen == 0 && (thflags & TH_FIN) == 0) {
2446 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
2447 (struct mbuf *)0);
2448 tcp_handle_wakeup(tp);
2449 }
2450 tp->snd_wl1 = th->th_seq - 1;
2451 /* FALLTHROUGH */
2452
2453 /*
2454 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2455 * ACKs. If the ack is in the range
2456 * tp->snd_una < th->th_ack <= tp->snd_max
2457 * then advance tp->snd_una to th->th_ack and drop
* data from the retransmission queue. If this ACK reflects
* more up-to-date window information, we update our window
* information.
2460 */
2461 case TCPS_ESTABLISHED:
2462 case TCPS_FIN_WAIT_1:
2463 case TCPS_FIN_WAIT_2:
2464 case TCPS_CLOSE_WAIT:
2465 case TCPS_CLOSING:
2466 case TCPS_LAST_ACK:
2467 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2468 TCPSTAT_INC(tcps_rcvacktoomuch);
2469 goto dropafterack;
2470 }
2471 if (tcp_is_sack_recovery(tp, &to)) {
2472 if (((sack_changed = tcp_sack_doack(tp, &to, th->th_ack)) != 0) &&
2473 (tp->t_flags & TF_LRD)) {
2474 tcp_sack_lost_retransmission(tp, th);
2475 }
2476 } else
2477 /*
2478 * Reset the value so that previous (valid) value
2479 * from the last ack with SACK doesn't get used.
2480 */
2481 tp->sackhint.sacked_bytes = 0;
2482
2483 #ifdef TCP_HHOOK
2484 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2485 hhook_run_tcp_est_in(tp, th, &to);
2486 #endif
2487
2488 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2489 maxseg = tcp_maxseg(tp);
2490 if (tlen == 0 &&
2491 (tiwin == tp->snd_wnd ||
2492 (tp->t_flags & TF_SACK_PERMIT))) {
2493 /*
2494 * If this is the first time we've seen a
2495 * FIN from the remote, this is not a
2496 * duplicate and it needs to be processed
2497 * normally. This happens during a
2498 * simultaneous close.
2499 */
2500 if ((thflags & TH_FIN) &&
2501 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2502 tp->t_dupacks = 0;
2503 break;
2504 }
2505 TCPSTAT_INC(tcps_rcvdupack);
2506 /*
* If we have outstanding data (other than
* a window probe), this is a complete
* duplicate ack (i.e., window info didn't
* change and FIN isn't set), the ack is the
* biggest we've seen, and we've seen exactly
* our rexmt threshold of them, then assume a
* packet has been dropped and retransmit it.
2515 * Kludge snd_nxt & the congestion
2516 * window so we send only this one
2517 * packet.
2518 *
2519 * We know we're losing at the current
2520 * window size so do congestion avoidance
2521 * (set ssthresh to half the current window
2522 * and pull our congestion window back to
2523 * the new ssthresh).
2524 *
2525 * Dup acks mean that packets have left the
2526 * network (they're now cached at the receiver)
2527 * so bump cwnd by the amount in the receiver
2528 * to keep a constant cwnd packets in the
2529 * network.
2530 *
2531 * When using TCP ECN, notify the peer that
2532 * we reduced the cwnd.
2533 */
2534 /*
* The following two kinds of acks should not
* affect dupack counting:
2537 * 1) Old acks
2538 * 2) Acks with SACK but without any new SACK
2539 * information in them. These could result from
2540 * any anomaly in the network like a switch
2541 * duplicating packets or a possible DoS attack.
2542 */
2543 if (th->th_ack != tp->snd_una ||
2544 (tcp_is_sack_recovery(tp, &to) &&
2545 !sack_changed))
2546 break;
2547 else if (!tcp_timer_active(tp, TT_REXMT))
2548 tp->t_dupacks = 0;
2549 else if (++tp->t_dupacks > tcprexmtthresh ||
2550 IN_FASTRECOVERY(tp->t_flags)) {
2551 cc_ack_received(tp, th, nsegs,
2552 CC_DUPACK);
2553 if (V_tcp_do_prr &&
2554 IN_FASTRECOVERY(tp->t_flags)) {
2555 tcp_do_prr_ack(tp, th, &to);
2556 } else if (tcp_is_sack_recovery(tp, &to) &&
2557 IN_FASTRECOVERY(tp->t_flags)) {
2558 int awnd;
2559
2560 /*
2561 * Compute the amount of data in flight first.
2562 * We can inject new data into the pipe iff
2563 * we have less than 1/2 the original window's
2564 * worth of data in flight.
2565 */
2566 if (V_tcp_do_newsack)
2567 awnd = tcp_compute_pipe(tp);
2568 else
2569 awnd = (tp->snd_nxt - tp->snd_fack) +
2570 tp->sackhint.sack_bytes_rexmit;
2571
2572 if (awnd < tp->snd_ssthresh) {
2573 tp->snd_cwnd += maxseg;
2574 if (tp->snd_cwnd > tp->snd_ssthresh)
2575 tp->snd_cwnd = tp->snd_ssthresh;
2576 }
2577 } else
2578 tp->snd_cwnd += maxseg;
2579 (void) tcp_output(tp);
2580 goto drop;
2581 } else if (tp->t_dupacks == tcprexmtthresh ||
2582 (tp->t_flags & TF_SACK_PERMIT &&
2583 V_tcp_do_newsack &&
2584 tp->sackhint.sacked_bytes >
2585 (tcprexmtthresh - 1) * maxseg)) {
2586 enter_recovery:
2587 /*
2588 * Above is the RFC6675 trigger condition of
2589 * more than (dupthresh-1)*maxseg sacked data.
2590 * If the count of holes in the
2591 * scoreboard is >= dupthresh, we could
2592 * also enter loss recovery, but don't
2593 * have that value readily available.
2594 */
2595 tp->t_dupacks = tcprexmtthresh;
2596 tcp_seq onxt = tp->snd_nxt;
2597
2598 /*
2599 * If we're doing sack, or prr, check
2600 * to see if we're already in sack
2601 * recovery. If we're not doing sack,
2602 * check to see if we're in newreno
2603 * recovery.
2604 */
2605 if (V_tcp_do_prr ||
2606 (tp->t_flags & TF_SACK_PERMIT)) {
2607 if (IN_FASTRECOVERY(tp->t_flags)) {
2608 tp->t_dupacks = 0;
2609 break;
2610 }
2611 } else {
2612 if (SEQ_LEQ(th->th_ack,
2613 tp->snd_recover)) {
2614 tp->t_dupacks = 0;
2615 break;
2616 }
2617 }
2618 /* Congestion signal before ack. */
2619 cc_cong_signal(tp, th, CC_NDUPACK);
2620 cc_ack_received(tp, th, nsegs,
2621 CC_DUPACK);
2622 tcp_timer_activate(tp, TT_REXMT, 0);
2623 tp->t_rtttime = 0;
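/*
 * Rough sketch of the PRR (RFC 6937) bookkeeping below:
 * prr_delivered is seeded with an estimate of the data
 * already delivered to the receiver (SACKed bytes, or
 * dupacks * maxseg without SACK), and recover_fs snapshots
 * the flight size at recovery entry; later ACKs use their
 * ratio to pace retransmissions in proportion to the data
 * actually leaving the network.
 */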
2624 if (V_tcp_do_prr) {
2625 /*
2626 * snd_ssthresh is already updated by
2627 * cc_cong_signal.
2628 */
2629 if (tcp_is_sack_recovery(tp, &to)) {
2630 tp->sackhint.prr_delivered =
2631 tp->sackhint.sacked_bytes;
2632 } else {
2633 tp->sackhint.prr_delivered =
2634 imin(tp->snd_max - tp->snd_una,
2635 imin(INT_MAX / 65536,
2636 tp->t_dupacks) * maxseg);
2637 }
2638 tp->sackhint.recover_fs = max(1,
2639 tp->snd_nxt - tp->snd_una);
2640 }
2641 if (tcp_is_sack_recovery(tp, &to)) {
2642 TCPSTAT_INC(
2643 tcps_sack_recovery_episode);
2644 tp->snd_recover = tp->snd_nxt;
2645 tp->snd_cwnd = maxseg;
2646 (void) tcp_output(tp);
2647 if (SEQ_GT(th->th_ack, tp->snd_una))
2648 goto resume_partialack;
2649 goto drop;
2650 }
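/*
 * NewReno path: retransmit the missing segment with a
 * one-segment cwnd, then inflate cwnd to roughly
 * ssthresh + maxseg * (t_dupacks - snd_limited), i.e. one
 * segment per dup ACK beyond those already covered by
 * limited transmit, so new data can keep flowing during
 * recovery.
 */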
2651 tp->snd_nxt = th->th_ack;
2652 tp->snd_cwnd = maxseg;
2653 (void) tcp_output(tp);
2654 KASSERT(tp->snd_limited <= 2,
2655 ("%s: tp->snd_limited too big",
2656 __func__));
2657 tp->snd_cwnd = tp->snd_ssthresh +
2658 maxseg *
2659 (tp->t_dupacks - tp->snd_limited);
2660 if (SEQ_GT(onxt, tp->snd_nxt))
2661 tp->snd_nxt = onxt;
2662 goto drop;
2663 } else if (V_tcp_do_rfc3042) {
2664 /*
2665 * Process first and second duplicate
2666 * ACKs. Each indicates a segment
2667 * leaving the network, creating room
2668 * for more. Make sure we can send a
2669 * packet on reception of each duplicate
2670 * ACK by increasing snd_cwnd by one
2671 * segment. Restore the original
2672 * snd_cwnd after packet transmission.
2673 */
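/*
 * Worked example: with snd_limited = 0, the first dup ACK
 * sets cwnd to flight + 1 * maxseg, letting exactly one
 * previously unsent segment out; the second allows one more.
 * The original cwnd is restored below, so the window is never
 * inflated permanently.
 */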
2674 cc_ack_received(tp, th, nsegs,
2675 CC_DUPACK);
2676 uint32_t oldcwnd = tp->snd_cwnd;
2677 tcp_seq oldsndmax = tp->snd_max;
2678 u_int sent;
2679 int avail;
2680
2681 KASSERT(tp->t_dupacks == 1 ||
2682 tp->t_dupacks == 2,
2683 ("%s: dupacks not 1 or 2",
2684 __func__));
2685 if (tp->t_dupacks == 1)
2686 tp->snd_limited = 0;
2687 tp->snd_cwnd =
2688 (tp->snd_nxt - tp->snd_una) +
2689 (tp->t_dupacks - tp->snd_limited) *
2690 maxseg;
2691 /*
2692 * Only call tcp_output when there
2693 * is new data available to be sent
2694 * or we need to send an ACK.
2695 */
2696 SOCKBUF_LOCK(&so->so_snd);
2697 avail = sbavail(&so->so_snd) -
2698 (tp->snd_nxt - tp->snd_una);
2699 SOCKBUF_UNLOCK(&so->so_snd);
2700 if (avail > 0 || tp->t_flags & TF_ACKNOW)
2701 (void) tcp_output(tp);
2702 sent = tp->snd_max - oldsndmax;
2703 if (sent > maxseg) {
2704 KASSERT((tp->t_dupacks == 2 &&
2705 tp->snd_limited == 0) ||
2706 (sent == maxseg + 1 &&
2707 tp->t_flags & TF_SENTFIN),
2708 ("%s: sent too much",
2709 __func__));
2710 tp->snd_limited = 2;
2711 } else if (sent > 0)
2712 ++tp->snd_limited;
2713 tp->snd_cwnd = oldcwnd;
2714 goto drop;
2715 }
2716 }
2717 break;
2718 } else {
2719 /*
2720 * This ack is advancing the left edge, reset the
2721 * counter.
2722 */
2723 tp->t_dupacks = 0;
2724 /*
2725 * If this ack also has new SACK info, increment the
2726 * counter as per rfc6675. The variable
2727 * sack_changed tracks all changes to the SACK
2728 * scoreboard, including when partial ACKs without
* SACK options are received and clear the scoreboard
2730 * from the left side. Such partial ACKs should not be
2731 * counted as dupacks here.
2732 */
2733 if (tcp_is_sack_recovery(tp, &to) &&
2734 sack_changed) {
2735 tp->t_dupacks++;
2736 /* limit overhead by setting maxseg last */
2737 if (!IN_FASTRECOVERY(tp->t_flags) &&
2738 (tp->sackhint.sacked_bytes >
2739 ((tcprexmtthresh - 1) *
2740 (maxseg = tcp_maxseg(tp))))) {
2741 goto enter_recovery;
2742 }
2743 }
2744 }
2745
2746 resume_partialack:
2747 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2748 ("%s: th_ack <= snd_una", __func__));
2749
2750 /*
2751 * If the congestion window was inflated to account
2752 * for the other side's cached packets, retract it.
2753 */
2754 if (IN_FASTRECOVERY(tp->t_flags)) {
2755 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2756 if (tp->t_flags & TF_SACK_PERMIT)
2757 if (V_tcp_do_prr && to.to_flags & TOF_SACK) {
2758 tcp_timer_activate(tp, TT_REXMT, 0);
2759 tp->t_rtttime = 0;
2760 tcp_do_prr_ack(tp, th, &to);
2761 tp->t_flags |= TF_ACKNOW;
2762 (void) tcp_output(tp);
2763 } else
2764 tcp_sack_partialack(tp, th);
2765 else
2766 tcp_newreno_partial_ack(tp, th);
2767 } else
2768 cc_post_recovery(tp, th);
2769 } else if (IN_CONGRECOVERY(tp->t_flags)) {
2770 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2771 if (V_tcp_do_prr) {
2772 tp->sackhint.delivered_data = BYTES_THIS_ACK(tp, th);
2773 tp->snd_fack = th->th_ack;
2774 tcp_do_prr_ack(tp, th, &to);
2775 (void) tcp_output(tp);
2776 }
2777 } else
2778 cc_post_recovery(tp, th);
2779 }
2780 /*
2781 * If we reach this point, ACK is not a duplicate,
2782 * i.e., it ACKs something we sent.
2783 */
2784 if (tp->t_flags & TF_NEEDSYN) {
2785 /*
2786 * T/TCP: Connection was half-synchronized, and our
2787 * SYN has been ACK'd (so connection is now fully
2788 * synchronized). Go to non-starred state,
2789 * increment snd_una for ACK of SYN, and check if
2790 * we can do window scaling.
2791 */
2792 tp->t_flags &= ~TF_NEEDSYN;
2793 tp->snd_una++;
2794 /* Do window scaling? */
2795 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2796 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2797 tp->rcv_scale = tp->request_r_scale;
2798 /* Send window already scaled. */
2799 }
2800 }
2801
2802 process_ACK:
2803 INP_WLOCK_ASSERT(inp);
2804
2805 /*
2806 * Adjust for the SYN bit in sequence space,
2807 * but don't account for it in cwnd calculations.
2808 * This is for the SYN_RECEIVED, non-simultaneous
2809 * SYN case. SYN_SENT and simultaneous SYN are
2810 * treated elsewhere.
2811 */
2812 if (incforsyn)
2813 tp->snd_una++;
2814 acked = BYTES_THIS_ACK(tp, th);
KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
2816 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2817 tp->snd_una, th->th_ack, tp, m));
2818 TCPSTAT_ADD(tcps_rcvackpack, nsegs);
2819 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2820
2821 /*
2822 * If we just performed our first retransmit, and the ACK
2823 * arrives within our recovery window, then it was a mistake
2824 * to do the retransmit in the first place. Recover our
2825 * original cwnd and ssthresh, and proceed to transmit where
2826 * we left off.
2827 */
2828 if (tp->t_rxtshift == 1 &&
2829 tp->t_flags & TF_PREVVALID &&
2830 tp->t_badrxtwin != 0 &&
2831 to.to_flags & TOF_TS &&
2832 to.to_tsecr != 0 &&
2833 TSTMP_LT(to.to_tsecr, tp->t_badrxtwin))
2834 cc_cong_signal(tp, th, CC_RTO_ERR);
2835
2836 /*
2837 * If we have a timestamp reply, update smoothed
2838 * round trip time. If no timestamp is present but
2839 * transmit timer is running and timed sequence
2840 * number was acked, update smoothed round trip time.
2841 * Since we now have an rtt measurement, cancel the
2842 * timer backoff (cf., Phil Karn's retransmit alg.).
2843 * Recompute the initial retransmit timer.
2844 *
2845 * Some boxes send broken timestamp replies
* during the SYN+ACK phase; ignore
* timestamps of 0, or we could calculate a
2848 * huge RTT and blow up the retransmit timer.
2849 */
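/*
 * For example, assuming tcp_ts_getticks() counts milliseconds
 * and hz = 1000: if our timestamp clock reads 105040 when the
 * peer echoes a TSecr of 105000, then t = 40 and the sample
 * passed to tcp_xmit_timer() is TCP_TS_TO_TICKS(40) + 1 = 41
 * ticks; the +1 keeps a sub-tick measurement from producing a
 * zero sample.
 */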
2850 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2851 uint32_t t;
2852
2853 t = tcp_ts_getticks() - to.to_tsecr;
2854 if (!tp->t_rttlow || tp->t_rttlow > t)
2855 tp->t_rttlow = t;
2856 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2857 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2858 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2859 tp->t_rttlow = ticks - tp->t_rtttime;
2860 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2861 }
2862
2863 SOCKBUF_LOCK(&so->so_snd);
2864 /*
2865 * Clear t_acktime if remote side has ACKd all data in the
2866 * socket buffer and FIN (if applicable).
2867 * Otherwise, update t_acktime if we received a sufficiently
2868 * large ACK.
2869 */
2870 if ((tp->t_state <= TCPS_CLOSE_WAIT &&
2871 acked == sbavail(&so->so_snd)) ||
2872 acked > sbavail(&so->so_snd))
2873 tp->t_acktime = 0;
2874 else if (acked > 1)
2875 tp->t_acktime = ticks;
2876
2877 /*
2878 * If all outstanding data is acked, stop retransmit
2879 * timer and remember to restart (more output or persist).
2880 * If there is more data to be acked, restart retransmit
2881 * timer, using current (possibly backed-off) value.
2882 */
2883 if (th->th_ack == tp->snd_max) {
2884 tcp_timer_activate(tp, TT_REXMT, 0);
2885 needoutput = 1;
2886 } else if (!tcp_timer_active(tp, TT_PERSIST))
2887 tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
2888
2889 /*
2890 * If no data (only SYN) was ACK'd,
2891 * skip rest of ACK processing.
2892 */
2893 if (acked == 0) {
2894 SOCKBUF_UNLOCK(&so->so_snd);
2895 goto step6;
2896 }
2897
2898 /*
2899 * Let the congestion control algorithm update congestion
2900 * control related information. This typically means increasing
2901 * the congestion window.
2902 */
2903 cc_ack_received(tp, th, nsegs, CC_ACK);
2904
2905 if (acked > sbavail(&so->so_snd)) {
2906 if (tp->snd_wnd >= sbavail(&so->so_snd))
2907 tp->snd_wnd -= sbavail(&so->so_snd);
2908 else
2909 tp->snd_wnd = 0;
2910 mfree = sbcut_locked(&so->so_snd,
2911 (int)sbavail(&so->so_snd));
2912 ourfinisacked = 1;
2913 } else {
2914 mfree = sbcut_locked(&so->so_snd, acked);
2915 if (tp->snd_wnd >= (uint32_t) acked)
2916 tp->snd_wnd -= acked;
2917 else
2918 tp->snd_wnd = 0;
2919 ourfinisacked = 0;
2920 }
2921 /* NB: sowwakeup_locked() does an implicit unlock. */
2922 sowwakeup_locked(so);
2923 m_freem(mfree);
2924 /* Detect una wraparound. */
2925 if (!IN_RECOVERY(tp->t_flags) &&
2926 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2927 SEQ_LEQ(th->th_ack, tp->snd_recover))
2928 tp->snd_recover = th->th_ack - 1;
2929 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2930 if (IN_RECOVERY(tp->t_flags) &&
2931 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2932 EXIT_RECOVERY(tp->t_flags);
2933 }
2934 tp->snd_una = th->th_ack;
2935 if (tp->t_flags & TF_SACK_PERMIT) {
2936 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2937 tp->snd_recover = tp->snd_una;
2938 }
2939 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2940 tp->snd_nxt = tp->snd_una;
2941
2942 switch (tp->t_state) {
2943 /*
2944 * In FIN_WAIT_1 STATE in addition to the processing
2945 * for the ESTABLISHED state if our FIN is now acknowledged
2946 * then enter FIN_WAIT_2.
2947 */
2948 case TCPS_FIN_WAIT_1:
2949 if (ourfinisacked) {
2950 /*
2951 * If we can't receive any more
2952 * data, then closing user can proceed.
2953 * Starting the timer is contrary to the
2954 * specification, but if we don't get a FIN
2955 * we'll hang forever.
2956 */
2957 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2958 soisdisconnected(so);
2959 tcp_timer_activate(tp, TT_2MSL,
2960 (tcp_fast_finwait2_recycle ?
2961 tcp_finwait2_timeout :
2962 TP_MAXIDLE(tp)));
2963 }
2964 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2965 }
2966 break;
2967
2968 /*
2969 * In CLOSING STATE in addition to the processing for
2970 * the ESTABLISHED state if the ACK acknowledges our FIN
2971 * then enter the TIME-WAIT state, otherwise ignore
2972 * the segment.
2973 */
2974 case TCPS_CLOSING:
2975 if (ourfinisacked) {
2976 tcp_twstart(tp);
2977 m_freem(m);
2978 return;
2979 }
2980 break;
2981
2982 /*
2983 * In LAST_ACK, we may still be waiting for data to drain
2984 * and/or to be acked, as well as for the ack of our FIN.
2985 * If our FIN is now acknowledged, delete the TCB,
2986 * enter the closed state and return.
2987 */
2988 case TCPS_LAST_ACK:
2989 if (ourfinisacked) {
2990 tp = tcp_close(tp);
2991 goto drop;
2992 }
2993 break;
2994 }
2995 }
2996
2997 step6:
2998 INP_WLOCK_ASSERT(inp);
2999
3000 /*
3001 * Update window information.
* Don't look at window if no ACK: TACs send garbage on first SYN.
3003 */
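/*
 * In other words, accept tiwin only if this segment is no
 * older than the one that last updated the window: a newer
 * sequence number (snd_wl1), the same sequence with a newer
 * ack (snd_wl2), or the same (seq, ack) pair with a strictly
 * larger window.
 */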
3004 if ((thflags & TH_ACK) &&
3005 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
3006 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
3007 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
3008 /* keep track of pure window updates */
3009 if (tlen == 0 &&
3010 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
3011 TCPSTAT_INC(tcps_rcvwinupd);
3012 tp->snd_wnd = tiwin;
3013 tp->snd_wl1 = th->th_seq;
3014 tp->snd_wl2 = th->th_ack;
3015 if (tp->snd_wnd > tp->max_sndwnd)
3016 tp->max_sndwnd = tp->snd_wnd;
3017 needoutput = 1;
3018 }
3019
3020 /*
3021 * Process segments with URG.
3022 */
3023 if ((thflags & TH_URG) && th->th_urp &&
3024 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3025 /*
3026 * This is a kludge, but if we receive and accept
3027 * random urgent pointers, we'll crash in
3028 * soreceive. It's hard to imagine someone
3029 * actually wanting to send this much urgent data.
3030 */
3031 SOCKBUF_LOCK(&so->so_rcv);
3032 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
3033 th->th_urp = 0; /* XXX */
3034 thflags &= ~TH_URG; /* XXX */
3035 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
3036 goto dodata; /* XXX */
3037 }
3038 /*
3039 * If this segment advances the known urgent pointer,
3040 * then mark the data stream. This should not happen
3041 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
3042 * a FIN has been received from the remote side.
3043 * In these states we ignore the URG.
3044 *
3045 * According to RFC961 (Assigned Protocols),
3046 * the urgent pointer points to the last octet
3047 * of urgent data. We continue, however,
3048 * to consider it to indicate the first octet
3049 * of data past the urgent section as the original
3050 * spec states (in one of two places).
3051 */
3052 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
3053 tp->rcv_up = th->th_seq + th->th_urp;
3054 so->so_oobmark = sbavail(&so->so_rcv) +
3055 (tp->rcv_up - tp->rcv_nxt) - 1;
3056 if (so->so_oobmark == 0)
3057 so->so_rcv.sb_state |= SBS_RCVATMARK;
3058 sohasoutofband(so);
3059 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
3060 }
3061 SOCKBUF_UNLOCK(&so->so_rcv);
3062 /*
* Remove out of band data so it doesn't get presented to the user.
3064 * This can happen independent of advancing the URG pointer,
3065 * but if two URG's are pending at once, some out-of-band
3066 * data may creep in... ick.
3067 */
3068 if (th->th_urp <= (uint32_t)tlen &&
3069 !(so->so_options & SO_OOBINLINE)) {
3070 /* hdr drop is delayed */
3071 tcp_pulloutofband(so, th, m, drop_hdrlen);
3072 }
3073 } else {
3074 /*
3075 * If no out of band data is expected,
3076 * pull receive urgent pointer along
3077 * with the receive window.
3078 */
3079 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3080 tp->rcv_up = tp->rcv_nxt;
3081 }
3082 dodata: /* XXX */
3083 INP_WLOCK_ASSERT(inp);
3084
3085 /*
3086 * Process the segment text, merging it into the TCP sequencing queue,
3087 * and arranging for acknowledgment of receipt if necessary.
3088 * This process logically involves adjusting tp->rcv_wnd as data
3089 * is presented to the user (this happens in tcp_usrreq.c,
3090 * case PRU_RCVD). If a FIN has already been received on this
3091 * connection then we just ignore the text.
3092 */
3093 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3094 IS_FASTOPEN(tp->t_flags));
3095 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
3096 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3097 tcp_seq save_start = th->th_seq;
3098 tcp_seq save_rnxt = tp->rcv_nxt;
3099 int save_tlen = tlen;
3100 m_adj(m, drop_hdrlen); /* delayed header drop */
3101 /*
3102 * Insert segment which includes th into TCP reassembly queue
3103 * with control block tp. Set thflags to whether reassembly now
3104 * includes a segment with FIN. This handles the common case
3105 * inline (segment is the next to be received on an established
3106 * connection, and the queue is empty), avoiding linkage into
3107 * and removal from the queue and repetition of various
3108 * conversions.
3109 * Set DELACK for segments received in order, but ack
3110 * immediately when segments are out of order (so
3111 * fast retransmit can work).
3112 */
3113 if (th->th_seq == tp->rcv_nxt &&
3114 SEGQ_EMPTY(tp) &&
3115 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3116 tfo_syn)) {
3117 if (DELAY_ACK(tp, tlen) || tfo_syn)
3118 tp->t_flags |= TF_DELACK;
3119 else
3120 tp->t_flags |= TF_ACKNOW;
3121 tp->rcv_nxt += tlen;
3122 if (tlen &&
3123 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
3124 (tp->t_fbyte_in == 0)) {
3125 tp->t_fbyte_in = ticks;
3126 if (tp->t_fbyte_in == 0)
3127 tp->t_fbyte_in = 1;
3128 if (tp->t_fbyte_out && tp->t_fbyte_in)
3129 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
3130 }
3131 thflags = tcp_get_flags(th) & TH_FIN;
3132 TCPSTAT_INC(tcps_rcvpack);
3133 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3134 SOCKBUF_LOCK(&so->so_rcv);
3135 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3136 m_freem(m);
3137 else
3138 sbappendstream_locked(&so->so_rcv, m, 0);
3139 tp->t_flags |= TF_WAKESOR;
3140 } else {
3141 /*
* XXX: Due to the header drop above, "th" is
* theoretically invalid by now. Fortunately
* m_adj() doesn't actually free any mbufs
3145 * when trimming from the head.
3146 */
3147 tcp_seq temp = save_start;
3148
3149 thflags = tcp_reass(tp, th, &temp, &tlen, m);
3150 tp->t_flags |= TF_ACKNOW;
3151 }
3152 if ((tp->t_flags & TF_SACK_PERMIT) &&
3153 (save_tlen > 0) &&
3154 TCPS_HAVEESTABLISHED(tp->t_state)) {
3155 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
3156 /*
3157 * DSACK actually handled in the fastpath
3158 * above.
3159 */
3160 tcp_update_sack_list(tp, save_start,
3161 save_start + save_tlen);
3162 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
3163 if ((tp->rcv_numsacks >= 1) &&
3164 (tp->sackblks[0].end == save_start)) {
3165 /*
3166 * Partial overlap, recorded at todrop
3167 * above.
3168 */
3169 tcp_update_sack_list(tp,
3170 tp->sackblks[0].start,
3171 tp->sackblks[0].end);
3172 } else {
3173 tcp_update_dsack_list(tp, save_start,
3174 save_start + save_tlen);
3175 }
3176 } else if (tlen >= save_tlen) {
3177 /* Update of sackblks. */
3178 tcp_update_dsack_list(tp, save_start,
3179 save_start + save_tlen);
3180 } else if (tlen > 0) {
3181 tcp_update_dsack_list(tp, save_start,
3182 save_start + tlen);
3183 }
3184 }
3185 tcp_handle_wakeup(tp);
3186 #if 0
3187 /*
3188 * Note the amount of data that peer has sent into
3189 * our window, in order to estimate the sender's
3190 * buffer size.
3191 * XXX: Unused.
3192 */
3193 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
3194 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
3195 else
3196 len = so->so_rcv.sb_hiwat;
3197 #endif
3198 } else {
3199 m_freem(m);
3200 thflags &= ~TH_FIN;
3201 }
3202
3203 /*
* If FIN is received, ACK the FIN and let the user know
3205 * that the connection is closing.
3206 */
3207 if (thflags & TH_FIN) {
3208 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3209 /* The socket upcall is handled by socantrcvmore. */
3210 socantrcvmore(so);
3211 /*
3212 * If connection is half-synchronized
3213 * (ie NEEDSYN flag on) then delay ACK,
3214 * so it may be piggybacked when SYN is sent.
3215 * Otherwise, since we received a FIN then no
3216 * more input can be expected, send ACK now.
3217 */
3218 if (tp->t_flags & TF_NEEDSYN)
3219 tp->t_flags |= TF_DELACK;
3220 else
3221 tp->t_flags |= TF_ACKNOW;
3222 tp->rcv_nxt++;
3223 }
3224 switch (tp->t_state) {
3225 /*
3226 * In SYN_RECEIVED and ESTABLISHED STATES
3227 * enter the CLOSE_WAIT state.
3228 */
3229 case TCPS_SYN_RECEIVED:
3230 tp->t_starttime = ticks;
3231 /* FALLTHROUGH */
3232 case TCPS_ESTABLISHED:
3233 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3234 break;
3235
3236 /*
3237 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3238 * enter the CLOSING state.
3239 */
3240 case TCPS_FIN_WAIT_1:
3241 tcp_state_change(tp, TCPS_CLOSING);
3242 break;
3243
3244 /*
3245 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3246 * starting the time-wait timer, turning off the other
3247 * standard timers.
3248 */
3249 case TCPS_FIN_WAIT_2:
3250 tcp_twstart(tp);
3251 return;
3252 }
3253 }
3254 TCP_PROBE3(debug__input, tp, th, m);
3255
3256 /*
3257 * Return any desired output.
3258 */
3259 if (needoutput || (tp->t_flags & TF_ACKNOW))
3260 (void) tcp_output(tp);
3261
3262 check_delack:
3263 INP_WLOCK_ASSERT(inp);
3264
3265 if (tp->t_flags & TF_DELACK) {
3266 tp->t_flags &= ~TF_DELACK;
3267 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3268 }
3269 INP_WUNLOCK(inp);
3270 return;
3271
3272 dropafterack:
3273 /*
3274 * Generate an ACK dropping incoming segment if it occupies
3275 * sequence space, where the ACK reflects our state.
3276 *
3277 * We can now skip the test for the RST flag since all
3278 * paths to this code happen after packets containing
3279 * RST have been dropped.
3280 *
3281 * In the SYN-RECEIVED state, don't send an ACK unless the
3282 * segment we received passes the SYN-RECEIVED ACK test.
3283 * If it fails send a RST. This breaks the loop in the
3284 * "LAND" DoS attack, and also prevents an ACK storm
3285 * between two listening ports that have been sent forged
3286 * SYN segments, each with the source address of the other.
3287 */
3288 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3289 (SEQ_GT(tp->snd_una, th->th_ack) ||
3290 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3291 rstreason = BANDLIM_RST_OPENPORT;
3292 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
3293 goto dropwithreset;
3294 }
3295 TCP_PROBE3(debug__input, tp, th, m);
3296 tp->t_flags |= TF_ACKNOW;
3297 (void) tcp_output(tp);
3298 INP_WUNLOCK(inp);
3299 m_freem(m);
3300 return;
3301
3302 dropwithreset:
3303 if (tp != NULL) {
3304 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3305 INP_WUNLOCK(inp);
3306 } else
3307 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3308 return;
3309
3310 drop:
3311 /*
3312 * Drop space held by incoming segment and return.
3313 */
3314 TCP_PROBE3(debug__input, tp, th, m);
3315 if (tp != NULL) {
3316 INP_WUNLOCK(inp);
3317 }
3318 m_freem(m);
3319 }
3320
3321 /*
3322 * Issue RST and make ACK acceptable to originator of segment.
3323 * The mbuf must still include the original packet header.
3324 * tp may be NULL.
3325 */
3326 void
3327 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3328 int tlen, int rstreason)
3329 {
3330 #ifdef INET
3331 struct ip *ip;
3332 #endif
3333 #ifdef INET6
3334 struct ip6_hdr *ip6;
3335 #endif
3336
3337 if (tp != NULL) {
3338 INP_LOCK_ASSERT(tptoinpcb(tp));
3339 }
3340
3341 /* Don't bother if destination was broadcast/multicast. */
3342 if ((tcp_get_flags(th) & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3343 goto drop;
3344 #ifdef INET6
3345 if (mtod(m, struct ip *)->ip_v == 6) {
3346 ip6 = mtod(m, struct ip6_hdr *);
3347 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3348 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3349 goto drop;
3350 /* IPv6 anycast check is done at tcp6_input() */
3351 }
3352 #endif
3353 #if defined(INET) && defined(INET6)
3354 else
3355 #endif
3356 #ifdef INET
3357 {
3358 ip = mtod(m, struct ip *);
3359 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3360 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3361 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3362 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3363 goto drop;
3364 }
3365 #endif
3366
3367 /* Perform bandwidth limiting. */
3368 if (badport_bandlim(rstreason) < 0)
3369 goto drop;
3370
3371 /* tcp_respond consumes the mbuf chain. */
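/*
 * If the offending segment carried an ACK, a bare RST with
 * seq equal to that ack value is acceptable to the peer.
 * Otherwise we must ACK everything the segment occupied in
 * sequence space (tlen, plus one each for SYN and FIN) so
 * that the RST passes the peer's acceptability test.
 */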
3372 if (tcp_get_flags(th) & TH_ACK) {
3373 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3374 th->th_ack, TH_RST);
3375 } else {
3376 if (tcp_get_flags(th) & TH_SYN)
3377 tlen++;
3378 if (tcp_get_flags(th) & TH_FIN)
3379 tlen++;
3380 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3381 (tcp_seq)0, TH_RST|TH_ACK);
3382 }
3383 return;
3384 drop:
3385 m_freem(m);
3386 }
3387
3388 /*
3389 * Parse TCP options and place in tcpopt.
3390 */
3391 void
3392 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3393 {
3394 int opt, optlen;
3395
3396 to->to_flags = 0;
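/*
 * Options are TLV-encoded except for EOL (kind 0) and NOP
 * (kind 1), which are single bytes. For example, a timestamp
 * option occupies 10 bytes on the wire: kind 8, length 10,
 * then two 32-bit values (TSval, TSecr) in network byte
 * order, handled by the TCPOPT_TIMESTAMP case below.
 */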
3397 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3398 opt = cp[0];
3399 if (opt == TCPOPT_EOL)
3400 break;
3401 if (opt == TCPOPT_NOP)
3402 optlen = 1;
3403 else {
3404 if (cnt < 2)
3405 break;
3406 optlen = cp[1];
3407 if (optlen < 2 || optlen > cnt)
3408 break;
3409 }
3410 switch (opt) {
3411 case TCPOPT_MAXSEG:
3412 if (optlen != TCPOLEN_MAXSEG)
3413 continue;
3414 if (!(flags & TO_SYN))
3415 continue;
3416 to->to_flags |= TOF_MSS;
3417 bcopy((char *)cp + 2,
3418 (char *)&to->to_mss, sizeof(to->to_mss));
3419 to->to_mss = ntohs(to->to_mss);
3420 break;
3421 case TCPOPT_WINDOW:
3422 if (optlen != TCPOLEN_WINDOW)
3423 continue;
3424 if (!(flags & TO_SYN))
3425 continue;
3426 to->to_flags |= TOF_SCALE;
3427 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3428 break;
3429 case TCPOPT_TIMESTAMP:
3430 if (optlen != TCPOLEN_TIMESTAMP)
3431 continue;
3432 to->to_flags |= TOF_TS;
3433 bcopy((char *)cp + 2,
3434 (char *)&to->to_tsval, sizeof(to->to_tsval));
3435 to->to_tsval = ntohl(to->to_tsval);
3436 bcopy((char *)cp + 6,
3437 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3438 to->to_tsecr = ntohl(to->to_tsecr);
3439 break;
3440 case TCPOPT_SIGNATURE:
3441 /*
3442 * In order to reply to a host which has set the
3443 * TCP_SIGNATURE option in its initial SYN, we have
3444 * to record the fact that the option was observed
3445 * here for the syncache code to perform the correct
3446 * response.
3447 */
3448 if (optlen != TCPOLEN_SIGNATURE)
3449 continue;
3450 to->to_flags |= TOF_SIGNATURE;
3451 to->to_signature = cp + 2;
3452 break;
3453 case TCPOPT_SACK_PERMITTED:
3454 if (optlen != TCPOLEN_SACK_PERMITTED)
3455 continue;
3456 if (!(flags & TO_SYN))
3457 continue;
3458 if (!V_tcp_do_sack)
3459 continue;
3460 to->to_flags |= TOF_SACKPERM;
3461 break;
3462 case TCPOPT_SACK:
3463 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3464 continue;
3465 if (flags & TO_SYN)
3466 continue;
3467 to->to_flags |= TOF_SACK;
3468 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3469 to->to_sacks = cp + 2;
3470 TCPSTAT_INC(tcps_sack_rcv_blocks);
3471 break;
3472 case TCPOPT_FAST_OPEN:
3473 /*
3474 * Cookie length validation is performed by the
3475 * server side cookie checking code or the client
3476 * side cookie cache update code.
3477 */
3478 if (!(flags & TO_SYN))
3479 continue;
3480 if (!V_tcp_fastopen_client_enable &&
3481 !V_tcp_fastopen_server_enable)
3482 continue;
3483 to->to_flags |= TOF_FASTOPEN;
3484 to->to_tfo_len = optlen - 2;
3485 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL;
3486 break;
3487 default:
3488 continue;
3489 }
3490 }
3491 }
3492
3493 /*
* Pull the out-of-band byte out of a segment so
3495 * it doesn't appear in the user's data queue.
3496 * It is still reflected in the segment length for
3497 * sequencing purposes.
3498 */
3499 void
3500 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3501 int off)
3502 {
3503 int cnt = off + th->th_urp - 1;
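/*
 * As interpreted here the urgent pointer addresses the byte
 * following the urgent data, so the out-of-band byte itself
 * sits th_urp - 1 bytes into the segment text; "off" accounts
 * for the protocol headers still present in the chain, since
 * the header drop is delayed.
 */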
3504
3505 while (cnt >= 0) {
3506 if (m->m_len > cnt) {
3507 char *cp = mtod(m, caddr_t) + cnt;
3508 struct tcpcb *tp = sototcpcb(so);
3509
3510 INP_WLOCK_ASSERT(tptoinpcb(tp));
3511
3512 tp->t_iobc = *cp;
3513 tp->t_oobflags |= TCPOOB_HAVEDATA;
3514 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3515 m->m_len--;
3516 if (m->m_flags & M_PKTHDR)
3517 m->m_pkthdr.len--;
3518 return;
3519 }
3520 cnt -= m->m_len;
3521 m = m->m_next;
3522 if (m == NULL)
3523 break;
3524 }
3525 panic("tcp_pulloutofband");
3526 }
3527
3528 /*
3529 * Collect new round-trip time estimate
3530 * and update averages and current timeout.
3531 */
3532 void
3533 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3534 {
3535 int delta;
3536
3537 INP_WLOCK_ASSERT(tptoinpcb(tp));
3538
3539 TCPSTAT_INC(tcps_rttupdated);
3540 if (tp->t_rttupdated < UCHAR_MAX)
3541 tp->t_rttupdated++;
3542 #ifdef STATS
3543 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT,
3544 imax(0, rtt * 1000 / hz));
3545 #endif
3546 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) {
3547 /*
3548 * srtt is stored as fixed point with 5 bits after the
* binary point (i.e., scaled by 32). The following magic
3550 * is equivalent to the smoothing algorithm in rfc793 with
3551 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3552 * point). Adjust rtt to origin 0.
3553 */
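/*
 * Worked example, assuming TCP_RTT_SHIFT = 5 and
 * TCP_DELTA_SHIFT = 2: a smoothed rtt of 10 ticks is stored
 * as t_srtt = 320. A new measurement rtt = 14 gives
 * delta = ((14 - 1) << 2) - (320 >> 3) = 52 - 40 = 12, so
 * t_srtt becomes 332, i.e. 10.375 ticks, which matches
 * 7/8 * 10 + 1/8 * 13 under the alpha of .875 above.
 */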
3554 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3555 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3556
3557 if ((tp->t_srtt += delta) <= 0)
3558 tp->t_srtt = 1;
3559
3560 /*
3561 * We accumulate a smoothed rtt variance (actually, a
3562 * smoothed mean difference), then set the retransmit
3563 * timer to smoothed rtt + 4 times the smoothed variance.
3564 * rttvar is stored as fixed point with 4 bits after the
3565 * binary point (scaled by 16). The following is
3566 * equivalent to rfc793 smoothing with an alpha of .75
3567 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3568 * rfc793's wired-in beta.
3569 */
3570 if (delta < 0)
3571 delta = -delta;
3572 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3573 if ((tp->t_rttvar += delta) <= 0)
3574 tp->t_rttvar = 1;
3575 } else {
3576 /*
3577 * No rtt measurement yet - use the unsmoothed rtt.
3578 * Set the variance to half the rtt (so our first
3579 * retransmit happens at 3*rtt).
3580 */
3581 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3582 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3583 }
3584 tp->t_rtttime = 0;
3585 tp->t_rxtshift = 0;
3586
3587 /*
* The retransmit should happen at rtt + 4 * rttvar.
3589 * Because of the way we do the smoothing, srtt and rttvar
3590 * will each average +1/2 tick of bias. When we compute
3591 * the retransmit timer, we want 1/2 tick of rounding and
3592 * 1 extra tick because of +-1/2 tick uncertainty in the
3593 * firing of the timer. The bias will give us exactly the
3594 * 1.5 tick we need. But, because the bias is
3595 * statistical, we have to test that we don't drop below
3596 * the minimum feasible timer (which is 2 ticks).
3597 */
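/*
 * Roughly: t_rxtcur = clamp(srtt + 4 * rttvar,
 * max(t_rttmin, rtt + 2), TCPTV_REXMTMAX), bounding the RTO
 * below by the connection's minimum (and the just-measured
 * rtt plus two ticks) and above by the global maximum.
 */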
3598 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3599 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3600
3601 /*
3602 * We received an ack for a packet that wasn't retransmitted;
3603 * it is probably safe to discard any error indications we've
3604 * received recently. This isn't quite right, but close enough
3605 * for now (a route might have failed after we sent a segment,
3606 * and the return path might not be symmetrical).
3607 */
3608 tp->t_softerror = 0;
3609 }
3610
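/*
 * Worked example of the fixed-point arithmetic above (illustrative,
 * with assumed numbers).  With TCP_RTT_SHIFT = 5, TCP_RTTVAR_SHIFT = 4
 * and TCP_DELTA_SHIFT = 2, suppose t_srtt = 256 (8 ticks scaled by 32),
 * t_rttvar = 64 (4 ticks scaled by 16), and a new sample rtt = 13:
 *
 *	delta	= ((13 - 1) << 2) - (256 >> 3) = 48 - 32 = 16
 *	t_srtt	= 256 + 16 = 272	(8.5 ticks: 7/8 * 8 + 1/8 * 12)
 *	delta	= |16| - (64 >> 2) = 0, so t_rttvar stays at 64 (4 ticks)
 *
 * and the timeout computed from these via TCP_REXMTVAL() comes out as
 * ((272 >> 3) + 64) >> 2 = 24 ticks, i.e. roughly srtt + 4 * rttvar,
 * as described above.
 */
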
/*
 * Determine a reasonable value for the maxseg size.
 * If the route is known, check the route for an mtu.
 * If none, use an mss that can be handled on the outgoing interface
 * without forcing IP to fragment.  If no route is found, the route has no
 * mtu, or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.  We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * NOTE that the resulting t_maxseg doesn't include space for TCP options or
 * IP options, e.g. IPSEC data, since the length of this data may vary, and
 * thus it is calculated for every segment separately in tcp_output().
 *
 * NOTE that this routine is only called when we process an incoming
 * segment, or an ICMP need fragmentation datagram.  Outgoing SYN/ACK MSS
 * settings are handled in tcp_mssopt().
 */
void
tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
    struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
{
	int mss = 0;
	uint32_t maxmtu = 0;
	struct inpcb *inp = tptoinpcb(tp);
	struct hc_metrics_lite metrics;
#ifdef INET6
	int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
	size_t min_protoh = isipv6 ?
	    sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
	    sizeof (struct tcpiphdr);
#else
	size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	INP_WLOCK_ASSERT(inp);

	if (tp->t_port)
		min_protoh += V_tcp_udp_tunneling_overhead;
	if (mtuoffer != -1) {
		KASSERT(offer == -1, ("%s: conflict", __func__));
		offer = mtuoffer - min_protoh;
	}

	/* Initialize. */
#ifdef INET6
	if (isipv6) {
		maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
		tp->t_maxseg = V_tcp_v6mssdflt;
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
		tp->t_maxseg = V_tcp_mssdflt;
	}
#endif

	/*
	 * No route to sender, stay with default mss and return.
	 */
	if (maxmtu == 0) {
		/*
		 * In case we return early we need to initialize metrics
		 * to a defined state as tcp_hc_get() would do for us
		 * if there was no cache hit.
		 */
		if (metricptr != NULL)
			bzero(metricptr, sizeof(struct hc_metrics_lite));
		return;
	}

	/* What have we got? */
	switch (offer) {
	case 0:
		/*
		 * Offer == 0 means that there was no MSS on the SYN
		 * segment; in this case we use tcp_mssdflt as
		 * already assigned to t_maxseg above.
		 */
		offer = tp->t_maxseg;
		break;

	case -1:
		/*
		 * Offer == -1 means that we didn't receive a SYN yet.
		 */
		/* FALLTHROUGH */

	default:
		/*
		 * Prevent a DoS attack with a too-small MSS.  Round up
		 * to at least minmss.
		 */
		offer = max(offer, V_tcp_minmss);
	}

	/*
	 * rmx information is now retrieved from tcp_hostcache.
	 */
	tcp_hc_get(&inp->inp_inc, &metrics);
	if (metricptr != NULL)
		bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));

	/*
	 * If there's a discovered mtu in the tcp hostcache, use it.
	 * Else, use the link mtu.
	 */
	if (metrics.rmx_mtu)
		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
	else {
#ifdef INET6
		if (isipv6) {
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in6_localaddr(&inp->in6p_faddr))
				mss = min(mss, V_tcp_v6mssdflt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in_localaddr(inp->inp_faddr))
				mss = min(mss, V_tcp_mssdflt);
		}
#endif
		/*
		 * XXX - The above conditional (mss = maxmtu - min_protoh)
		 * probably violates the TCP spec.
		 * The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
	}
	mss = min(mss, offer);

	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	mss = max(mss, 64);

	tp->t_maxseg = mss;
}

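/*
 * Worked example (illustrative): for an IPv4 connection over standard
 * 1500-byte Ethernet with no hostcache entry, maxmtu = 1500 and
 * min_protoh = sizeof(struct tcpiphdr) = 40, giving mss = 1460.  If the
 * peer's SYN offered an MSS of 1400, the min(mss, offer) step reduces
 * t_maxseg to 1400.  TCP/IP option space is not subtracted here;
 * tcp_output() accounts for it on a per-segment basis.
 */
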
void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	uint32_t bufsize;
	struct inpcb *inp = tptoinpcb(tp);
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat differs from the default (then it
	 * has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(so, SO_SND, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(so, SO_RCV, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}

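/*
 * Example of the buffer sizing above (illustrative, with assumed
 * numbers): with mss = 1460 and an assumed default sb_hiwat of 32768
 * bytes, roundup(32768, 1460) = 33580, so the buffer grows to hold an
 * integral number (23) of full-sized segments, subject to the global
 * sb_max cap.  Conversely, a send buffer of only 1024 bytes would pull
 * the mss down to 1024.
 */
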
/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	uint32_t thcmtu = 0;
	uint32_t maxmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}

void
tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0;
	int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	/*
	 * Compute the amount of data that this ACK is indicating
	 * (del_data) and an estimate of how many bytes are in the
	 * network.
	 */
	if (tcp_is_sack_recovery(tp, to) ||
	    (IN_CONGRECOVERY(tp->t_flags) &&
	    !IN_FASTRECOVERY(tp->t_flags))) {
		del_data = tp->sackhint.delivered_data;
		if (V_tcp_do_newsack)
			pipe = tcp_compute_pipe(tp);
		else
			pipe = (tp->snd_nxt - tp->snd_fack) +
			    tp->sackhint.sack_bytes_rexmit;
	} else {
		if (tp->sackhint.prr_delivered < (tcprexmtthresh * maxseg +
		    tp->snd_recover - tp->snd_una))
			del_data = maxseg;
		pipe = imax(0, tp->snd_max - tp->snd_una -
		    imin(INT_MAX / 65536, tp->t_dupacks) * maxseg);
	}
	tp->sackhint.prr_delivered += del_data;
	/*
	 * Proportional Rate Reduction
	 */
	if (pipe >= tp->snd_ssthresh) {
		if (tp->sackhint.recover_fs == 0)
			tp->sackhint.recover_fs =
			    imax(1, tp->snd_nxt - tp->snd_una);
		snd_cnt = howmany((long)tp->sackhint.prr_delivered *
		    tp->snd_ssthresh, tp->sackhint.recover_fs) -
		    tp->sackhint.prr_out;
	} else {
		if (V_tcp_do_prr_conservative || (del_data == 0))
			limit = tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out;
		else
			limit = imax(tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out, del_data) +
			    maxseg;
		snd_cnt = imin((tp->snd_ssthresh - pipe), limit);
	}
	snd_cnt = imax(snd_cnt, 0) / maxseg;
	/*
	 * Send snd_cnt new data into the network in response to this ack.
	 * If there is going to be a SACK retransmission, adjust snd_cwnd
	 * accordingly.
	 */
	if (IN_FASTRECOVERY(tp->t_flags)) {
		if (tcp_is_sack_recovery(tp, to)) {
			tp->snd_cwnd = tp->snd_nxt - tp->snd_recover +
			    tp->sackhint.sack_bytes_rexmit +
			    (snd_cnt * maxseg);
		} else {
			tp->snd_cwnd = (tp->snd_max - tp->snd_una) +
			    (snd_cnt * maxseg);
		}
	} else if (IN_CONGRECOVERY(tp->t_flags))
		tp->snd_cwnd = pipe - del_data + (snd_cnt * maxseg);
	tp->snd_cwnd = imax(maxseg, tp->snd_cwnd);
}

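/*
 * Worked example of PRR (illustrative, with assumed numbers): let
 * maxseg = 1000, snd_ssthresh = 10000 and recover_fs = 20000 (the
 * flight size when recovery started).  If this ACK brings
 * prr_delivered to 4000 with prr_out = 1000 and pipe = 16000 (which
 * is >= ssthresh), then
 *
 *	snd_cnt = howmany(4000 * 10000, 20000) - 1000 = 1000 bytes,
 *
 * i.e. one segment: while pipe exceeds ssthresh, new data is clocked
 * out at about ssthresh/recover_fs (here one half) of the delivered
 * rate, so the window converges toward ssthresh as recovery ends.
 */
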
/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	uint32_t ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on the fact that tp->snd_una
	 * has not been updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}

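/*
 * Example of the deflation above (illustrative, with assumed numbers):
 * with maxseg = 1000 and a partial ACK covering 3000 bytes, the
 * retransmission is forced with a temporary cwnd of 1000 + 3000, and
 * afterwards the saved cwnd is deflated by the 3000 acked bytes and
 * re-inflated by one maxseg: an old cwnd of 10000 becomes
 * 10000 - 3000 + 1000 = 8000.
 */
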
int
tcp_compute_pipe(struct tcpcb *tp)
{
	if (tp->t_fb->tfb_compute_pipe == NULL) {
		return (tp->snd_max - tp->snd_una +
		    tp->sackhint.sack_bytes_rexmit -
		    tp->sackhint.sacked_bytes);
	} else {
		return ((*tp->t_fb->tfb_compute_pipe)(tp));
	}
}

uint32_t
tcp_compute_initwnd(uint32_t maxseg)
{
	/*
	 * Calculate the Initial Window, also used as the Restart Window.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size is also
	 * supported.
	 */
	if (V_tcp_initcwnd_segments)
		return (min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460)));
	else if (V_tcp_do_rfc3390)
		return (min(4 * maxseg, max(2 * maxseg, 4380)));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			return (2 * maxseg);
		else if (maxseg > 1095)
			return (3 * maxseg);
		else
			return (4 * maxseg);
	}
}
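
/*
 * Worked values for the above (illustrative), assuming maxseg = 1460:
 * with V_tcp_initcwnd_segments = 10 (RFC6928-style) the result is
 * min(14600, max(2920, 14600)) = 14600 bytes; with only RFC3390
 * enabled it is min(5840, max(2920, 4380)) = 4380 bytes; otherwise
 * the RFC5681 ladder gives 3 * 1460 = 4380 bytes, since
 * 1095 < 1460 <= 2190.
 */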