/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/netinet/tcp_input.c 301717 2016-06-09 05:48:34Z ae $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#ifdef TCP_RFC7413
#include <netinet/tcp_fastopen.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

int	tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc6675_pipe), 0,
    "Use calculated pipe/in-flight bytes per RFC 6675");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
#define	V_tcp_insecure_syn	VNET(tcp_insecure_syn)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
224 "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

/*
 * TCP statistics are stored in an array of counter(9)s, whose size matches
 * the size of struct tcpstat.  The running TCP connection counts are kept
 * in a regular counter array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

static void
tcp_vnet_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_init, NULL);

#ifdef VIMAGE
static void
tcp_vnet_uninit(const void *unused)
{

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);
}
VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_uninit, NULL);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}

/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tcp_maxseg(tp));
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}
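
	/*
	 * Worked example (illustrative): with the default abc_l_var = 2 and
	 * a 1460-byte MSS, a single ACK credits at most min(bytes_this_ack,
	 * 2 * 1460) bytes towards t_bytes_acked; once a full cwnd worth of
	 * bytes has been ACKed, CCF_ABC_SENTAWND tells the CC algorithm
	 * that it may grow the congestion window.
	 */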

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}

void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
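		/*
		 * Example, assuming the usual TCP_RTT_SCALE fixed-point
		 * representation: a cached SRTT seeds t_rxtcur with roughly
		 * srtt/8 + rttvar/2, clamped between t_rttmin and
		 * TCPTV_REXMTMAX, so the first retransmit timeout is
		 * informed by the hostcache rather than the default.
		 */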
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size overrides
	 * these defaults.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else if (V_tcp_initcwnd_segments)
		tp->snd_cwnd = min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * maxseg, max(2 * maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			tp->snd_cwnd = 2 * maxseg;
		else if (maxseg > 1095)
			tp->snd_cwnd = 3 * maxseg;
		else
			tp->snd_cwnd = 4 * maxseg;
	}
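
	/*
	 * Worked example (illustrative): with maxseg = 1460 the RFC 6928
	 * style default above yields min(10 * 1460, max(2 * 1460, 14600)) =
	 * 14600 bytes, the RFC 3390 branch would give
	 * min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes, and the RFC 5681
	 * fallback picks 3 * 1460 = 4380 bytes.
	 */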

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	u_int maxseg;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		maxseg = tcp_maxseg(tp);
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
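		/*
		 * Illustrative numbers: with cwnd = 20 * maxseg and an equal
		 * or larger send window, the expression below rounds half
		 * the flight size down to a segment boundary, giving
		 * ssthresh = 10 * maxseg, while cwnd itself restarts at a
		 * single segment.
		 */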
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    maxseg) * maxseg;
		tp->snd_cwnd = maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

#ifdef TCP_SIGNATURE
static inline int
tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
    struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
{
	int ret;

	tcp_fields_to_net(th);
	ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
	tcp_fields_to_host(th);
	return (ret);
}
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * the following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxseg) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
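
/*
 * For illustration: a 512-byte in-order segment arriving with delayed ACKs
 * enabled, no TT_DELACK timer pending and a non-zero window last advertised
 * makes DELAY_ACK(tp, tlen) true, so the ACK is held back and may piggyback
 * on the next data packet; a segment larger than t_maxseg (e.g. an
 * LRO-aggregated one) is ACKed immediately instead.
 */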

static void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

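	/*
	 * The two-bit ECN field in the IP TOS byte encodes Not-ECT (00),
	 * ECT(1) (01), ECT(0) (10) and CE (11); a CE mark set by a
	 * congested router is passed to the CC algorithm via CCF_IPHDR_CE
	 * below.
	 */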
	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT1:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		if (tp->t_flags & TF_DELACK)
			tp->ccv->flags |= CCF_DELACK;
		else
			tp->ccv->flags &= ~CCF_DELACK;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW)
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
	}
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef TCP_SIGNATURE
	uint8_t sig_checked = 0;
#endif
	uint8_t iptos = 0;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */

		if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
			m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
			if (m == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about an unspecified IPv6 source address.
		 * Since we use the all-zero address to mark an unbound or
		 * unconnected pcb, an unspecified source address could be
		 * used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination
		 * are already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(tlen + off0);
		}
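
		/*
		 * Either way th_sum now holds the folded result: a correct
		 * segment sums to 0xffff in one's complement, so after the
		 * "^= 0xffff" inversion of a hardware-assisted sum (or the
		 * full in_cksum() run) th_sum is zero exactly when the
		 * checksum is valid, which is what the test below relies on.
		 */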

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}
#endif /* INET */

#ifdef INET6
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	iptos = ip->ip_tos;
#endif

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
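	/*
	 * For reference: th_off counts 32-bit words, so "th_off << 2" is
	 * the header length in bytes.  Valid values run from 20 (no
	 * options) to 60 (th_off = 15); anything outside [20, tlen] is
	 * discarded below.
	 */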
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are three cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, ACKs for a
	 * connection in TIMEWAIT and SYNs not targeting a listening socket.
	 */
	if ((thflags & (TH_FIN | TH_RST)) != 0) {
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
	} else
		ti_locked = TI_UNLOCKED;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    ((inp->inp_socket == NULL) ||
	    (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6) {
			if (inp->inp_ip_minttl > ip6->ip6_hlim)
				goto dropunlock;
		} else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_RUNLOCK(&V_tcbinfo);
		return (IPPROTO_DONE);
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_FIN | TH_RST)) != 0)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
#endif
	if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
	    (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) &&
	    !(tp->t_flags & TF_FASTOPEN)))) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));
		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in the syncache
		 * if only the ACK flag is set.  A successful lookup creates
		 * a new socket appended to the listen queue in SYN_RECEIVED
		 * state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {

			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
#ifdef TCP_RFC7413
new_tfo_socket:
#endif
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: new socket not in SYN_RECEIVED", __func__));
#ifdef TCP_SIGNATURE
			if (sig_checked == 0) {
				tcp_dooptions(&to, optp, optlen,
				    (thflags & TH_SYN) ? TO_SYN : 0);
				if (!tcp_signature_verify_input(m, off0, tlen,
				    optlen, &to, th, tp->t_flags)) {

					/*
					 * An RST received in SYN_SENT state
					 * is allowed through for further
					 * processing.
					 */
					if ((thflags & TH_RST) == 0 ||
					    (tp->t_state == TCPS_SYN_SENT) == 0)
						goto dropunlock;
				}
				sig_checked = 1;
			}
#endif

			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return (IPPROTO_DONE);
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden, we do not accept
		 * SYNs to a deprecated interface address, to prevent any
		 * new inbound connection from getting established.  When
		 * the SYN is not accepted, we respond with a TCP RST
		 * (sourced from the deprecated address) rather than
		 * silently dropping the segment; a RST is more useful to
		 * the peer, as it is the final packet of the exchange.
		 *
		 * If deprecated addresses are not forbidden, we accept the
		 * SYN packet.  RFC 2462 does not suggest dropping a SYN in
		 * this case.  Section 5.5.4 of RFC 2462 can be read as:
		 *  1. use of a deprecated address with existing
		 *     communication is okay - it "SHOULD continue to be
		 *     used";
		 *  2. use of it with new communication:
		 *     (2a) it "SHOULD NOT be used if an alternate address
		 *          with sufficient scope is available";
		 *     (2b) nothing is said otherwise.
		 * Here we fall into case (2b), as we have no choice in our
		 * source address selection - we must obey the peer.
		 *
		 * The wording in RFC 2462 is confusing and deprecated
		 * address handling is described in several passages that
		 * do not exactly agree; section 5.5.4 appears to be the
		 * best one, so that is what we follow.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *	If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));
		tcp_dooptions(&to, optp, optlen, TO_SYN);
#ifdef TCP_RFC7413
		if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL))
			goto new_tfo_socket;
#else
		syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
#endif
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		if (ti_locked == TI_RLOCKED) {
			INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;
		}
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first, while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race window that allows a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}

#ifdef TCP_SIGNATURE
	if (sig_checked == 0) {
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
		    th, tp->t_flags)) {

			/*
			 * An RST received in SYN_SENT state is allowed
			 * through for further processing.
			 */
			if ((thflags & TH_RST) == 0 ||
			    (tp->t_state == TCPS_SYN_SENT) == 0)
				goto dropunlock;
		}
		sig_checked = 1;
	}
#endif

	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win;
	u_long tiwin;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;

	/*
	 * If this is either a state-changing packet or the current state
	 * isn't established, we require a write lock on tcbinfo.  Otherwise,
	 * we allow the tcbinfo to be in either a locked or unlocked state,
	 * as the caller may have unnecessarily acquired a write lock due to
	 * a race.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		else {
			KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
			    "ti_locked: %d", __func__, ti_locked));
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		}
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
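	/*
	 * Example: a peer advertising th_win = 65535 with a negotiated
	 * snd_scale of 7 yields an effective window of 65535 << 7, roughly
	 * 8 MB; in SYN_SENT the scale is still zero, so the raw 16-bit
	 * value is used unscaled.
	 */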

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
	}
	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TF_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK(&V_tcbinfo);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					u_int t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
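				/*
				 * E.g., assuming hz = 1000, a timestamp
				 * echoed 40 ms ago gives t = 40 ticks; the
				 * "+ 1" above keeps a zero sample from being
				 * fed to tcp_xmit_timer().  Without
				 * timestamps the fallback path measures
				 * ticks elapsed since t_rtseq was sent.
				 */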
				acked = BYTES_THIS_ACK(tp, th);

				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);

				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				TCP_PROBE3(debug__input, tp, th,
				    mtod(m, const char *));
				/* Free the mbuf only after the probe saw it. */
				m_freem(m);
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (sbavail(&so->so_snd))
					(void) tp->t_fb->tfb_tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_RLOCKED)
				INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCPSTAT_INC(tcps_rcvpack);
			TCPSTAT_ADD(tcps_rcvbyte, tlen);
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));

1850 /*
		 * Automatic sizing of receive socket buffer.  Often the send
		 * buffer size is not optimally adjusted to the actual network
		 * conditions at hand (the delay-bandwidth product).  Setting
		 * the buffer size too small limits throughput on links with
		 * high bandwidth and high delay (e.g. trans-continental/
		 * oceanic links).
		 *
		 * On the receive side the socket buffer memory is only rarely
		 * used to any significant extent.  This allows us to be much
		 * more aggressive in scaling the receive socket buffer.  For
		 * the case that the buffer space is actually used to a large
		 * extent and we run out of kernel memory we can simply drop
		 * the new segments; TCP on the sender will just retransmit them
		 * later.  Setting the buffer size too big may only consume too
		 * much kernel memory if the application doesn't read() from
		 * the socket, or if packet loss or reordering makes use of the
		 * reassembly queue.
1867 *
		 * The criteria to step up the receive buffer one notch are:
		 *  1. Application has not set receive buffer size with
		 *     SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
		 *  2. the measurement interval is the time it takes one
		 *     timestamp to be reflected back to us (the RTT);
		 *  3. the number of bytes received per RTT exceeds seven
		 *     eighths of the current socket buffer size;
		 *  4. receive buffer size has not hit maximal automatic size.
1876 *
1877 * This algorithm does one step per RTT at most and only if
1878 * we receive a bulk stream w/o packet losses or reorderings.
1879 * Shrinking the buffer during idle times is not necessary as
1880 * it doesn't consume any memory when idle.
1881 *
1882 * TODO: Only step up if the application is actually serving
1883 * the buffer to better manage the socket buffer resources.
1884 */
1885 if (V_tcp_do_autorcvbuf &&
1886 (to.to_flags & TOF_TS) &&
1887 to.to_tsecr &&
1888 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1889 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
1890 to.to_tsecr - tp->rfbuf_ts < hz) {
1891 if (tp->rfbuf_cnt >
1892 (so->so_rcv.sb_hiwat / 8 * 7) &&
1893 so->so_rcv.sb_hiwat <
1894 V_tcp_autorcvbuf_max) {
1895 newsize =
1896 min(so->so_rcv.sb_hiwat +
1897 V_tcp_autorcvbuf_inc,
1898 V_tcp_autorcvbuf_max);
1899 }
1900 /* Start over with next RTT. */
1901 tp->rfbuf_ts = 0;
1902 tp->rfbuf_cnt = 0;
1903 } else
1904 tp->rfbuf_cnt += tlen; /* add up */
1905 }
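		/*
		 * For example, assuming the stock 16KB autoincrement and
		 * 2MB cap: a 64KB buffer steps up to 80KB once more than
		 * 56KB (7/8 of 64KB) arrived within the last RTT, and it
		 * keeps stepping once per RTT while the bulk stream stays
		 * free of losses and reordering.
		 */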
1906
1907 /* Add data to socket buffer. */
1908 SOCKBUF_LOCK(&so->so_rcv);
1909 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1910 m_freem(m);
1911 } else {
1912 /*
1913 * Set new socket buffer size.
1914 * Give up when limit is reached.
1915 */
1916 if (newsize)
1917 if (!sbreserve_locked(&so->so_rcv,
1918 newsize, so, NULL))
1919 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1920 m_adj(m, drop_hdrlen); /* delayed header drop */
1921 sbappendstream_locked(&so->so_rcv, m, 0);
1922 }
1923 /* NB: sorwakeup_locked() does an implicit unlock. */
1924 sorwakeup_locked(so);
1925 if (DELAY_ACK(tp, tlen)) {
1926 tp->t_flags |= TF_DELACK;
1927 } else {
1928 tp->t_flags |= TF_ACKNOW;
1929 tp->t_fb->tfb_tcp_output(tp);
1930 }
1931 goto check_delack;
1932 }
1933 }
1934
1935 /*
1936 * Calculate amount of space in receive window,
1937 * and then do TCP input processing.
1938 * Receive window is amount of space in rcv queue,
1939 * but not less than advertised window.
1940 */
1941 win = sbspace(&so->so_rcv);
1942 if (win < 0)
1943 win = 0;
1944 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1945
1946 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1947 tp->rfbuf_ts = 0;
1948 tp->rfbuf_cnt = 0;
1949
1950 switch (tp->t_state) {
1951
1952 /*
1953 * If the state is SYN_RECEIVED:
1954 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1955 */
1956 case TCPS_SYN_RECEIVED:
1957 if ((thflags & TH_ACK) &&
1958 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1959 SEQ_GT(th->th_ack, tp->snd_max))) {
1960 rstreason = BANDLIM_RST_OPENPORT;
1961 goto dropwithreset;
1962 }
1963 #ifdef TCP_RFC7413
1964 if (tp->t_flags & TF_FASTOPEN) {
1965 /*
1966 * When a TFO connection is in SYN_RECEIVED, the
1967 * only valid packets are the initial SYN, a
1968 * retransmit/copy of the initial SYN (possibly with
1969 * a subset of the original data), a valid ACK, a
1970 * FIN, or a RST.
1971 */
1972 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
1973 rstreason = BANDLIM_RST_OPENPORT;
1974 goto dropwithreset;
1975 } else if (thflags & TH_SYN) {
1976 /* non-initial SYN is ignored */
1977 if ((tcp_timer_active(tp, TT_DELACK) ||
1978 tcp_timer_active(tp, TT_REXMT)))
1979 goto drop;
1980 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
1981 goto drop;
1982 }
1983 }
1984 #endif
1985 break;
1986
1987 /*
1988 * If the state is SYN_SENT:
1989 * if seg contains an ACK, but not for our SYN, drop the input.
1990 * if seg contains a RST, then drop the connection.
1991 * if seg does not contain SYN, then drop it.
1992 * Otherwise this is an acceptable SYN segment
1993 * initialize tp->rcv_nxt and tp->irs
1994 * if seg contains ack then advance tp->snd_una
1995 * if seg contains an ECE and ECN support is enabled, the stream
1996 * is ECN capable.
1997 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1998 * arrange for segment to be acked (eventually)
1999 * continue processing rest of data/controls, beginning with URG
2000 */
2001 case TCPS_SYN_SENT:
2002 if ((thflags & TH_ACK) &&
2003 (SEQ_LEQ(th->th_ack, tp->iss) ||
2004 SEQ_GT(th->th_ack, tp->snd_max))) {
2005 rstreason = BANDLIM_UNLIMITED;
2006 goto dropwithreset;
2007 }
2008 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
2009 TCP_PROBE5(connect__refused, NULL, tp,
2010 mtod(m, const char *), tp, th);
2011 tp = tcp_drop(tp, ECONNREFUSED);
2012 }
2013 if (thflags & TH_RST)
2014 goto drop;
2015 if (!(thflags & TH_SYN))
2016 goto drop;
2017
2018 tp->irs = th->th_seq;
2019 tcp_rcvseqinit(tp);
2020 if (thflags & TH_ACK) {
2021 TCPSTAT_INC(tcps_connects);
2022 soisconnected(so);
2023 #ifdef MAC
2024 mac_socketpeer_set_from_mbuf(m, so);
2025 #endif
2026 /* Do window scaling on this connection? */
2027 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2028 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2029 tp->rcv_scale = tp->request_r_scale;
2030 }
2031 tp->rcv_adv += imin(tp->rcv_wnd,
2032 TCP_MAXWIN << tp->rcv_scale);
2033 tp->snd_una++; /* SYN is acked */
2034 /*
			 * If there's data, delay the ACK; if there's also a
			 * FIN, ACKNOW will be turned on later.
2037 */
2038 if (DELAY_ACK(tp, tlen) && tlen != 0)
2039 tcp_timer_activate(tp, TT_DELACK,
2040 tcp_delacktime);
2041 else
2042 tp->t_flags |= TF_ACKNOW;
2043
2044 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
2045 tp->t_flags |= TF_ECN_PERMIT;
2046 TCPSTAT_INC(tcps_ecn_shs);
2047 }
2048
2049 /*
2050 * Received <SYN,ACK> in SYN_SENT[*] state.
2051 * Transitions:
2052 * SYN_SENT --> ESTABLISHED
2053 * SYN_SENT* --> FIN_WAIT_1
2054 */
2055 tp->t_starttime = ticks;
2056 if (tp->t_flags & TF_NEEDFIN) {
2057 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2058 tp->t_flags &= ~TF_NEEDFIN;
2059 thflags &= ~TH_SYN;
2060 } else {
2061 tcp_state_change(tp, TCPS_ESTABLISHED);
2062 TCP_PROBE5(connect__established, NULL, tp,
2063 mtod(m, const char *), tp, th);
2064 cc_conn_init(tp);
2065 tcp_timer_activate(tp, TT_KEEP,
2066 TP_KEEPIDLE(tp));
2067 }
2068 } else {
2069 /*
2070 * Received initial SYN in SYN-SENT[*] state =>
2071 * simultaneous open.
			 * If it succeeds, connection is half-synchronized.
2073 * Otherwise, do 3-way handshake:
2074 * SYN-SENT -> SYN-RECEIVED
2075 * SYN-SENT* -> SYN-RECEIVED*
2076 */
2077 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
2078 tcp_timer_activate(tp, TT_REXMT, 0);
2079 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2080 }
2081
2082 KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: "
2083 "ti_locked %d", __func__, ti_locked));
2084 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2085 INP_WLOCK_ASSERT(tp->t_inpcb);
2086
2087 /*
2088 * Advance th->th_seq to correspond to first data byte.
2089 * If data, trim to stay within window,
2090 * dropping FIN if necessary.
2091 */
2092 th->th_seq++;
2093 if (tlen > tp->rcv_wnd) {
2094 todrop = tlen - tp->rcv_wnd;
2095 m_adj(m, -todrop);
2096 tlen = tp->rcv_wnd;
2097 thflags &= ~TH_FIN;
2098 TCPSTAT_INC(tcps_rcvpackafterwin);
2099 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2100 }
2101 tp->snd_wl1 = th->th_seq - 1;
2102 tp->rcv_up = th->th_seq;
2103 /*
2104 * Client side of transaction: already sent SYN and data.
2105 * If the remote host used T/TCP to validate the SYN,
2106 * our data will be ACK'd; if so, enter normal data segment
2107 * processing in the middle of step 5, ack processing.
2108 * Otherwise, goto step 6.
2109 */
2110 if (thflags & TH_ACK)
2111 goto process_ACK;
2112
2113 goto step6;
2114
2115 /*
2116 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2117 * do normal processing.
2118 *
2119 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2120 */
2121 case TCPS_LAST_ACK:
2122 case TCPS_CLOSING:
2123 break; /* continue normal processing */
2124 }
2125
2126 /*
2127 * States other than LISTEN or SYN_SENT.
2128 * First check the RST flag and sequence number since reset segments
2129 * are exempt from the timestamp and connection count tests. This
2130 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2131 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
2133 * segments with a random sequence number a 50 percent chance of
2134 * killing a connection).
2135 * Then check timestamp, if present.
2136 * Then check the connection count, if present.
2137 * Then check that at least some bytes of segment are within
2138 * receive window. If segment begins before rcv_nxt,
2139 * drop leading data (and SYN); if nothing left, just ack.
2140 */
2141 if (thflags & TH_RST) {
2142 /*
2143 * RFC5961 Section 3.2
2144 *
2145 * - RST drops connection only if SEG.SEQ == RCV.NXT.
2146 * - If RST is in window, we send challenge ACK.
2147 *
2148 * Note: to take into account delayed ACKs, we should
2149 * test against last_ack_sent instead of rcv_nxt.
2150 * Note 2: we handle special case of closed window, not
2151 * covered by the RFC.
2152 */
2153 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2154 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2155 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
2156
2157 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2158 KASSERT(ti_locked == TI_RLOCKED,
2159 ("%s: TH_RST ti_locked %d, th %p tp %p",
2160 __func__, ti_locked, th, tp));
2161 KASSERT(tp->t_state != TCPS_SYN_SENT,
2162 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
2163 __func__, th, tp));
2164
2165 if (V_tcp_insecure_rst ||
2166 tp->last_ack_sent == th->th_seq) {
2167 TCPSTAT_INC(tcps_drops);
2168 /* Drop the connection. */
2169 switch (tp->t_state) {
2170 case TCPS_SYN_RECEIVED:
2171 so->so_error = ECONNREFUSED;
2172 goto close;
2173 case TCPS_ESTABLISHED:
2174 case TCPS_FIN_WAIT_1:
2175 case TCPS_FIN_WAIT_2:
2176 case TCPS_CLOSE_WAIT:
2177 so->so_error = ECONNRESET;
2178 close:
2179 tcp_state_change(tp, TCPS_CLOSED);
2180 /* FALLTHROUGH */
2181 default:
2182 tp = tcp_close(tp);
2183 }
2184 } else {
2185 TCPSTAT_INC(tcps_badrst);
2186 /* Send challenge ACK. */
2187 tcp_respond(tp, mtod(m, void *), th, m,
2188 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
2189 tp->last_ack_sent = tp->rcv_nxt;
2190 m = NULL;
2191 }
2192 }
2193 goto drop;
2194 }
2195
2196 /*
2197 * RFC5961 Section 4.2
2198 * Send challenge ACK for any SYN in synchronized state.
2199 */
2200 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT &&
2201 tp->t_state != TCPS_SYN_RECEIVED) {
2202 KASSERT(ti_locked == TI_RLOCKED,
2203 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2204 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2205
2206 TCPSTAT_INC(tcps_badsyn);
2207 if (V_tcp_insecure_syn &&
2208 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2209 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2210 tp = tcp_drop(tp, ECONNRESET);
2211 rstreason = BANDLIM_UNLIMITED;
2212 } else {
2213 /* Send challenge ACK. */
2214 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
2215 tp->snd_nxt, TH_ACK);
2216 tp->last_ack_sent = tp->rcv_nxt;
2217 m = NULL;
2218 }
2219 goto drop;
2220 }
2221
2222 /*
2223 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2224 * and it's less than ts_recent, drop it.
2225 */
2226 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2227 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2228
2229 /* Check to see if ts_recent is over 24 days old. */
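		/*
		 * (24 days is roughly how long the fastest timestamp
		 * clock PAWS allows, 1ms per tick, takes to cover 2^31
		 * ticks and render the TSTMP_LT() comparison ambiguous.)
		 */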
2230 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2231 /*
2232 * Invalidate ts_recent. If this segment updates
2233 * ts_recent, the age will be reset later and ts_recent
2234 * will get a valid value. If it does not, setting
2235 * ts_recent to zero will at least satisfy the
2236 * requirement that zero be placed in the timestamp
2237 * echo reply when ts_recent isn't valid. The
2238 * age isn't reset until we get a valid ts_recent
2239 * because we don't want out-of-order segments to be
2240 * dropped when ts_recent is old.
2241 */
2242 tp->ts_recent = 0;
2243 } else {
2244 TCPSTAT_INC(tcps_rcvduppack);
2245 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2246 TCPSTAT_INC(tcps_pawsdrop);
2247 if (tlen)
2248 goto dropafterack;
2249 goto drop;
2250 }
2251 }
2252
2253 /*
2254 * In the SYN-RECEIVED state, validate that the packet belongs to
2255 * this connection before trimming the data to fit the receive
2256 * window. Check the sequence number versus IRS since we know
2257 * the sequence numbers haven't wrapped. This is a partial fix
2258 * for the "LAND" DoS attack.
2259 */
2260 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2261 rstreason = BANDLIM_RST_OPENPORT;
2262 goto dropwithreset;
2263 }
2264
2265 todrop = tp->rcv_nxt - th->th_seq;
2266 if (todrop > 0) {
2267 if (thflags & TH_SYN) {
2268 thflags &= ~TH_SYN;
2269 th->th_seq++;
2270 if (th->th_urp > 1)
2271 th->th_urp--;
2272 else
2273 thflags &= ~TH_URG;
2274 todrop--;
2275 }
2276 /*
		 * The following if statement is from Stevens, vol. 2, p. 960.
2278 */
2279 if (todrop > tlen
2280 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2281 /*
2282 * Any valid FIN must be to the left of the window.
2283 * At this point the FIN must be a duplicate or out
2284 * of sequence; drop it.
2285 */
2286 thflags &= ~TH_FIN;
2287
2288 /*
2289 * Send an ACK to resynchronize and drop any data.
2290 * But keep on processing for RST or ACK.
2291 */
2292 tp->t_flags |= TF_ACKNOW;
2293 todrop = tlen;
2294 TCPSTAT_INC(tcps_rcvduppack);
2295 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2296 } else {
2297 TCPSTAT_INC(tcps_rcvpartduppack);
2298 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2299 }
2300 drop_hdrlen += todrop; /* drop from the top afterwards */
2301 th->th_seq += todrop;
2302 tlen -= todrop;
2303 if (th->th_urp > todrop)
2304 th->th_urp -= todrop;
2305 else {
2306 thflags &= ~TH_URG;
2307 th->th_urp = 0;
2308 }
2309 }
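	/*
	 * E.g. if rcv_nxt is 1000 and a 500-byte segment arrives with
	 * seq 900, the 100 already-received bytes are skipped by growing
	 * drop_hdrlen, and th_seq/tlen are advanced to describe only the
	 * 400 new bytes.
	 */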
2310
2311 /*
2312 * If new data are received on a connection after the
2313 * user processes are gone, then RST the other end.
2314 */
2315 if ((so->so_state & SS_NOFDREF) &&
2316 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2317 KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && "
2318 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2319 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2320
2321 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2322 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2323 "after socket was closed, "
2324 "sending RST and removing tcpcb\n",
2325 s, __func__, tcpstates[tp->t_state], tlen);
2326 free(s, M_TCPLOG);
2327 }
2328 tp = tcp_close(tp);
2329 TCPSTAT_INC(tcps_rcvafterclose);
2330 rstreason = BANDLIM_UNLIMITED;
2331 goto dropwithreset;
2332 }
2333
2334 /*
2335 * If segment ends after window, drop trailing data
2336 * (and PUSH and FIN); if nothing left, just ACK.
2337 */
2338 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2339 if (todrop > 0) {
2340 TCPSTAT_INC(tcps_rcvpackafterwin);
2341 if (todrop >= tlen) {
2342 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2343 /*
2344 * If window is closed can only take segments at
2345 * window edge, and have to drop data and PUSH from
2346 * incoming segments. Continue processing, but
2347 * remember to ack. Otherwise, drop segment
2348 * and ack.
2349 */
2350 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2351 tp->t_flags |= TF_ACKNOW;
2352 TCPSTAT_INC(tcps_rcvwinprobe);
2353 } else
2354 goto dropafterack;
2355 } else
2356 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2357 m_adj(m, -todrop);
2358 tlen -= todrop;
2359 thflags &= ~(TH_PUSH|TH_FIN);
2360 }
2361
2362 /*
2363 * If last ACK falls within this segment's sequence numbers,
2364 * record its timestamp.
2365 * NOTE:
2366 * 1) That the test incorporates suggestions from the latest
2367 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2368 * 2) That updating only on newer timestamps interferes with
2369 * our earlier PAWS tests, so this check should be solely
2370 * predicated on the sequence space of this segment.
2371 * 3) That we modify the segment boundary check to be
2372 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2373 * instead of RFC1323's
2374 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2375 * This modified check allows us to overcome RFC1323's
2376 * limitations as described in Stevens TCP/IP Illustrated
2377 * Vol. 2 p.869. In such cases, we can still calculate the
2378 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2379 */
2380 if ((to.to_flags & TOF_TS) != 0 &&
2381 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2382 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2383 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2384 tp->ts_recent_age = tcp_ts_getticks();
2385 tp->ts_recent = to.to_tsval;
2386 }
2387
2388 /*
2389 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2390 * flag is on (half-synchronized state), then queue data for
2391 * later processing; else drop segment and return.
2392 */
2393 if ((thflags & TH_ACK) == 0) {
2394 if (tp->t_state == TCPS_SYN_RECEIVED ||
2395 (tp->t_flags & TF_NEEDSYN)) {
2396 #ifdef TCP_RFC7413
2397 if (tp->t_state == TCPS_SYN_RECEIVED &&
2398 tp->t_flags & TF_FASTOPEN) {
2399 tp->snd_wnd = tiwin;
2400 cc_conn_init(tp);
2401 }
2402 #endif
2403 goto step6;
2404 } else if (tp->t_flags & TF_ACKNOW)
2405 goto dropafterack;
2406 else
2407 goto drop;
2408 }
2409
2410 /*
2411 * Ack processing.
2412 */
2413 switch (tp->t_state) {
2414
2415 /*
2416 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2417 * ESTABLISHED state and continue processing.
2418 * The ACK was checked above.
2419 */
2420 case TCPS_SYN_RECEIVED:
2421
2422 TCPSTAT_INC(tcps_connects);
2423 soisconnected(so);
2424 /* Do window scaling? */
2425 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2426 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2427 tp->rcv_scale = tp->request_r_scale;
2428 tp->snd_wnd = tiwin;
2429 }
2430 /*
2431 * Make transitions:
2432 * SYN-RECEIVED -> ESTABLISHED
2433 * SYN-RECEIVED* -> FIN-WAIT-1
2434 */
2435 tp->t_starttime = ticks;
2436 if (tp->t_flags & TF_NEEDFIN) {
2437 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2438 tp->t_flags &= ~TF_NEEDFIN;
2439 } else {
2440 tcp_state_change(tp, TCPS_ESTABLISHED);
2441 TCP_PROBE5(accept__established, NULL, tp,
2442 mtod(m, const char *), tp, th);
2443 #ifdef TCP_RFC7413
2444 if (tp->t_tfo_pending) {
2445 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2446 tp->t_tfo_pending = NULL;
2447
2448 /*
2449 * Account for the ACK of our SYN prior to
2450 * regular ACK processing below.
2451 */
2452 tp->snd_una++;
2453 }
2454 /*
2455 * TFO connections call cc_conn_init() during SYN
2456 * processing. Calling it again here for such
2457 * connections is not harmless as it would undo the
2458 * snd_cwnd reduction that occurs when a TFO SYN|ACK
2459 * is retransmitted.
2460 */
2461 if (!(tp->t_flags & TF_FASTOPEN))
2462 #endif
2463 cc_conn_init(tp);
2464 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2465 }
2466 /*
		 * If segment contains data or FIN, tcp_reass() will be
		 * called later; if not, do so now to pass queued data to user.
2469 */
2470 if (tlen == 0 && (thflags & TH_FIN) == 0)
2471 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2472 (struct mbuf *)0);
2473 tp->snd_wl1 = th->th_seq - 1;
2474 /* FALLTHROUGH */
2475
2476 /*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up-to-date window information, we update our window
	 * information.
2483 */
2484 case TCPS_ESTABLISHED:
2485 case TCPS_FIN_WAIT_1:
2486 case TCPS_FIN_WAIT_2:
2487 case TCPS_CLOSE_WAIT:
2488 case TCPS_CLOSING:
2489 case TCPS_LAST_ACK:
2490 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2491 TCPSTAT_INC(tcps_rcvacktoomuch);
2492 goto dropafterack;
2493 }
2494 if ((tp->t_flags & TF_SACK_PERMIT) &&
2495 ((to.to_flags & TOF_SACK) ||
2496 !TAILQ_EMPTY(&tp->snd_holes)))
2497 sack_changed = tcp_sack_doack(tp, &to, th->th_ack);
2498 else
2499 /*
2500 * Reset the value so that previous (valid) value
2501 * from the last ack with SACK doesn't get used.
2502 */
2503 tp->sackhint.sacked_bytes = 0;
2504
2505 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2506 hhook_run_tcp_est_in(tp, th, &to);
2507
2508 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2509 u_int maxseg;
2510
2511 maxseg = tcp_maxseg(tp);
2512 if (tlen == 0 &&
2513 (tiwin == tp->snd_wnd ||
2514 (tp->t_flags & TF_SACK_PERMIT))) {
2515 /*
2516 * If this is the first time we've seen a
2517 * FIN from the remote, this is not a
2518 * duplicate and it needs to be processed
2519 * normally. This happens during a
2520 * simultaneous close.
2521 */
2522 if ((thflags & TH_FIN) &&
2523 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2524 tp->t_dupacks = 0;
2525 break;
2526 }
2527 TCPSTAT_INC(tcps_rcvdupack);
2528 /*
				 * If we have outstanding data (other than
				 * a window probe), this is a completely
				 * duplicate ack (i.e., window info didn't
				 * change and FIN isn't set), the ack is
				 * the biggest we've seen, and we've seen
				 * exactly our rexmt threshold of them, so
				 * assume a packet has been dropped and
				 * retransmit it.
2537 * Kludge snd_nxt & the congestion
2538 * window so we send only this one
2539 * packet.
2540 *
2541 * We know we're losing at the current
2542 * window size so do congestion avoidance
2543 * (set ssthresh to half the current window
2544 * and pull our congestion window back to
2545 * the new ssthresh).
2546 *
2547 * Dup acks mean that packets have left the
2548 * network (they're now cached at the receiver)
2549 * so bump cwnd by the amount in the receiver
2550 * to keep a constant cwnd packets in the
2551 * network.
2552 *
2553 * When using TCP ECN, notify the peer that
2554 * we reduced the cwnd.
2555 */
2556 /*
				 * The following two kinds of acks should not
				 * affect dupack counting:
2559 * 1) Old acks
2560 * 2) Acks with SACK but without any new SACK
2561 * information in them. These could result from
2562 * any anomaly in the network like a switch
2563 * duplicating packets or a possible DoS attack.
2564 */
2565 if (th->th_ack != tp->snd_una ||
2566 ((tp->t_flags & TF_SACK_PERMIT) &&
2567 !sack_changed))
2568 break;
2569 else if (!tcp_timer_active(tp, TT_REXMT))
2570 tp->t_dupacks = 0;
2571 else if (++tp->t_dupacks > tcprexmtthresh ||
2572 IN_FASTRECOVERY(tp->t_flags)) {
2573 cc_ack_received(tp, th, CC_DUPACK);
2574 if ((tp->t_flags & TF_SACK_PERMIT) &&
2575 IN_FASTRECOVERY(tp->t_flags)) {
2576 int awnd;
2577
2578 /*
2579 * Compute the amount of data in flight first.
2580 * We can inject new data into the pipe iff
2581 * we have less than 1/2 the original window's
2582 * worth of data in flight.
2583 */
2584 if (V_tcp_do_rfc6675_pipe)
2585 awnd = tcp_compute_pipe(tp);
2586 else
2587 awnd = (tp->snd_nxt - tp->snd_fack) +
2588 tp->sackhint.sack_bytes_rexmit;
2589
2590 if (awnd < tp->snd_ssthresh) {
2591 tp->snd_cwnd += maxseg;
2592 if (tp->snd_cwnd > tp->snd_ssthresh)
2593 tp->snd_cwnd = tp->snd_ssthresh;
2594 }
2595 } else
2596 tp->snd_cwnd += maxseg;
2597 (void) tp->t_fb->tfb_tcp_output(tp);
2598 goto drop;
2599 } else if (tp->t_dupacks == tcprexmtthresh) {
2600 tcp_seq onxt = tp->snd_nxt;
2601
2602 /*
2603 * If we're doing sack, check to
2604 * see if we're already in sack
2605 * recovery. If we're not doing sack,
2606 * check to see if we're in newreno
2607 * recovery.
2608 */
2609 if (tp->t_flags & TF_SACK_PERMIT) {
2610 if (IN_FASTRECOVERY(tp->t_flags)) {
2611 tp->t_dupacks = 0;
2612 break;
2613 }
2614 } else {
2615 if (SEQ_LEQ(th->th_ack,
2616 tp->snd_recover)) {
2617 tp->t_dupacks = 0;
2618 break;
2619 }
2620 }
2621 /* Congestion signal before ack. */
2622 cc_cong_signal(tp, th, CC_NDUPACK);
2623 cc_ack_received(tp, th, CC_DUPACK);
2624 tcp_timer_activate(tp, TT_REXMT, 0);
2625 tp->t_rtttime = 0;
2626 if (tp->t_flags & TF_SACK_PERMIT) {
2627 TCPSTAT_INC(
2628 tcps_sack_recovery_episode);
2629 tp->sack_newdata = tp->snd_nxt;
2630 tp->snd_cwnd = maxseg;
2631 (void) tp->t_fb->tfb_tcp_output(tp);
2632 goto drop;
2633 }
2634 tp->snd_nxt = th->th_ack;
2635 tp->snd_cwnd = maxseg;
2636 (void) tp->t_fb->tfb_tcp_output(tp);
2637 KASSERT(tp->snd_limited <= 2,
2638 ("%s: tp->snd_limited too big",
2639 __func__));
2640 tp->snd_cwnd = tp->snd_ssthresh +
2641 maxseg *
2642 (tp->t_dupacks - tp->snd_limited);
2643 if (SEQ_GT(onxt, tp->snd_nxt))
2644 tp->snd_nxt = onxt;
2645 goto drop;
2646 } else if (V_tcp_do_rfc3042) {
2647 /*
2648 * Process first and second duplicate
2649 * ACKs. Each indicates a segment
2650 * leaving the network, creating room
2651 * for more. Make sure we can send a
2652 * packet on reception of each duplicate
2653 * ACK by increasing snd_cwnd by one
2654 * segment. Restore the original
2655 * snd_cwnd after packet transmission.
2656 */
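				/*
				 * E.g. on the first dupack with a full
				 * window outstanding, cwnd is pinned to
				 * (snd_nxt - snd_una) + 1 * maxseg below,
				 * letting exactly one new segment out.
				 */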
2657 cc_ack_received(tp, th, CC_DUPACK);
2658 u_long oldcwnd = tp->snd_cwnd;
2659 tcp_seq oldsndmax = tp->snd_max;
2660 u_int sent;
2661 int avail;
2662
2663 KASSERT(tp->t_dupacks == 1 ||
2664 tp->t_dupacks == 2,
2665 ("%s: dupacks not 1 or 2",
2666 __func__));
2667 if (tp->t_dupacks == 1)
2668 tp->snd_limited = 0;
2669 tp->snd_cwnd =
2670 (tp->snd_nxt - tp->snd_una) +
2671 (tp->t_dupacks - tp->snd_limited) *
2672 maxseg;
2673 /*
2674 * Only call tcp_output when there
2675 * is new data available to be sent.
2676 * Otherwise we would send pure ACKs.
2677 */
2678 SOCKBUF_LOCK(&so->so_snd);
2679 avail = sbavail(&so->so_snd) -
2680 (tp->snd_nxt - tp->snd_una);
2681 SOCKBUF_UNLOCK(&so->so_snd);
2682 if (avail > 0)
2683 (void) tp->t_fb->tfb_tcp_output(tp);
2684 sent = tp->snd_max - oldsndmax;
2685 if (sent > maxseg) {
2686 KASSERT((tp->t_dupacks == 2 &&
2687 tp->snd_limited == 0) ||
2688 (sent == maxseg + 1 &&
2689 tp->t_flags & TF_SENTFIN),
2690 ("%s: sent too much",
2691 __func__));
2692 tp->snd_limited = 2;
2693 } else if (sent > 0)
2694 ++tp->snd_limited;
2695 tp->snd_cwnd = oldcwnd;
2696 goto drop;
2697 }
2698 }
2699 break;
2700 } else {
2701 /*
2702 * This ack is advancing the left edge, reset the
2703 * counter.
2704 */
2705 tp->t_dupacks = 0;
2706 /*
2707 * If this ack also has new SACK info, increment the
2708 * counter as per rfc6675.
2709 */
2710 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed)
2711 tp->t_dupacks++;
2712 }
2713
2714 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2715 ("%s: th_ack <= snd_una", __func__));
2716
2717 /*
2718 * If the congestion window was inflated to account
2719 * for the other side's cached packets, retract it.
2720 */
2721 if (IN_FASTRECOVERY(tp->t_flags)) {
2722 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2723 if (tp->t_flags & TF_SACK_PERMIT)
2724 tcp_sack_partialack(tp, th);
2725 else
2726 tcp_newreno_partial_ack(tp, th);
2727 } else
2728 cc_post_recovery(tp, th);
2729 }
2730 /*
2731 * If we reach this point, ACK is not a duplicate,
2732 * i.e., it ACKs something we sent.
2733 */
2734 if (tp->t_flags & TF_NEEDSYN) {
2735 /*
2736 * T/TCP: Connection was half-synchronized, and our
2737 * SYN has been ACK'd (so connection is now fully
2738 * synchronized). Go to non-starred state,
2739 * increment snd_una for ACK of SYN, and check if
2740 * we can do window scaling.
2741 */
2742 tp->t_flags &= ~TF_NEEDSYN;
2743 tp->snd_una++;
2744 /* Do window scaling? */
2745 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2746 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2747 tp->rcv_scale = tp->request_r_scale;
2748 /* Send window already scaled. */
2749 }
2750 }
2751
2752 process_ACK:
2753 INP_WLOCK_ASSERT(tp->t_inpcb);
2754
2755 acked = BYTES_THIS_ACK(tp, th);
		KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
2757 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2758 tp->snd_una, th->th_ack, tp, m));
2759 TCPSTAT_INC(tcps_rcvackpack);
2760 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2761
2762 /*
2763 * If we just performed our first retransmit, and the ACK
2764 * arrives within our recovery window, then it was a mistake
2765 * to do the retransmit in the first place. Recover our
2766 * original cwnd and ssthresh, and proceed to transmit where
2767 * we left off.
2768 */
2769 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2770 (int)(ticks - tp->t_badrxtwin) < 0)
2771 cc_cong_signal(tp, th, CC_RTO_ERR);
2772
2773 /*
2774 * If we have a timestamp reply, update smoothed
2775 * round trip time. If no timestamp is present but
2776 * transmit timer is running and timed sequence
2777 * number was acked, update smoothed round trip time.
2778 * Since we now have an rtt measurement, cancel the
2779 * timer backoff (cf., Phil Karn's retransmit alg.).
2780 * Recompute the initial retransmit timer.
2781 *
		 * Some boxes send broken timestamp replies
		 * during the SYN+ACK phase; ignore
		 * timestamps of 0 or we could calculate a
		 * huge RTT and blow up the retransmit timer.
2786 */
2787 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2788 u_int t;
2789
2790 t = tcp_ts_getticks() - to.to_tsecr;
2791 if (!tp->t_rttlow || tp->t_rttlow > t)
2792 tp->t_rttlow = t;
2793 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2794 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2795 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2796 tp->t_rttlow = ticks - tp->t_rtttime;
2797 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2798 }
2799
2800 /*
2801 * If all outstanding data is acked, stop retransmit
2802 * timer and remember to restart (more output or persist).
2803 * If there is more data to be acked, restart retransmit
2804 * timer, using current (possibly backed-off) value.
2805 */
2806 if (th->th_ack == tp->snd_max) {
2807 tcp_timer_activate(tp, TT_REXMT, 0);
2808 needoutput = 1;
2809 } else if (!tcp_timer_active(tp, TT_PERSIST))
2810 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2811
2812 /*
2813 * If no data (only SYN) was ACK'd,
2814 * skip rest of ACK processing.
2815 */
2816 if (acked == 0)
2817 goto step6;
2818
2819 /*
2820 * Let the congestion control algorithm update congestion
2821 * control related information. This typically means increasing
2822 * the congestion window.
2823 */
2824 cc_ack_received(tp, th, CC_ACK);
2825
2826 SOCKBUF_LOCK(&so->so_snd);
2827 if (acked > sbavail(&so->so_snd)) {
2828 if (tp->snd_wnd >= sbavail(&so->so_snd))
2829 tp->snd_wnd -= sbavail(&so->so_snd);
2830 else
2831 tp->snd_wnd = 0;
2832 mfree = sbcut_locked(&so->so_snd,
2833 (int)sbavail(&so->so_snd));
2834 ourfinisacked = 1;
2835 } else {
2836 mfree = sbcut_locked(&so->so_snd, acked);
2837 if (tp->snd_wnd >= (u_long) acked)
2838 tp->snd_wnd -= acked;
2839 else
2840 tp->snd_wnd = 0;
2841 ourfinisacked = 0;
2842 }
2843 /* NB: sowwakeup_locked() does an implicit unlock. */
2844 sowwakeup_locked(so);
2845 m_freem(mfree);
2846 /* Detect una wraparound. */
2847 if (!IN_RECOVERY(tp->t_flags) &&
2848 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2849 SEQ_LEQ(th->th_ack, tp->snd_recover))
2850 tp->snd_recover = th->th_ack - 1;
2851 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2852 if (IN_RECOVERY(tp->t_flags) &&
2853 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2854 EXIT_RECOVERY(tp->t_flags);
2855 }
2856 tp->snd_una = th->th_ack;
2857 if (tp->t_flags & TF_SACK_PERMIT) {
2858 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2859 tp->snd_recover = tp->snd_una;
2860 }
2861 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2862 tp->snd_nxt = tp->snd_una;
2863
2864 switch (tp->t_state) {
2865
2866 /*
2867 * In FIN_WAIT_1 STATE in addition to the processing
2868 * for the ESTABLISHED state if our FIN is now acknowledged
2869 * then enter FIN_WAIT_2.
2870 */
2871 case TCPS_FIN_WAIT_1:
2872 if (ourfinisacked) {
2873 /*
2874 * If we can't receive any more
2875 * data, then closing user can proceed.
2876 * Starting the timer is contrary to the
2877 * specification, but if we don't get a FIN
2878 * we'll hang forever.
2879 *
2880 * XXXjl:
2881 * we should release the tp also, and use a
2882 * compressed state.
2883 */
2884 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2885 soisdisconnected(so);
2886 tcp_timer_activate(tp, TT_2MSL,
2887 (tcp_fast_finwait2_recycle ?
2888 tcp_finwait2_timeout :
2889 TP_MAXIDLE(tp)));
2890 }
2891 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2892 }
2893 break;
2894
2895 /*
2896 * In CLOSING STATE in addition to the processing for
2897 * the ESTABLISHED state if the ACK acknowledges our FIN
2898 * then enter the TIME-WAIT state, otherwise ignore
2899 * the segment.
2900 */
2901 case TCPS_CLOSING:
2902 if (ourfinisacked) {
2903 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2904 tcp_twstart(tp);
2905 INP_INFO_RUNLOCK(&V_tcbinfo);
2906 m_freem(m);
2907 return;
2908 }
2909 break;
2910
2911 /*
2912 * In LAST_ACK, we may still be waiting for data to drain
2913 * and/or to be acked, as well as for the ack of our FIN.
2914 * If our FIN is now acknowledged, delete the TCB,
2915 * enter the closed state and return.
2916 */
2917 case TCPS_LAST_ACK:
2918 if (ourfinisacked) {
2919 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2920 tp = tcp_close(tp);
2921 goto drop;
2922 }
2923 break;
2924 }
2925 }
2926
2927 step6:
2928 INP_WLOCK_ASSERT(tp->t_inpcb);
2929
2930 /*
2931 * Update window information.
	 * Don't look at window if no ACK: TACs send garbage on first SYN.
2933 */
2934 if ((thflags & TH_ACK) &&
2935 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2936 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2937 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2938 /* keep track of pure window updates */
2939 if (tlen == 0 &&
2940 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2941 TCPSTAT_INC(tcps_rcvwinupd);
2942 tp->snd_wnd = tiwin;
2943 tp->snd_wl1 = th->th_seq;
2944 tp->snd_wl2 = th->th_ack;
2945 if (tp->snd_wnd > tp->max_sndwnd)
2946 tp->max_sndwnd = tp->snd_wnd;
2947 needoutput = 1;
2948 }
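	/*
	 * (snd_wl1/snd_wl2 record the seq/ack of the segment that last
	 * updated the window, so only segments that are genuinely newer,
	 * by seq, then by ack, then by a larger window, may update it
	 * again.)
	 */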
2949
2950 /*
2951 * Process segments with URG.
2952 */
2953 if ((thflags & TH_URG) && th->th_urp &&
2954 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2955 /*
2956 * This is a kludge, but if we receive and accept
2957 * random urgent pointers, we'll crash in
2958 * soreceive. It's hard to imagine someone
2959 * actually wanting to send this much urgent data.
2960 */
2961 SOCKBUF_LOCK(&so->so_rcv);
2962 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
2963 th->th_urp = 0; /* XXX */
2964 thflags &= ~TH_URG; /* XXX */
2965 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2966 goto dodata; /* XXX */
2967 }
2968 /*
2969 * If this segment advances the known urgent pointer,
2970 * then mark the data stream. This should not happen
2971 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2972 * a FIN has been received from the remote side.
2973 * In these states we ignore the URG.
2974 *
2975 * According to RFC961 (Assigned Protocols),
2976 * the urgent pointer points to the last octet
2977 * of urgent data. We continue, however,
2978 * to consider it to indicate the first octet
2979 * of data past the urgent section as the original
2980 * spec states (in one of two places).
2981 */
2982 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2983 tp->rcv_up = th->th_seq + th->th_urp;
2984 so->so_oobmark = sbavail(&so->so_rcv) +
2985 (tp->rcv_up - tp->rcv_nxt) - 1;
2986 if (so->so_oobmark == 0)
2987 so->so_rcv.sb_state |= SBS_RCVATMARK;
2988 sohasoutofband(so);
2989 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2990 }
2991 SOCKBUF_UNLOCK(&so->so_rcv);
2992 /*
		 * Remove out-of-band data so it doesn't get presented to
		 * the user.  This can happen independent of advancing the
		 * URG pointer, but if two URGs are pending at once, some
		 * out-of-band data may creep in... ick.
2997 */
2998 if (th->th_urp <= (u_long)tlen &&
2999 !(so->so_options & SO_OOBINLINE)) {
3000 /* hdr drop is delayed */
3001 tcp_pulloutofband(so, th, m, drop_hdrlen);
3002 }
3003 } else {
3004 /*
3005 * If no out of band data is expected,
3006 * pull receive urgent pointer along
3007 * with the receive window.
3008 */
3009 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3010 tp->rcv_up = tp->rcv_nxt;
3011 }
3012 dodata: /* XXX */
3013 INP_WLOCK_ASSERT(tp->t_inpcb);
3014
3015 /*
3016 * Process the segment text, merging it into the TCP sequencing queue,
3017 * and arranging for acknowledgment of receipt if necessary.
3018 * This process logically involves adjusting tp->rcv_wnd as data
3019 * is presented to the user (this happens in tcp_usrreq.c,
3020 * case PRU_RCVD). If a FIN has already been received on this
3021 * connection then we just ignore the text.
3022 */
3023 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3024 (tp->t_flags & TF_FASTOPEN));
3025 if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
3026 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3027 tcp_seq save_start = th->th_seq;
3028 m_adj(m, drop_hdrlen); /* delayed header drop */
3029 /*
3030 * Insert segment which includes th into TCP reassembly queue
3031 * with control block tp. Set thflags to whether reassembly now
3032 * includes a segment with FIN. This handles the common case
3033 * inline (segment is the next to be received on an established
3034 * connection, and the queue is empty), avoiding linkage into
3035 * and removal from the queue and repetition of various
3036 * conversions.
3037 * Set DELACK for segments received in order, but ack
3038 * immediately when segments are out of order (so
3039 * fast retransmit can work).
3040 */
3041 if (th->th_seq == tp->rcv_nxt &&
3042 LIST_EMPTY(&tp->t_segq) &&
3043 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3044 tfo_syn)) {
3045 if (DELAY_ACK(tp, tlen) || tfo_syn)
3046 tp->t_flags |= TF_DELACK;
3047 else
3048 tp->t_flags |= TF_ACKNOW;
3049 tp->rcv_nxt += tlen;
3050 thflags = th->th_flags & TH_FIN;
3051 TCPSTAT_INC(tcps_rcvpack);
3052 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3053 SOCKBUF_LOCK(&so->so_rcv);
3054 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3055 m_freem(m);
3056 else
3057 sbappendstream_locked(&so->so_rcv, m, 0);
3058 /* NB: sorwakeup_locked() does an implicit unlock. */
3059 sorwakeup_locked(so);
3060 } else {
3061 /*
			 * XXX: Due to the header drop above, "th" is
			 * theoretically invalid by now.  Fortunately
			 * m_adj() doesn't actually free any mbufs
			 * when trimming from the head.
3066 */
3067 thflags = tcp_reass(tp, th, &tlen, m);
3068 tp->t_flags |= TF_ACKNOW;
3069 }
3070 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
3071 tcp_update_sack_list(tp, save_start, save_start + tlen);
3072 #if 0
3073 /*
3074 * Note the amount of data that peer has sent into
3075 * our window, in order to estimate the sender's
3076 * buffer size.
3077 * XXX: Unused.
3078 */
3079 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
3080 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
3081 else
3082 len = so->so_rcv.sb_hiwat;
3083 #endif
3084 } else {
3085 m_freem(m);
3086 thflags &= ~TH_FIN;
3087 }
3088
3089 /*
3090 * If FIN is received ACK the FIN and let the user know
3091 * that the connection is closing.
3092 */
3093 if (thflags & TH_FIN) {
3094 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3095 socantrcvmore(so);
3096 /*
3097 * If connection is half-synchronized
3098 * (ie NEEDSYN flag on) then delay ACK,
3099 * so it may be piggybacked when SYN is sent.
			 * Otherwise, since we received a FIN, no more
			 * input can be expected; send the ACK now.
3102 */
3103 if (tp->t_flags & TF_NEEDSYN)
3104 tp->t_flags |= TF_DELACK;
3105 else
3106 tp->t_flags |= TF_ACKNOW;
3107 tp->rcv_nxt++;
3108 }
3109 switch (tp->t_state) {
3110
3111 /*
3112 * In SYN_RECEIVED and ESTABLISHED STATES
3113 * enter the CLOSE_WAIT state.
3114 */
3115 case TCPS_SYN_RECEIVED:
3116 tp->t_starttime = ticks;
3117 /* FALLTHROUGH */
3118 case TCPS_ESTABLISHED:
3119 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3120 break;
3121
3122 /*
3123 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3124 * enter the CLOSING state.
3125 */
3126 case TCPS_FIN_WAIT_1:
3127 tcp_state_change(tp, TCPS_CLOSING);
3128 break;
3129
3130 /*
3131 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3132 * starting the time-wait timer, turning off the other
3133 * standard timers.
3134 */
3135 case TCPS_FIN_WAIT_2:
3136 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
3137 KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata "
3138 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
3139 ti_locked));
3140
3141 tcp_twstart(tp);
3142 INP_INFO_RUNLOCK(&V_tcbinfo);
3143 return;
3144 }
3145 }
3146 if (ti_locked == TI_RLOCKED)
3147 INP_INFO_RUNLOCK(&V_tcbinfo);
3148 ti_locked = TI_UNLOCKED;
3149
3150 #ifdef TCPDEBUG
3151 if (so->so_options & SO_DEBUG)
3152 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3153 &tcp_savetcp, 0);
3154 #endif
3155 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));
3156
3157 /*
3158 * Return any desired output.
3159 */
3160 if (needoutput || (tp->t_flags & TF_ACKNOW))
3161 (void) tp->t_fb->tfb_tcp_output(tp);
3162
3163 check_delack:
3164 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3165 __func__, ti_locked));
3166 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3167 INP_WLOCK_ASSERT(tp->t_inpcb);
3168
3169 if (tp->t_flags & TF_DELACK) {
3170 tp->t_flags &= ~TF_DELACK;
3171 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3172 }
3173 INP_WUNLOCK(tp->t_inpcb);
3174 return;
3175
3176 dropafterack:
3177 /*
	 * Generate an ACK, dropping the incoming segment if it occupies
	 * sequence space; the ACK reflects our state.
3180 *
3181 * We can now skip the test for the RST flag since all
3182 * paths to this code happen after packets containing
3183 * RST have been dropped.
3184 *
3185 * In the SYN-RECEIVED state, don't send an ACK unless the
3186 * segment we received passes the SYN-RECEIVED ACK test.
3187 * If it fails send a RST. This breaks the loop in the
3188 * "LAND" DoS attack, and also prevents an ACK storm
3189 * between two listening ports that have been sent forged
3190 * SYN segments, each with the source address of the other.
3191 */
3192 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3193 (SEQ_GT(tp->snd_una, th->th_ack) ||
	    SEQ_GT(th->th_ack, tp->snd_max))) {
3195 rstreason = BANDLIM_RST_OPENPORT;
3196 goto dropwithreset;
3197 }
3198 #ifdef TCPDEBUG
3199 if (so->so_options & SO_DEBUG)
3200 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3201 &tcp_savetcp, 0);
3202 #endif
3203 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));
3204 if (ti_locked == TI_RLOCKED)
3205 INP_INFO_RUNLOCK(&V_tcbinfo);
3206 ti_locked = TI_UNLOCKED;
3207
3208 tp->t_flags |= TF_ACKNOW;
3209 (void) tp->t_fb->tfb_tcp_output(tp);
3210 INP_WUNLOCK(tp->t_inpcb);
3211 m_freem(m);
3212 return;
3213
3214 dropwithreset:
3215 if (ti_locked == TI_RLOCKED)
3216 INP_INFO_RUNLOCK(&V_tcbinfo);
3217 ti_locked = TI_UNLOCKED;
3218
3219 if (tp != NULL) {
3220 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3221 INP_WUNLOCK(tp->t_inpcb);
3222 } else
3223 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3224 return;
3225
3226 drop:
3227 if (ti_locked == TI_RLOCKED) {
3228 INP_INFO_RUNLOCK(&V_tcbinfo);
3229 ti_locked = TI_UNLOCKED;
3230 }
3231 #ifdef INVARIANTS
3232 else
3233 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3234 #endif
3235
3236 /*
3237 * Drop space held by incoming segment and return.
3238 */
3239 #ifdef TCPDEBUG
3240 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3241 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3242 &tcp_savetcp, 0);
3243 #endif
3244 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));
3245 if (tp != NULL)
3246 INP_WUNLOCK(tp->t_inpcb);
3247 m_freem(m);
3248 }
3249
3250 /*
3251 * Issue RST and make ACK acceptable to originator of segment.
3252 * The mbuf must still include the original packet header.
3253 * tp may be NULL.
3254 */
3255 void
3256 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3257 int tlen, int rstreason)
3258 {
3259 #ifdef INET
3260 struct ip *ip;
3261 #endif
3262 #ifdef INET6
3263 struct ip6_hdr *ip6;
3264 #endif
3265
3266 if (tp != NULL) {
3267 INP_WLOCK_ASSERT(tp->t_inpcb);
3268 }
3269
3270 /* Don't bother if destination was broadcast/multicast. */
3271 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3272 goto drop;
3273 #ifdef INET6
3274 if (mtod(m, struct ip *)->ip_v == 6) {
3275 ip6 = mtod(m, struct ip6_hdr *);
3276 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3277 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3278 goto drop;
3279 /* IPv6 anycast check is done at tcp6_input() */
3280 }
3281 #endif
3282 #if defined(INET) && defined(INET6)
3283 else
3284 #endif
3285 #ifdef INET
3286 {
3287 ip = mtod(m, struct ip *);
3288 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3289 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3290 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3291 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3292 goto drop;
3293 }
3294 #endif
3295
3296 /* Perform bandwidth limiting. */
3297 if (badport_bandlim(rstreason) < 0)
3298 goto drop;
3299
3300 /* tcp_respond consumes the mbuf chain. */
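	/*
	 * Per RFC 793 reset generation: if the offending segment carried
	 * an ACK, the RST may use that ack value as its own sequence
	 * number and needs no ACK of its own; otherwise the RST must ACK
	 * all of the sequence space the segment occupied (including a
	 * SYN) for the peer to accept it.
	 */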
3301 if (th->th_flags & TH_ACK) {
3302 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3303 th->th_ack, TH_RST);
3304 } else {
3305 if (th->th_flags & TH_SYN)
3306 tlen++;
3307 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3308 (tcp_seq)0, TH_RST|TH_ACK);
3309 }
3310 return;
3311 drop:
3312 m_freem(m);
3313 }
3314
3315 /*
3316 * Parse TCP options and place in tcpopt.
3317 */
3318 void
3319 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3320 {
3321 int opt, optlen;
3322
3323 to->to_flags = 0;
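	/*
	 * Each option is laid out as <kind, length, data...>, except for
	 * the single-byte EOL and NOP kinds; e.g. a timestamp option is
	 * the 10 bytes <8, 10, tsval[4], tsecr[4]>.
	 */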
3324 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3325 opt = cp[0];
3326 if (opt == TCPOPT_EOL)
3327 break;
3328 if (opt == TCPOPT_NOP)
3329 optlen = 1;
3330 else {
3331 if (cnt < 2)
3332 break;
3333 optlen = cp[1];
3334 if (optlen < 2 || optlen > cnt)
3335 break;
3336 }
3337 switch (opt) {
3338 case TCPOPT_MAXSEG:
3339 if (optlen != TCPOLEN_MAXSEG)
3340 continue;
3341 if (!(flags & TO_SYN))
3342 continue;
3343 to->to_flags |= TOF_MSS;
3344 bcopy((char *)cp + 2,
3345 (char *)&to->to_mss, sizeof(to->to_mss));
3346 to->to_mss = ntohs(to->to_mss);
3347 break;
3348 case TCPOPT_WINDOW:
3349 if (optlen != TCPOLEN_WINDOW)
3350 continue;
3351 if (!(flags & TO_SYN))
3352 continue;
3353 to->to_flags |= TOF_SCALE;
3354 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3355 break;
3356 case TCPOPT_TIMESTAMP:
3357 if (optlen != TCPOLEN_TIMESTAMP)
3358 continue;
3359 to->to_flags |= TOF_TS;
3360 bcopy((char *)cp + 2,
3361 (char *)&to->to_tsval, sizeof(to->to_tsval));
3362 to->to_tsval = ntohl(to->to_tsval);
3363 bcopy((char *)cp + 6,
3364 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3365 to->to_tsecr = ntohl(to->to_tsecr);
3366 break;
3367 #ifdef TCP_SIGNATURE
3368 /*
3369 * XXX In order to reply to a host which has set the
3370 * TCP_SIGNATURE option in its initial SYN, we have to
3371 * record the fact that the option was observed here
3372 * for the syncache code to perform the correct response.
3373 */
3374 case TCPOPT_SIGNATURE:
3375 if (optlen != TCPOLEN_SIGNATURE)
3376 continue;
3377 to->to_flags |= TOF_SIGNATURE;
3378 to->to_signature = cp + 2;
3379 break;
3380 #endif
3381 case TCPOPT_SACK_PERMITTED:
3382 if (optlen != TCPOLEN_SACK_PERMITTED)
3383 continue;
3384 if (!(flags & TO_SYN))
3385 continue;
3386 if (!V_tcp_do_sack)
3387 continue;
3388 to->to_flags |= TOF_SACKPERM;
3389 break;
3390 case TCPOPT_SACK:
3391 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3392 continue;
3393 if (flags & TO_SYN)
3394 continue;
3395 to->to_flags |= TOF_SACK;
3396 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3397 to->to_sacks = cp + 2;
3398 TCPSTAT_INC(tcps_sack_rcv_blocks);
3399 break;
3400 #ifdef TCP_RFC7413
3401 case TCPOPT_FAST_OPEN:
			if ((optlen != TCPOLEN_FAST_OPEN_EMPTY) &&
			    ((optlen < TCPOLEN_FAST_OPEN_MIN) ||
			    (optlen > TCPOLEN_FAST_OPEN_MAX)))
3405 continue;
3406 if (!(flags & TO_SYN))
3407 continue;
3408 if (!V_tcp_fastopen_enabled)
3409 continue;
3410 to->to_flags |= TOF_FASTOPEN;
3411 to->to_tfo_len = optlen - 2;
3412 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL;
3413 break;
3414 #endif
3415 default:
3416 continue;
3417 }
3418 }
3419 }
3420
3421 /*
3422 * Pull out of band byte out of a segment so
3423 * it doesn't appear in the user's data queue.
3424 * It is still reflected in the segment length for
3425 * sequencing purposes.
3426 */
3427 void
3428 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3429 int off)
3430 {
3431 int cnt = off + th->th_urp - 1;
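	/* cnt: zero-based offset of the OOB byte within the mbuf chain. */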
3432
3433 while (cnt >= 0) {
3434 if (m->m_len > cnt) {
3435 char *cp = mtod(m, caddr_t) + cnt;
3436 struct tcpcb *tp = sototcpcb(so);
3437
3438 INP_WLOCK_ASSERT(tp->t_inpcb);
3439
3440 tp->t_iobc = *cp;
3441 tp->t_oobflags |= TCPOOB_HAVEDATA;
3442 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3443 m->m_len--;
3444 if (m->m_flags & M_PKTHDR)
3445 m->m_pkthdr.len--;
3446 return;
3447 }
3448 cnt -= m->m_len;
3449 m = m->m_next;
3450 if (m == NULL)
3451 break;
3452 }
3453 panic("tcp_pulloutofband");
3454 }
3455
3456 /*
3457 * Collect new round-trip time estimate
3458 * and update averages and current timeout.
3459 */
3460 void
3461 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3462 {
3463 int delta;
3464
3465 INP_WLOCK_ASSERT(tp->t_inpcb);
3466
3467 TCPSTAT_INC(tcps_rttupdated);
3468 tp->t_rttupdated++;
3469 if (tp->t_srtt != 0) {
3470 /*
		 * srtt is stored as fixed point with 5 bits after the
		 * binary point (i.e., scaled by 32).  The following magic
3473 * is equivalent to the smoothing algorithm in rfc793 with
3474 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3475 * point). Adjust rtt to origin 0.
3476 */
3477 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3478 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3479
3480 if ((tp->t_srtt += delta) <= 0)
3481 tp->t_srtt = 1;
3482
3483 /*
3484 * We accumulate a smoothed rtt variance (actually, a
3485 * smoothed mean difference), then set the retransmit
3486 * timer to smoothed rtt + 4 times the smoothed variance.
3487 * rttvar is stored as fixed point with 4 bits after the
3488 * binary point (scaled by 16). The following is
3489 * equivalent to rfc793 smoothing with an alpha of .75
3490 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3491 * rfc793's wired-in beta.
3492 */
3493 if (delta < 0)
3494 delta = -delta;
3495 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3496 if ((tp->t_rttvar += delta) <= 0)
3497 tp->t_rttvar = 1;
3498 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3499 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3500 } else {
3501 /*
3502 * No rtt measurement yet - use the unsmoothed rtt.
3503 * Set the variance to half the rtt (so our first
3504 * retransmit happens at 3*rtt).
3505 */
3506 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3507 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3508 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3509 }
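	/*
	 * Worked example, assuming TCP_RTT_SHIFT == 5 and
	 * TCP_DELTA_SHIFT == 2: with t_srtt == 256 (8.0 ticks scaled by
	 * 32) and a new rtt of 10 ticks, delta = ((10 - 1) << 2) -
	 * (256 >> 3) = 4, so t_srtt becomes 260, i.e. 8.125 ticks, which
	 * is the 1/8 gain described above.
	 */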
3510 tp->t_rtttime = 0;
3511 tp->t_rxtshift = 0;
3512
3513 /*
	 * The retransmit should happen at rtt + 4 * rttvar.
3515 * Because of the way we do the smoothing, srtt and rttvar
3516 * will each average +1/2 tick of bias. When we compute
3517 * the retransmit timer, we want 1/2 tick of rounding and
3518 * 1 extra tick because of +-1/2 tick uncertainty in the
3519 * firing of the timer. The bias will give us exactly the
3520 * 1.5 tick we need. But, because the bias is
3521 * statistical, we have to test that we don't drop below
3522 * the minimum feasible timer (which is 2 ticks).
3523 */
3524 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3525 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3526
3527 /*
3528 * We received an ack for a packet that wasn't retransmitted;
3529 * it is probably safe to discard any error indications we've
3530 * received recently. This isn't quite right, but close enough
3531 * for now (a route might have failed after we sent a segment,
3532 * and the return path might not be symmetrical).
3533 */
3534 tp->t_softerror = 0;
3535 }
3536
3537 /*
3538 * Determine a reasonable value for maxseg size.
3539 * If the route is known, check route for mtu.
3540 * If none, use an mss that can be handled on the outgoing interface
3541 * without forcing IP to fragment. If no route is found, route has no mtu,
3542 * or the destination isn't local, use a default, hopefully conservative
3543 * size (usually 512 or the default IP max size, but no more than the mtu
3544 * of the interface), as we can't discover anything about intervening
3545 * gateways or networks. We also initialize the congestion/slow start
3546 * window to be a single segment if the destination isn't local.
3547 * While looking at the routing entry, we also initialize other path-dependent
3548 * parameters from pre-set or cached values in the routing entry.
3549 *
3550 * NOTE that resulting t_maxseg doesn't include space for TCP options or
3551 * IP options, e.g. IPSEC data, since length of this data may vary, and
3552 * thus it is calculated for every segment separately in tcp_output().
3553 *
3554 * NOTE that this routine is only called when we process an incoming
3555 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3556 * settings are handled in tcp_mssopt().
3557 */
3558 void
3559 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3560 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3561 {
3562 int mss = 0;
3563 u_long maxmtu = 0;
3564 struct inpcb *inp = tp->t_inpcb;
3565 struct hc_metrics_lite metrics;
3566 #ifdef INET6
3567 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3568 size_t min_protoh = isipv6 ?
3569 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3570 sizeof (struct tcpiphdr);
3571 #else
3572 const size_t min_protoh = sizeof(struct tcpiphdr);
3573 #endif
3574
3575 INP_WLOCK_ASSERT(tp->t_inpcb);
3576
3577 if (mtuoffer != -1) {
3578 KASSERT(offer == -1, ("%s: conflict", __func__));
3579 offer = mtuoffer - min_protoh;
3580 }
3581
3582 /* Initialize. */
3583 #ifdef INET6
3584 if (isipv6) {
3585 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3586 tp->t_maxseg = V_tcp_v6mssdflt;
3587 }
3588 #endif
3589 #if defined(INET) && defined(INET6)
3590 else
3591 #endif
3592 #ifdef INET
3593 {
3594 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3595 tp->t_maxseg = V_tcp_mssdflt;
3596 }
3597 #endif
3598
3599 /*
3600 * No route to sender, stay with default mss and return.
3601 */
3602 if (maxmtu == 0) {
3603 /*
3604 * In case we return early we need to initialize metrics
3605 * to a defined state as tcp_hc_get() would do for us
3606 * if there was no cache hit.
3607 */
3608 if (metricptr != NULL)
3609 bzero(metricptr, sizeof(struct hc_metrics_lite));
3610 return;
3611 }
3612
3613 /* What have we got? */
3614 switch (offer) {
3615 case 0:
3616 /*
3617 * Offer == 0 means that there was no MSS on the SYN
3618 * segment, in this case we use tcp_mssdflt as
3619 * already assigned to t_maxseg above.
3620 */
3621 offer = tp->t_maxseg;
3622 break;
3623
3624 case -1:
3625 /*
		 * Offer == -1 means that we didn't receive a SYN yet.
3627 */
3628 /* FALLTHROUGH */
3629
3630 default:
3631 /*
3632 * Prevent DoS attack with too small MSS. Round up
3633 * to at least minmss.
3634 */
3635 offer = max(offer, V_tcp_minmss);
3636 }
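
	/*
	 * Illustrative examples (added note, not in the original source;
	 * the values are the stock sysctl defaults): with net.inet.tcp.mssdflt
	 * at 536, an IPv4 SYN carrying no MSS option yields offer = 536,
	 * while an absurdly small advertised MSS such as 100 is rounded up
	 * to net.inet.tcp.minmss (216 by default) before being used below.
	 */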

	/*
	 * rmx information is now retrieved from tcp_hostcache.
	 */
	tcp_hc_get(&inp->inp_inc, &metrics);
	if (metricptr != NULL)
		bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));

	/*
	 * If there's a discovered mtu in tcp hostcache, use it.
	 * Else, use the link mtu.
	 */
	if (metrics.rmx_mtu)
		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
	else {
#ifdef INET6
		if (isipv6) {
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in6_localaddr(&inp->in6p_faddr))
				mss = min(mss, V_tcp_v6mssdflt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in_localaddr(inp->inp_faddr))
				mss = min(mss, V_tcp_mssdflt);
		}
#endif
		/*
		 * XXX - The above conditional (mss = maxmtu - min_protoh)
		 * probably violates the TCP spec.
		 * The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
	}
	mss = min(mss, offer);
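
	/*
	 * Worked example (added for illustration, not in the original
	 * source): over a plain Ethernet path with maxmtu = 1500 and no
	 * hostcache entry, IPv4 gives mss = 1500 - 40 = 1460.  If the peer
	 * offered an MSS of 1400 in its SYN, the min() above clamps the
	 * value to 1400.
	 */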

	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output().
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	mss = max(mss, 64);

	tp->t_maxseg = mss;
}

void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	u_long bufsize;
	struct inpcb *inp;
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;
	inp = tp->t_inpcb;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat differs from the default (in that
	 * case it has been changed on purpose with setsockopt()).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	tp->t_maxseg = mss;
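
	/*
	 * Worked example (illustrative note, not from the original source;
	 * assumes the stock default of 32768 for net.inet.tcp.sendspace):
	 * with an mss of 1460 and no rmx_sendpipe hint, roundup(32768, 1460)
	 * gives 33580 (23 full segments), which is what the send buffer is
	 * grown to, subject to the sb_max limit.
	 */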

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	u_long maxmtu = 0;
	u_long thcmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

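	/*
	 * Illustrative note (not in the original source): if both the link
	 * mtu and a hostcache mtu are known, advertise the smaller of the
	 * two minus the fixed header overhead; if only one is known, use
	 * that one.  E.g. maxmtu = 1500 and thcmtu = 0 on an IPv4 path
	 * gives an advertised MSS of 1500 - 40 = 1460.
	 */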
	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on the fact that tp->snd_una
	 * has not been updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
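	/*
	 * Numeric sketch (added for illustration, not in the original
	 * source): with maxseg = 1460 and snd_cwnd = 14600 (10 segments)
	 * on entry, a partial ACK covering 4380 bytes (3 segments) first
	 * forces one retransmission with a temporary cwnd of
	 * 1460 + 4380 = 5840, then deflates the restored window to
	 * 14600 - 4380 + 1460 = 11680 bytes, as described in RFC 6582.
	 */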
}

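/*
 * Added descriptive comment (not in the original source): estimate of the
 * number of bytes currently "in the pipe", in the spirit of the pipe
 * estimate of RFC 6675: bytes sent but not cumulatively acknowledged, plus
 * bytes retransmitted during recovery, minus bytes reported as received via
 * SACK.  For example, with snd_max - snd_una = 10000, 1000 bytes
 * retransmitted and 3000 bytes SACKed, the estimate is
 * 10000 + 1000 - 3000 = 8000 bytes.
 */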
int
tcp_compute_pipe(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una +
		tp->sackhint.sack_bytes_rexmit -
		tp->sackhint.sacked_bytes);
}