/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/netinet/tcp_input.c 332829 2018-04-20 15:44:29Z jtl $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#ifdef TCP_RFC7413
#include <netinet/tcp_fastopen.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc6675_pipe), 0,
    "Use calculated pipe/in-flight bytes per RFC 6675");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
#define	V_tcp_insecure_syn	VNET(tcp_insecure_syn)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace		VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Increment step size of the automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

/*
 * TCP statistics are stored in an array of counter(9)s whose size matches
 * that of struct tcpstat.  The TCP running connection count is a regular
 * array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

static void
tcp_vnet_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_init, NULL);

#ifdef VIMAGE
static void
tcp_vnet_uninit(const void *unused)
{

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);
}
VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_uninit, NULL);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}
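
/*
 * For illustration (not part of the original source): module code normally
 * does not call kmod_tcpstat_inc() directly but goes through the
 * KMOD_TCPSTAT_INC() macro in netinet/tcp_var.h, which derives the index
 * from the field's offset, e.g.:
 *
 *	KMOD_TCPSTAT_INC(tcps_rcvtotal);
 *	// expands approximately to:
 *	// kmod_tcpstat_inc(offsetof(struct tcpstat, tcps_rcvtotal) /
 *	//     sizeof(uint64_t));
 */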

/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tcp_maxseg(tp));
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}
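
/*
 * Worked example (illustrative, not normative): in congestion avoidance
 * (snd_cwnd > snd_ssthresh) with the default V_tcp_abc_l_var = 2 and a
 * 1460-byte maxseg, a single ACK can credit at most 2 * 1460 = 2920 bytes
 * to t_bytes_acked.  Once t_bytes_acked accumulates a full cwnd's worth of
 * bytes, CCF_ABC_SENTAWND is raised so the CC algorithm may grow cwnd,
 * per RFC 3465 Appropriate Byte Counting.
 */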

void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * Support for user specified value for initial flight size.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else if (V_tcp_initcwnd_segments)
		tp->snd_cwnd = min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * maxseg, max(2 * maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			tp->snd_cwnd = 2 * maxseg;
		else if (maxseg > 1095)
			tp->snd_cwnd = 3 * maxseg;
		else
			tp->snd_cwnd = 4 * maxseg;
	}
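
	/*
	 * Worked example (illustrative, assuming the defaults above): with
	 * maxseg = 1460 and V_tcp_initcwnd_segments = 10, the RFC 6928-style
	 * branch yields snd_cwnd = min(10 * 1460, max(2 * 1460, 10 * 1460))
	 * = 14600 bytes, i.e. ten segments.  With V_tcp_initcwnd_segments = 0
	 * and V_tcp_do_rfc3390 = 1 it would instead be min(5840, max(2920,
	 * 4380)) = 4380 bytes, i.e. three segments.
	 */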

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	u_int maxseg;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		maxseg = tcp_maxseg(tp);
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    maxseg) * maxseg;
		tp->snd_cwnd = maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}
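
/*
 * Worked example (illustrative): on an RTO with snd_wnd = 65535,
 * snd_cwnd = 40000 and maxseg = 1460, the CC_RTO case above computes
 *	snd_ssthresh = max(2, min(65535, 40000) / 2 / 1460) * 1460
 *		     = max(2, 13) * 1460 = 18980 bytes,
 * and restarts slow start from snd_cwnd = 1460 (one segment), per RFC 5681.
 */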

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * the following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxseg) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
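
/*
 * Example (illustrative): with delayed ACKs enabled, a 1448-byte in-order
 * segment arriving while no delack timer runs and the last advertised
 * window was non-zero gives DELAY_ACK(tp, 1448) == true, so the ACK may be
 * held for up to tcp_delacktime; a 2896-byte LRO-aggregated chunk (larger
 * than t_maxseg) fails the tlen check and is ACKed immediately.
 */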

static void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT1:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		if (tp->t_flags & TF_DELACK)
			tp->ccv->flags |= CCF_DELACK;
		else
			tp->ccv->flags &= ~CCF_DELACK;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW)
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
	}
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */

		if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
			m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
			if (m == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about unspecified IPv6 addresses in the
		 * source.  As we use the all-zeros address to indicate an
		 * unbound/unconnected pcb, an unspecified IPv6 source
		 * address can be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination
		 * are already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
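		/*
		 * For reference (illustrative): ip6_flow holds version
		 * (4 bits), traffic class (8 bits) and flow label (20 bits),
		 * so shifting the host-order word right by 20 and masking
		 * with 0xff isolates the traffic class octet, e.g. a flow
		 * word of 0x60b00000 yields iptos = 0x0b.
		 */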
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		iptos = ip->ip_tos;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(len);
			/* Reset TOS bits */
			ip->ip_tos = iptos;
			/* Re-initialization for later version check */
			ip->ip_v = IPVERSION;
			ip->ip_hl = off0 >> 2;
		}

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
	}
#endif /* INET */

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
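	/*
	 * For reference (illustrative): th_off counts 32-bit words, so valid
	 * values span 5 (off = 20 bytes, bare header) through 15 (off = 60
	 * bytes, a full 40 bytes of options); anything outside [20, tlen]
	 * is bogus and counted as tcps_rcvbadoff above.
	 */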
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are three cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, ACKs for a
	 * connection in TIMEWAIT and SYNs not targeting a listening socket.
	 */
	if ((thflags & (TH_FIN | TH_RST)) != 0) {
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
	} else
		ti_locked = TI_UNLOCKED;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	/*
	 * While waiting for inp lock during the lookup, another thread
	 * can have dropped the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 */
	if (inp->inp_flags & INP_DROPPED) {
		INP_WUNLOCK(inp);
		inp = NULL;
		goto findpcb;
	}
	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    ((inp->inp_socket == NULL) ||
	    (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6) &&
	    IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
		goto dropunlock;
	}
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4) &&
	    IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
		goto dropunlock;
	}
#endif /* INET */
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6) {
			if (inp->inp_ip_minttl > ip6->ip6_hlim)
				goto dropunlock;
		} else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				} else if (inp->inp_flags & INP_DROPPED) {
					INP_WUNLOCK(inp);
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_RUNLOCK(&V_tcbinfo);
		return (IPPROTO_DONE);
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_FIN | TH_RST)) != 0)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
#endif
	if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
	    (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) &&
	    !(tp->t_flags & TF_FASTOPEN)))) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				} else if (inp->inp_flags & INP_DROPPED) {
					INP_WUNLOCK(inp);
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN),
	    ("%s: so accepting but tp %p not listening", __func__, tp));
	if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) {
		struct in_conninfo inc;

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in the syncache
		 * if only the ACK flag is set.  A successful lookup creates
		 * a new socket, appended to the listen queue in SYN_RECEIVED
		 * state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {

			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			rstreason = syncache_expand(&inc, &to, th, &so, m);
			if (rstreason < 0) {
				/*
				 * A failing TCP MD5 signature comparison
				 * must result in the segment being dropped
				 * and must not produce any response back
				 * to the sender.
				 */
				goto dropunlock;
			} else if (rstreason == 0) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
#ifdef TCP_RFC7413
new_tfo_socket:
#endif
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: new socket not in SYN_RECEIVED",
			    __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return (IPPROTO_DONE);
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden, we do not accept
		 * a SYN to a deprecated interface address, preventing any
		 * new inbound connection from getting established.
		 * When we do not accept the SYN, we send a TCP RST with
		 * the deprecated source address instead of silently
		 * dropping the segment.  This is a compromise: it is much
		 * better for the peer to receive a RST, and the RST will
		 * be the final packet of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC 2462 does not suggest dropping a
		 * SYN in this case.
		 * Deciphering RFC 2462 Section 5.5.4, it says:
		 * 1. use of a deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into case (2b), as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC 2462 is confusing, and there are
		 * multiple descriptions of deprecated address handling -
		 * worse, they are not exactly the same.  Section 5.5.4
		 * reads best, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		TCP_PROBE3(debug__input, tp, th, m);
		tcp_dooptions(&to, optp, optlen, TO_SYN);
#ifdef TCP_RFC7413
		if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL))
			goto new_tfo_socket;
#else
		syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
#endif
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		if (ti_locked == TI_RLOCKED) {
			INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;
		}
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition that allows a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (tp->t_flags & TF_SIGNATURE) {
		tcp_dooptions(&to, optp, optlen, thflags);
		if ((to.to_flags & TOF_SIGNATURE) == 0) {
			TCPSTAT_INC(tcps_sig_err_nosigopt);
			goto dropunlock;
		}
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_INPUT(m, th, to.to_signature) != 0)
			goto dropunlock;
	}
#endif
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * Automatic sizing of receive socket buffer.  Often the send
 * buffer size is not optimally adjusted to the actual network
 * conditions at hand (delay bandwidth product).  Setting the
 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (eg. trans-continental/oceanic links).
 *
 * On the receive side the socket buffer memory is only rarely
 * used to any significant extent.  This allows us to be much
 * more aggressive in scaling the receive socket buffer.  For
 * the case that the buffer space is actually used to a large
 * extent and we run out of kernel memory we can simply drop
 * the new segments; TCP on the sender will just retransmit them
 * later.  Setting the buffer size too big may only consume too
 * much kernel memory if the application doesn't read() from
 * the socket or packet loss or reordering makes use of the
 * reassembly queue.
 *
 * The criteria to step up the receive buffer one notch are:
 *  1. Application has not set receive buffer size with
 *     SO_RCVBUF.  Setting SO_RCVBUF clears SB_AUTOSIZE.
 *  2. At least one full RTT has elapsed, measured as the time it
 *     takes one timestamp to be reflected back to us.
 *  3. The bytes received during that RTT exceed seven eighths of
 *     the current socket buffer size.
 *  4. The receive buffer size has not hit its maximal automatic
 *     size.
 *
 * This algorithm does one step per RTT at most and only if
 * we receive a bulk stream w/o packet losses or reorderings.
 * Shrinking the buffer during idle times is not necessary as
 * it doesn't consume any memory when idle.
 *
 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
	int newsize = 0;

	if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
	    tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
	    TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
	    (tp->t_srtt >> TCP_RTT_SHIFT)) {
		if (tp->rfbuf_cnt > (so->so_rcv.sb_hiwat / 8 * 7) &&
		    so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
			newsize = min(so->so_rcv.sb_hiwat +
			    V_tcp_autorcvbuf_inc, V_tcp_autorcvbuf_max);
		}
		TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

		/* Start over with next RTT. */
		tp->rfbuf_ts = 0;
		tp->rfbuf_cnt = 0;
	} else {
		tp->rfbuf_cnt += tlen;	/* add up */
	}

	return (newsize);
}
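
/*
 * Worked example (illustrative, using the default sysctls above): with
 * recvspace = 64 KB, recvbuf_inc = 16 KB and recvbuf_max = 2 MB, a
 * receiver that sees more than 7/8 * 65536 = 57344 bytes within one RTT
 * steps sb_hiwat up to min(65536 + 16384, 2097152) = 81920 bytes.
 * Repeating this once per RTT, a loss-free bulk transfer can reach the
 * 2 MB ceiling in roughly (2097152 - 65536) / 16384 = 124 RTTs.
 */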

void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win;
	u_long tiwin;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;

	/*
	 * If this is either a state-changing packet or the current state
	 * isn't established, we require a write lock on tcbinfo.  Otherwise,
	 * we allow the tcbinfo to be in either a locked or unlocked state,
	 * as the caller may have unnecessarily acquired a write lock due to
	 * a race.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		else {
			KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
			    "ti_locked: %d", __func__, ti_locked));
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		}
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif

	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: "
			    "SYN|FIN segment ignored (based on "
			    "sysctl setting)\n", s, __func__);
			free(s, M_TCPLOG);
		}
		goto drop;
	}

	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
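	/*
	 * Example (illustrative): a raw th_win of 65535 with a negotiated
	 * snd_scale of 7 yields tiwin = 65535 << 7 = 8388480 bytes (~8 MB);
	 * RFC 7323 caps the shift at 14, i.e. a window of roughly 1 GB at
	 * most.  In SYN_SENT, snd_scale is still zero, so tiwin is the raw
	 * 16-bit value.
	 */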

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if ((tp->t_flags & TF_SIGNATURE) != 0 &&
	    (to.to_flags & TOF_SIGNATURE) == 0) {
		TCPSTAT_INC(tcps_sig_err_sigopt);
		/* XXX: should drop? */
	}
#endif
	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
	}
	/*
	 * Process options only when we get SYN/ACK back.  The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK(&V_tcbinfo);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

1801 /*
1802 * Recalculate the transmit timer / rtt.
1803 *
1804 * Some boxes send broken timestamp replies
1805				 * during the SYN+ACK phase; ignore
1806				 * timestamps of 0, or we could calculate a
1807 * huge RTT and blow up the retransmit timer.
1808 */
1809 if ((to.to_flags & TOF_TS) != 0 &&
1810 to.to_tsecr) {
1811 u_int t;
1812
1813 t = tcp_ts_getticks() - to.to_tsecr;
1814 if (!tp->t_rttlow || tp->t_rttlow > t)
1815 tp->t_rttlow = t;
1816 tcp_xmit_timer(tp,
1817 TCP_TS_TO_TICKS(t) + 1);
1818 } else if (tp->t_rtttime &&
1819 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1820 if (!tp->t_rttlow ||
1821 tp->t_rttlow > ticks - tp->t_rtttime)
1822 tp->t_rttlow = ticks - tp->t_rtttime;
1823 tcp_xmit_timer(tp,
1824 ticks - tp->t_rtttime);
1825 }
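				/*
				 * Worked example (illustrative):
				 * tcp_ts_getticks() runs in milliseconds, so
				 * a segment echoing tsecr = 104000 that
				 * arrives when the clock reads 104120 yields
				 * t = 120 ms.  TCP_TS_TO_TICKS() converts
				 * that back to callout ticks, and the "+ 1"
				 * keeps a sub-tick RTT from collapsing to
				 * zero.
				 */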
1826 acked = BYTES_THIS_ACK(tp, th);
1827
1828 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1829 hhook_run_tcp_est_in(tp, th, &to);
1830
1831 TCPSTAT_INC(tcps_rcvackpack);
1832 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1833 sbdrop(&so->so_snd, acked);
1834 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1835 SEQ_LEQ(th->th_ack, tp->snd_recover))
1836 tp->snd_recover = th->th_ack - 1;
1837
1838 /*
1839 * Let the congestion control algorithm update
1840 * congestion control related information. This
1841 * typically means increasing the congestion
1842 * window.
1843 */
1844 cc_ack_received(tp, th, CC_ACK);
1845
1846 tp->snd_una = th->th_ack;
1847 /*
1848 * Pull snd_wl2 up to prevent seq wrap relative
1849 * to th_ack.
1850 */
1851 tp->snd_wl2 = th->th_ack;
1852 tp->t_dupacks = 0;
1853 m_freem(m);
1854
1855 /*
1856 * If all outstanding data are acked, stop
1857 * retransmit timer, otherwise restart timer
1858 * using current (possibly backed-off) value.
1859 * If process is waiting for space,
1860 * wakeup/selwakeup/signal. If data
1861 * are ready to send, let tcp_output
1862 * decide between more output or persist.
1863 */
1864 #ifdef TCPDEBUG
1865 if (so->so_options & SO_DEBUG)
1866 tcp_trace(TA_INPUT, ostate, tp,
1867 (void *)tcp_saveipgen,
1868 &tcp_savetcp, 0);
1869 #endif
1870 TCP_PROBE3(debug__input, tp, th, m);
1871 if (tp->snd_una == tp->snd_max)
1872 tcp_timer_activate(tp, TT_REXMT, 0);
1873 else if (!tcp_timer_active(tp, TT_PERSIST))
1874 tcp_timer_activate(tp, TT_REXMT,
1875 tp->t_rxtcur);
1876 sowwakeup(so);
1877 if (sbavail(&so->so_snd))
1878 (void) tp->t_fb->tfb_tcp_output(tp);
1879 goto check_delack;
1880 }
1881 } else if (th->th_ack == tp->snd_una &&
1882 tlen <= sbspace(&so->so_rcv)) {
1883 int newsize = 0; /* automatic sockbuf scaling */
1884
1885 /*
1886 * This is a pure, in-sequence data packet with
1887 * nothing on the reassembly queue and we have enough
1888 * buffer space to take it.
1889 */
1890 if (ti_locked == TI_RLOCKED)
1891 INP_INFO_RUNLOCK(&V_tcbinfo);
1892 ti_locked = TI_UNLOCKED;
1893
1894 /* Clean receiver SACK report if present */
1895 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1896 tcp_clean_sackreport(tp);
1897 TCPSTAT_INC(tcps_preddat);
1898 tp->rcv_nxt += tlen;
1899 /*
1900 * Pull snd_wl1 up to prevent seq wrap relative to
1901 * th_seq.
1902 */
1903 tp->snd_wl1 = th->th_seq;
1904 /*
1905 * Pull rcv_up up to prevent seq wrap relative to
1906 * rcv_nxt.
1907 */
1908 tp->rcv_up = tp->rcv_nxt;
1909 TCPSTAT_INC(tcps_rcvpack);
1910 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1911 #ifdef TCPDEBUG
1912 if (so->so_options & SO_DEBUG)
1913 tcp_trace(TA_INPUT, ostate, tp,
1914 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1915 #endif
1916 TCP_PROBE3(debug__input, tp, th, m);
1917
1918 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
1919
1920 /* Add data to socket buffer. */
1921 SOCKBUF_LOCK(&so->so_rcv);
1922 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1923 m_freem(m);
1924 } else {
1925 /*
1926 * Set new socket buffer size.
1927 * Give up when limit is reached.
1928 */
1929 if (newsize)
1930 if (!sbreserve_locked(&so->so_rcv,
1931 newsize, so, NULL))
1932 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1933 m_adj(m, drop_hdrlen); /* delayed header drop */
1934 sbappendstream_locked(&so->so_rcv, m, 0);
1935 }
1936 /* NB: sorwakeup_locked() does an implicit unlock. */
1937 sorwakeup_locked(so);
1938 if (DELAY_ACK(tp, tlen)) {
1939 tp->t_flags |= TF_DELACK;
1940 } else {
1941 tp->t_flags |= TF_ACKNOW;
1942 tp->t_fb->tfb_tcp_output(tp);
1943 }
1944 goto check_delack;
1945 }
1946 }
1947
1948 /*
1949 * Calculate amount of space in receive window,
1950 * and then do TCP input processing.
1951 * Receive window is amount of space in rcv queue,
1952 * but not less than advertised window.
1953 */
1954 win = sbspace(&so->so_rcv);
1955 if (win < 0)
1956 win = 0;
1957 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
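	/*
	 * Example (illustrative): if only 8192 bytes are free in the
	 * receive buffer but we have already advertised up to
	 * rcv_adv = rcv_nxt + 16384, the imax() holds rcv_wnd at 16384 so
	 * the advertised window never appears to shrink, which RFC 793
	 * strongly discourages.
	 */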
1958
1959 switch (tp->t_state) {
1960
1961 /*
1962 * If the state is SYN_RECEIVED:
1963 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1964 */
1965 case TCPS_SYN_RECEIVED:
1966 if ((thflags & TH_ACK) &&
1967 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1968 SEQ_GT(th->th_ack, tp->snd_max))) {
1969 rstreason = BANDLIM_RST_OPENPORT;
1970 goto dropwithreset;
1971 }
1972 #ifdef TCP_RFC7413
1973 if (tp->t_flags & TF_FASTOPEN) {
1974 /*
1975 * When a TFO connection is in SYN_RECEIVED, the
1976 * only valid packets are the initial SYN, a
1977 * retransmit/copy of the initial SYN (possibly with
1978 * a subset of the original data), a valid ACK, a
1979 * FIN, or a RST.
1980 */
1981 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
1982 rstreason = BANDLIM_RST_OPENPORT;
1983 goto dropwithreset;
1984 } else if (thflags & TH_SYN) {
1985 /* non-initial SYN is ignored */
1986 if ((tcp_timer_active(tp, TT_DELACK) ||
1987 tcp_timer_active(tp, TT_REXMT)))
1988 goto drop;
1989 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
1990 goto drop;
1991 }
1992 }
1993 #endif
1994 break;
1995
1996 /*
1997 * If the state is SYN_SENT:
1998 * if seg contains a RST, then drop the connection.
1999 * if seg does not contain SYN, then drop it.
2000 * Otherwise this is an acceptable SYN segment
2001 * initialize tp->rcv_nxt and tp->irs
2002 * if seg contains ack then advance tp->snd_una
2003 * if seg contains an ECE and ECN support is enabled, the stream
2004 * is ECN capable.
2005 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
2006 * arrange for segment to be acked (eventually)
2007 * continue processing rest of data/controls, beginning with URG
2008 */
2009 case TCPS_SYN_SENT:
2010 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
2011 TCP_PROBE5(connect__refused, NULL, tp,
2012 m, tp, th);
2013 tp = tcp_drop(tp, ECONNREFUSED);
2014 }
2015 if (thflags & TH_RST)
2016 goto drop;
2017 if (!(thflags & TH_SYN))
2018 goto drop;
2019
2020 tp->irs = th->th_seq;
2021 tcp_rcvseqinit(tp);
2022 if (thflags & TH_ACK) {
2023 TCPSTAT_INC(tcps_connects);
2024 soisconnected(so);
2025 #ifdef MAC
2026 mac_socketpeer_set_from_mbuf(m, so);
2027 #endif
2028 /* Do window scaling on this connection? */
2029 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2030 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2031 tp->rcv_scale = tp->request_r_scale;
2032 }
2033 tp->rcv_adv += imin(tp->rcv_wnd,
2034 TCP_MAXWIN << tp->rcv_scale);
2035 tp->snd_una++; /* SYN is acked */
2036 /*
2037			 * If there's data, delay the ACK; if there's also a FIN,
2038 * ACKNOW will be turned on later.
2039 */
2040 if (DELAY_ACK(tp, tlen) && tlen != 0)
2041 tcp_timer_activate(tp, TT_DELACK,
2042 tcp_delacktime);
2043 else
2044 tp->t_flags |= TF_ACKNOW;
2045
2046 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
2047 tp->t_flags |= TF_ECN_PERMIT;
2048 TCPSTAT_INC(tcps_ecn_shs);
2049 }
2050
2051 /*
2052 * Received <SYN,ACK> in SYN_SENT[*] state.
2053 * Transitions:
2054 * SYN_SENT --> ESTABLISHED
2055 * SYN_SENT* --> FIN_WAIT_1
2056 */
2057 tp->t_starttime = ticks;
2058 if (tp->t_flags & TF_NEEDFIN) {
2059 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2060 tp->t_flags &= ~TF_NEEDFIN;
2061 thflags &= ~TH_SYN;
2062 } else {
2063 tcp_state_change(tp, TCPS_ESTABLISHED);
2064 TCP_PROBE5(connect__established, NULL, tp,
2065 m, tp, th);
2066 cc_conn_init(tp);
2067 tcp_timer_activate(tp, TT_KEEP,
2068 TP_KEEPIDLE(tp));
2069 }
2070 } else {
2071 /*
2072 * Received initial SYN in SYN-SENT[*] state =>
2073 * simultaneous open.
2074			 * If it succeeds, connection is half-synchronized.
2075 * Otherwise, do 3-way handshake:
2076 * SYN-SENT -> SYN-RECEIVED
2077 * SYN-SENT* -> SYN-RECEIVED*
2078 */
2079 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
2080 tcp_timer_activate(tp, TT_REXMT, 0);
2081 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2082 }
2083
2084 KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: "
2085 "ti_locked %d", __func__, ti_locked));
2086 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2087 INP_WLOCK_ASSERT(tp->t_inpcb);
2088
2089 /*
2090 * Advance th->th_seq to correspond to first data byte.
2091 * If data, trim to stay within window,
2092 * dropping FIN if necessary.
2093 */
2094 th->th_seq++;
2095 if (tlen > tp->rcv_wnd) {
2096 todrop = tlen - tp->rcv_wnd;
2097 m_adj(m, -todrop);
2098 tlen = tp->rcv_wnd;
2099 thflags &= ~TH_FIN;
2100 TCPSTAT_INC(tcps_rcvpackafterwin);
2101 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2102 }
2103 tp->snd_wl1 = th->th_seq - 1;
2104 tp->rcv_up = th->th_seq;
2105 /*
2106 * Client side of transaction: already sent SYN and data.
2107 * If the remote host used T/TCP to validate the SYN,
2108 * our data will be ACK'd; if so, enter normal data segment
2109 * processing in the middle of step 5, ack processing.
2110 * Otherwise, goto step 6.
2111 */
2112 if (thflags & TH_ACK)
2113 goto process_ACK;
2114
2115 goto step6;
2116
2117 /*
2118 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2119 * do normal processing.
2120 *
2121 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2122 */
2123 case TCPS_LAST_ACK:
2124 case TCPS_CLOSING:
2125 break; /* continue normal processing */
2126 }
2127
2128 /*
2129 * States other than LISTEN or SYN_SENT.
2130 * First check the RST flag and sequence number since reset segments
2131 * are exempt from the timestamp and connection count tests. This
2132 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2133 * below which allowed reset segments in half the sequence space
2134	 * to fall through and be processed (which gives forged reset
2135 * segments with a random sequence number a 50 percent chance of
2136 * killing a connection).
2137 * Then check timestamp, if present.
2138 * Then check the connection count, if present.
2139 * Then check that at least some bytes of segment are within
2140 * receive window. If segment begins before rcv_nxt,
2141 * drop leading data (and SYN); if nothing left, just ack.
2142 */
2143 if (thflags & TH_RST) {
2144 /*
2145 * RFC5961 Section 3.2
2146 *
2147 * - RST drops connection only if SEG.SEQ == RCV.NXT.
2148 * - If RST is in window, we send challenge ACK.
2149 *
2150 * Note: to take into account delayed ACKs, we should
2151 * test against last_ack_sent instead of rcv_nxt.
2152 * Note 2: we handle special case of closed window, not
2153 * covered by the RFC.
2154 */
2155 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2156 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2157 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
2158
2159 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2160 KASSERT(ti_locked == TI_RLOCKED,
2161 ("%s: TH_RST ti_locked %d, th %p tp %p",
2162 __func__, ti_locked, th, tp));
2163 KASSERT(tp->t_state != TCPS_SYN_SENT,
2164 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
2165 __func__, th, tp));
2166
2167 if (V_tcp_insecure_rst ||
2168 tp->last_ack_sent == th->th_seq) {
2169 TCPSTAT_INC(tcps_drops);
2170 /* Drop the connection. */
2171 switch (tp->t_state) {
2172 case TCPS_SYN_RECEIVED:
2173 so->so_error = ECONNREFUSED;
2174 goto close;
2175 case TCPS_ESTABLISHED:
2176 case TCPS_FIN_WAIT_1:
2177 case TCPS_FIN_WAIT_2:
2178 case TCPS_CLOSE_WAIT:
2179 case TCPS_CLOSING:
2180 case TCPS_LAST_ACK:
2181 so->so_error = ECONNRESET;
2182 close:
2183 /* FALLTHROUGH */
2184 default:
2185 tp = tcp_close(tp);
2186 }
2187 } else {
2188 TCPSTAT_INC(tcps_badrst);
2189 /* Send challenge ACK. */
2190 tcp_respond(tp, mtod(m, void *), th, m,
2191 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
2192 tp->last_ack_sent = tp->rcv_nxt;
2193 m = NULL;
2194 }
2195 }
2196 goto drop;
2197 }
2198
2199 /*
2200 * RFC5961 Section 4.2
2201 * Send challenge ACK for any SYN in synchronized state.
2202 */
2203 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT &&
2204 tp->t_state != TCPS_SYN_RECEIVED) {
2205 KASSERT(ti_locked == TI_RLOCKED,
2206 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2207 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2208
2209 TCPSTAT_INC(tcps_badsyn);
2210 if (V_tcp_insecure_syn &&
2211 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2212 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2213 tp = tcp_drop(tp, ECONNRESET);
2214 rstreason = BANDLIM_UNLIMITED;
2215 } else {
2216 /* Send challenge ACK. */
2217 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
2218 tp->snd_nxt, TH_ACK);
2219 tp->last_ack_sent = tp->rcv_nxt;
2220 m = NULL;
2221 }
2222 goto drop;
2223 }
2224
2225 /*
2226 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2227 * and it's less than ts_recent, drop it.
2228 */
2229 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2230 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2231
2232 /* Check to see if ts_recent is over 24 days old. */
2233 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2234 /*
2235 * Invalidate ts_recent. If this segment updates
2236 * ts_recent, the age will be reset later and ts_recent
2237 * will get a valid value. If it does not, setting
2238 * ts_recent to zero will at least satisfy the
2239 * requirement that zero be placed in the timestamp
2240 * echo reply when ts_recent isn't valid. The
2241 * age isn't reset until we get a valid ts_recent
2242 * because we don't want out-of-order segments to be
2243 * dropped when ts_recent is old.
2244 */
2245 tp->ts_recent = 0;
2246 } else {
2247 TCPSTAT_INC(tcps_rcvduppack);
2248 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2249 TCPSTAT_INC(tcps_pawsdrop);
2250 if (tlen)
2251 goto dropafterack;
2252 goto drop;
2253 }
2254 }
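	/*
	 * The 24-day horizon is not arbitrary: with the millisecond
	 * timestamp clock used by tcp_ts_getticks(), 2^31 ms is about
	 * 24.8 days, so beyond TCP_PAWS_IDLE the signed TSTMP_LT()
	 * comparison above can no longer order tsval against ts_recent
	 * reliably.
	 */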
2255
2256 /*
2257 * In the SYN-RECEIVED state, validate that the packet belongs to
2258 * this connection before trimming the data to fit the receive
2259 * window. Check the sequence number versus IRS since we know
2260 * the sequence numbers haven't wrapped. This is a partial fix
2261 * for the "LAND" DoS attack.
2262 */
2263 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2264 rstreason = BANDLIM_RST_OPENPORT;
2265 goto dropwithreset;
2266 }
2267
2268 todrop = tp->rcv_nxt - th->th_seq;
2269 if (todrop > 0) {
2270 if (thflags & TH_SYN) {
2271 thflags &= ~TH_SYN;
2272 th->th_seq++;
2273 if (th->th_urp > 1)
2274 th->th_urp--;
2275 else
2276 thflags &= ~TH_URG;
2277 todrop--;
2278 }
2279 /*
2280 * Following if statement from Stevens, vol. 2, p. 960.
2281 */
2282 if (todrop > tlen
2283 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2284 /*
2285 * Any valid FIN must be to the left of the window.
2286 * At this point the FIN must be a duplicate or out
2287 * of sequence; drop it.
2288 */
2289 thflags &= ~TH_FIN;
2290
2291 /*
2292 * Send an ACK to resynchronize and drop any data.
2293 * But keep on processing for RST or ACK.
2294 */
2295 tp->t_flags |= TF_ACKNOW;
2296 todrop = tlen;
2297 TCPSTAT_INC(tcps_rcvduppack);
2298 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2299 } else {
2300 TCPSTAT_INC(tcps_rcvpartduppack);
2301 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2302 }
2303 drop_hdrlen += todrop; /* drop from the top afterwards */
2304 th->th_seq += todrop;
2305 tlen -= todrop;
2306 if (th->th_urp > todrop)
2307 th->th_urp -= todrop;
2308 else {
2309 thflags &= ~TH_URG;
2310 th->th_urp = 0;
2311 }
2312 }
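	/*
	 * Worked example (illustrative): rcv_nxt = 1000, th_seq = 900,
	 * tlen = 300.  Then todrop = 100, so the first 100 bytes are
	 * stale duplicates: drop_hdrlen grows by 100, th_seq becomes
	 * 1000 and tlen shrinks to 200, leaving only the new data for
	 * the code below.
	 */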
2313
2314 /*
2315 * If new data are received on a connection after the
2316 * user processes are gone, then RST the other end.
2317 */
2318 if ((so->so_state & SS_NOFDREF) &&
2319 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2320 KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && "
2321 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2322 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2323
2324 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2325 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2326 "after socket was closed, "
2327 "sending RST and removing tcpcb\n",
2328 s, __func__, tcpstates[tp->t_state], tlen);
2329 free(s, M_TCPLOG);
2330 }
2331 tp = tcp_close(tp);
2332 TCPSTAT_INC(tcps_rcvafterclose);
2333 rstreason = BANDLIM_UNLIMITED;
2334 goto dropwithreset;
2335 }
2336
2337 /*
2338 * If segment ends after window, drop trailing data
2339 * (and PUSH and FIN); if nothing left, just ACK.
2340 */
2341 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2342 if (todrop > 0) {
2343 TCPSTAT_INC(tcps_rcvpackafterwin);
2344 if (todrop >= tlen) {
2345 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2346 /*
2347			 * If the window is closed, we can only take segments at the
2348			 * window edge, and have to drop data and PUSH from
2349 * incoming segments. Continue processing, but
2350 * remember to ack. Otherwise, drop segment
2351 * and ack.
2352 */
2353 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2354 tp->t_flags |= TF_ACKNOW;
2355 TCPSTAT_INC(tcps_rcvwinprobe);
2356 } else
2357 goto dropafterack;
2358 } else
2359 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2360 m_adj(m, -todrop);
2361 tlen -= todrop;
2362 thflags &= ~(TH_PUSH|TH_FIN);
2363 }
2364
2365 /*
2366 * If last ACK falls within this segment's sequence numbers,
2367 * record its timestamp.
2368 * NOTE:
2369 * 1) That the test incorporates suggestions from the latest
2370 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2371 * 2) That updating only on newer timestamps interferes with
2372 * our earlier PAWS tests, so this check should be solely
2373 * predicated on the sequence space of this segment.
2374 * 3) That we modify the segment boundary check to be
2375 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2376 * instead of RFC1323's
2377	 *	Last.ACK.Sent < SEG.SEQ + SEG.Len.
2378 * This modified check allows us to overcome RFC1323's
2379 * limitations as described in Stevens TCP/IP Illustrated
2380 * Vol. 2 p.869. In such cases, we can still calculate the
2381 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2382 */
2383 if ((to.to_flags & TOF_TS) != 0 &&
2384 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2385 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2386 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2387 tp->ts_recent_age = tcp_ts_getticks();
2388 tp->ts_recent = to.to_tsval;
2389 }
2390
2391 /*
2392	 * If the ACK bit is off:  if in SYN-RECEIVED state or the TF_NEEDSYN
2393 * flag is on (half-synchronized state), then queue data for
2394 * later processing; else drop segment and return.
2395 */
2396 if ((thflags & TH_ACK) == 0) {
2397 if (tp->t_state == TCPS_SYN_RECEIVED ||
2398 (tp->t_flags & TF_NEEDSYN)) {
2399 #ifdef TCP_RFC7413
2400 if (tp->t_state == TCPS_SYN_RECEIVED &&
2401 tp->t_flags & TF_FASTOPEN) {
2402 tp->snd_wnd = tiwin;
2403 cc_conn_init(tp);
2404 }
2405 #endif
2406 goto step6;
2407 } else if (tp->t_flags & TF_ACKNOW)
2408 goto dropafterack;
2409 else
2410 goto drop;
2411 }
2412
2413 /*
2414 * Ack processing.
2415 */
2416 switch (tp->t_state) {
2417
2418 /*
2419 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2420 * ESTABLISHED state and continue processing.
2421 * The ACK was checked above.
2422 */
2423 case TCPS_SYN_RECEIVED:
2424
2425 TCPSTAT_INC(tcps_connects);
2426 soisconnected(so);
2427 /* Do window scaling? */
2428 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2429 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2430 tp->rcv_scale = tp->request_r_scale;
2431 tp->snd_wnd = tiwin;
2432 }
2433 /*
2434 * Make transitions:
2435 * SYN-RECEIVED -> ESTABLISHED
2436 * SYN-RECEIVED* -> FIN-WAIT-1
2437 */
2438 tp->t_starttime = ticks;
2439 if (tp->t_flags & TF_NEEDFIN) {
2440 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2441 tp->t_flags &= ~TF_NEEDFIN;
2442 } else {
2443 tcp_state_change(tp, TCPS_ESTABLISHED);
2444 TCP_PROBE5(accept__established, NULL, tp,
2445 m, tp, th);
2446 #ifdef TCP_RFC7413
2447 if (tp->t_tfo_pending) {
2448 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2449 tp->t_tfo_pending = NULL;
2450
2451 /*
2452 * Account for the ACK of our SYN prior to
2453 * regular ACK processing below.
2454 */
2455 tp->snd_una++;
2456 }
2457 /*
2458 * TFO connections call cc_conn_init() during SYN
2459 * processing. Calling it again here for such
2460 * connections is not harmless as it would undo the
2461 * snd_cwnd reduction that occurs when a TFO SYN|ACK
2462 * is retransmitted.
2463 */
2464 if (!(tp->t_flags & TF_FASTOPEN))
2465 #endif
2466 cc_conn_init(tp);
2467 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2468 }
2469 /*
2470 * If segment contains data or ACK, will call tcp_reass()
2471 * later; if not, do so now to pass queued data to user.
2472 */
2473 if (tlen == 0 && (thflags & TH_FIN) == 0)
2474 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2475 (struct mbuf *)0);
2476 tp->snd_wl1 = th->th_seq - 1;
2477 /* FALLTHROUGH */
2478
2479 /*
2480 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2481 * ACKs. If the ack is in the range
2482 * tp->snd_una < th->th_ack <= tp->snd_max
2483 * then advance tp->snd_una to th->th_ack and drop
2484	 * data from the retransmission queue. If this ACK reflects
2485	 * more up-to-date window information, we update it.
2486 */
2487 case TCPS_ESTABLISHED:
2488 case TCPS_FIN_WAIT_1:
2489 case TCPS_FIN_WAIT_2:
2490 case TCPS_CLOSE_WAIT:
2491 case TCPS_CLOSING:
2492 case TCPS_LAST_ACK:
2493 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2494 TCPSTAT_INC(tcps_rcvacktoomuch);
2495 goto dropafterack;
2496 }
2497 if ((tp->t_flags & TF_SACK_PERMIT) &&
2498 ((to.to_flags & TOF_SACK) ||
2499 !TAILQ_EMPTY(&tp->snd_holes)))
2500 sack_changed = tcp_sack_doack(tp, &to, th->th_ack);
2501 else
2502 /*
2503 * Reset the value so that previous (valid) value
2504 * from the last ack with SACK doesn't get used.
2505 */
2506 tp->sackhint.sacked_bytes = 0;
2507
2508 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2509 hhook_run_tcp_est_in(tp, th, &to);
2510
2511 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2512 u_int maxseg;
2513
2514 maxseg = tcp_maxseg(tp);
2515 if (tlen == 0 &&
2516 (tiwin == tp->snd_wnd ||
2517 (tp->t_flags & TF_SACK_PERMIT))) {
2518 /*
2519 * If this is the first time we've seen a
2520 * FIN from the remote, this is not a
2521 * duplicate and it needs to be processed
2522 * normally. This happens during a
2523 * simultaneous close.
2524 */
2525 if ((thflags & TH_FIN) &&
2526 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2527 tp->t_dupacks = 0;
2528 break;
2529 }
2530 TCPSTAT_INC(tcps_rcvdupack);
2531 /*
2532				 * If we have outstanding data (other than
2533				 * a window probe), this ack is a complete
2534				 * duplicate (i.e., the window info didn't
2535				 * change and FIN isn't set), the ack is the
2536				 * biggest we've seen, and we've seen exactly
2537				 * our rexmt threshold of them, then assume
2538				 * a packet has been dropped and retransmit
2539				 * it.
2540 * Kludge snd_nxt & the congestion
2541 * window so we send only this one
2542 * packet.
2543 *
2544 * We know we're losing at the current
2545 * window size so do congestion avoidance
2546 * (set ssthresh to half the current window
2547 * and pull our congestion window back to
2548 * the new ssthresh).
2549 *
2550 * Dup acks mean that packets have left the
2551 * network (they're now cached at the receiver)
2552 * so bump cwnd by the amount in the receiver
2553 * to keep a constant cwnd packets in the
2554 * network.
2555 *
2556 * When using TCP ECN, notify the peer that
2557 * we reduced the cwnd.
2558 */
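				/*
				 * Worked numbers (illustrative, non-SACK
				 * path): once the third duplicate ACK has
				 * triggered fast retransmit below, every
				 * additional dupack takes the
				 * "> tcprexmtthresh" branch and inflates
				 * snd_cwnd by one maxseg, since each dupack
				 * implies one more segment has left the
				 * network and may be replaced with new data.
				 */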
2559 /*
2560				 * The following two kinds of acks should not affect
2561 * dupack counting:
2562 * 1) Old acks
2563 * 2) Acks with SACK but without any new SACK
2564 * information in them. These could result from
2565 * any anomaly in the network like a switch
2566 * duplicating packets or a possible DoS attack.
2567 */
2568 if (th->th_ack != tp->snd_una ||
2569 ((tp->t_flags & TF_SACK_PERMIT) &&
2570 !sack_changed))
2571 break;
2572 else if (!tcp_timer_active(tp, TT_REXMT))
2573 tp->t_dupacks = 0;
2574 else if (++tp->t_dupacks > tcprexmtthresh ||
2575 IN_FASTRECOVERY(tp->t_flags)) {
2576 cc_ack_received(tp, th, CC_DUPACK);
2577 if ((tp->t_flags & TF_SACK_PERMIT) &&
2578 IN_FASTRECOVERY(tp->t_flags)) {
2579 int awnd;
2580
2581 /*
2582 * Compute the amount of data in flight first.
2583 * We can inject new data into the pipe iff
2584 * we have less than 1/2 the original window's
2585 * worth of data in flight.
2586 */
2587 if (V_tcp_do_rfc6675_pipe)
2588 awnd = tcp_compute_pipe(tp);
2589 else
2590 awnd = (tp->snd_nxt - tp->snd_fack) +
2591 tp->sackhint.sack_bytes_rexmit;
2592
2593 if (awnd < tp->snd_ssthresh) {
2594 tp->snd_cwnd += maxseg;
2595 if (tp->snd_cwnd > tp->snd_ssthresh)
2596 tp->snd_cwnd = tp->snd_ssthresh;
2597 }
2598 } else
2599 tp->snd_cwnd += maxseg;
2600 (void) tp->t_fb->tfb_tcp_output(tp);
2601 goto drop;
2602 } else if (tp->t_dupacks == tcprexmtthresh) {
2603 tcp_seq onxt = tp->snd_nxt;
2604
2605 /*
2606 * If we're doing sack, check to
2607 * see if we're already in sack
2608 * recovery. If we're not doing sack,
2609 * check to see if we're in newreno
2610 * recovery.
2611 */
2612 if (tp->t_flags & TF_SACK_PERMIT) {
2613 if (IN_FASTRECOVERY(tp->t_flags)) {
2614 tp->t_dupacks = 0;
2615 break;
2616 }
2617 } else {
2618 if (SEQ_LEQ(th->th_ack,
2619 tp->snd_recover)) {
2620 tp->t_dupacks = 0;
2621 break;
2622 }
2623 }
2624 /* Congestion signal before ack. */
2625 cc_cong_signal(tp, th, CC_NDUPACK);
2626 cc_ack_received(tp, th, CC_DUPACK);
2627 tcp_timer_activate(tp, TT_REXMT, 0);
2628 tp->t_rtttime = 0;
2629 if (tp->t_flags & TF_SACK_PERMIT) {
2630 TCPSTAT_INC(
2631 tcps_sack_recovery_episode);
2632 tp->sack_newdata = tp->snd_nxt;
2633 tp->snd_cwnd = maxseg;
2634 (void) tp->t_fb->tfb_tcp_output(tp);
2635 goto drop;
2636 }
2637 tp->snd_nxt = th->th_ack;
2638 tp->snd_cwnd = maxseg;
2639 (void) tp->t_fb->tfb_tcp_output(tp);
2640 KASSERT(tp->snd_limited <= 2,
2641 ("%s: tp->snd_limited too big",
2642 __func__));
2643 tp->snd_cwnd = tp->snd_ssthresh +
2644 maxseg *
2645 (tp->t_dupacks - tp->snd_limited);
2646 if (SEQ_GT(onxt, tp->snd_nxt))
2647 tp->snd_nxt = onxt;
2648 goto drop;
2649 } else if (V_tcp_do_rfc3042) {
2650 /*
2651 * Process first and second duplicate
2652 * ACKs. Each indicates a segment
2653 * leaving the network, creating room
2654 * for more. Make sure we can send a
2655 * packet on reception of each duplicate
2656 * ACK by increasing snd_cwnd by one
2657 * segment. Restore the original
2658 * snd_cwnd after packet transmission.
2659 */
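				/*
				 * Worked example (illustrative): with
				 * 10 * maxseg in flight (snd_nxt - snd_una),
				 * the first dupack (snd_limited = 0) makes
				 * the assignment below set snd_cwnd to the
				 * flight size plus one maxseg, permitting
				 * exactly one new segment, as RFC 3042
				 * limited transmit intends.
				 */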
2660 cc_ack_received(tp, th, CC_DUPACK);
2661 u_long oldcwnd = tp->snd_cwnd;
2662 tcp_seq oldsndmax = tp->snd_max;
2663 u_int sent;
2664 int avail;
2665
2666 KASSERT(tp->t_dupacks == 1 ||
2667 tp->t_dupacks == 2,
2668 ("%s: dupacks not 1 or 2",
2669 __func__));
2670 if (tp->t_dupacks == 1)
2671 tp->snd_limited = 0;
2672 tp->snd_cwnd =
2673 (tp->snd_nxt - tp->snd_una) +
2674 (tp->t_dupacks - tp->snd_limited) *
2675 maxseg;
2676 /*
2677 * Only call tcp_output when there
2678 * is new data available to be sent.
2679 * Otherwise we would send pure ACKs.
2680 */
2681 SOCKBUF_LOCK(&so->so_snd);
2682 avail = sbavail(&so->so_snd) -
2683 (tp->snd_nxt - tp->snd_una);
2684 SOCKBUF_UNLOCK(&so->so_snd);
2685 if (avail > 0)
2686 (void) tp->t_fb->tfb_tcp_output(tp);
2687 sent = tp->snd_max - oldsndmax;
2688 if (sent > maxseg) {
2689 KASSERT((tp->t_dupacks == 2 &&
2690 tp->snd_limited == 0) ||
2691 (sent == maxseg + 1 &&
2692 tp->t_flags & TF_SENTFIN),
2693 ("%s: sent too much",
2694 __func__));
2695 tp->snd_limited = 2;
2696 } else if (sent > 0)
2697 ++tp->snd_limited;
2698 tp->snd_cwnd = oldcwnd;
2699 goto drop;
2700 }
2701 }
2702 break;
2703 } else {
2704 /*
2705 * This ack is advancing the left edge, reset the
2706 * counter.
2707 */
2708 tp->t_dupacks = 0;
2709 /*
2710 * If this ack also has new SACK info, increment the
2711 * counter as per rfc6675.
2712 */
2713 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed)
2714 tp->t_dupacks++;
2715 }
2716
2717 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2718 ("%s: th_ack <= snd_una", __func__));
2719
2720 /*
2721 * If the congestion window was inflated to account
2722 * for the other side's cached packets, retract it.
2723 */
2724 if (IN_FASTRECOVERY(tp->t_flags)) {
2725 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2726 if (tp->t_flags & TF_SACK_PERMIT)
2727 tcp_sack_partialack(tp, th);
2728 else
2729 tcp_newreno_partial_ack(tp, th);
2730 } else
2731 cc_post_recovery(tp, th);
2732 }
2733 /*
2734 * If we reach this point, ACK is not a duplicate,
2735 * i.e., it ACKs something we sent.
2736 */
2737 if (tp->t_flags & TF_NEEDSYN) {
2738 /*
2739 * T/TCP: Connection was half-synchronized, and our
2740 * SYN has been ACK'd (so connection is now fully
2741 * synchronized). Go to non-starred state,
2742 * increment snd_una for ACK of SYN, and check if
2743 * we can do window scaling.
2744 */
2745 tp->t_flags &= ~TF_NEEDSYN;
2746 tp->snd_una++;
2747 /* Do window scaling? */
2748 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2749 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2750 tp->rcv_scale = tp->request_r_scale;
2751 /* Send window already scaled. */
2752 }
2753 }
2754
2755 process_ACK:
2756 INP_WLOCK_ASSERT(tp->t_inpcb);
2757
2758 acked = BYTES_THIS_ACK(tp, th);
2759		KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
2760 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2761 tp->snd_una, th->th_ack, tp, m));
2762 TCPSTAT_INC(tcps_rcvackpack);
2763 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2764
2765 /*
2766 * If we just performed our first retransmit, and the ACK
2767 * arrives within our recovery window, then it was a mistake
2768 * to do the retransmit in the first place. Recover our
2769 * original cwnd and ssthresh, and proceed to transmit where
2770 * we left off.
2771 */
2772 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2773 (int)(ticks - tp->t_badrxtwin) < 0)
2774 cc_cong_signal(tp, th, CC_RTO_ERR);
2775
2776 /*
2777 * If we have a timestamp reply, update smoothed
2778 * round trip time. If no timestamp is present but
2779 * transmit timer is running and timed sequence
2780 * number was acked, update smoothed round trip time.
2781 * Since we now have an rtt measurement, cancel the
2782 * timer backoff (cf., Phil Karn's retransmit alg.).
2783 * Recompute the initial retransmit timer.
2784 *
2785 * Some boxes send broken timestamp replies
2786		 * during the SYN+ACK phase; ignore
2787		 * timestamps of 0, or we could calculate a
2788 * huge RTT and blow up the retransmit timer.
2789 */
2790 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2791 u_int t;
2792
2793 t = tcp_ts_getticks() - to.to_tsecr;
2794 if (!tp->t_rttlow || tp->t_rttlow > t)
2795 tp->t_rttlow = t;
2796 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2797 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2798 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2799 tp->t_rttlow = ticks - tp->t_rtttime;
2800 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2801 }
2802
2803 /*
2804 * If all outstanding data is acked, stop retransmit
2805 * timer and remember to restart (more output or persist).
2806 * If there is more data to be acked, restart retransmit
2807 * timer, using current (possibly backed-off) value.
2808 */
2809 if (th->th_ack == tp->snd_max) {
2810 tcp_timer_activate(tp, TT_REXMT, 0);
2811 needoutput = 1;
2812 } else if (!tcp_timer_active(tp, TT_PERSIST))
2813 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2814
2815 /*
2816 * If no data (only SYN) was ACK'd,
2817 * skip rest of ACK processing.
2818 */
2819 if (acked == 0)
2820 goto step6;
2821
2822 /*
2823 * Let the congestion control algorithm update congestion
2824 * control related information. This typically means increasing
2825 * the congestion window.
2826 */
2827 cc_ack_received(tp, th, CC_ACK);
2828
2829 SOCKBUF_LOCK(&so->so_snd);
2830 if (acked > sbavail(&so->so_snd)) {
2831 if (tp->snd_wnd >= sbavail(&so->so_snd))
2832 tp->snd_wnd -= sbavail(&so->so_snd);
2833 else
2834 tp->snd_wnd = 0;
2835 mfree = sbcut_locked(&so->so_snd,
2836 (int)sbavail(&so->so_snd));
2837 ourfinisacked = 1;
2838 } else {
2839 mfree = sbcut_locked(&so->so_snd, acked);
2840 if (tp->snd_wnd >= (u_long) acked)
2841 tp->snd_wnd -= acked;
2842 else
2843 tp->snd_wnd = 0;
2844 ourfinisacked = 0;
2845 }
2846 /* NB: sowwakeup_locked() does an implicit unlock. */
2847 sowwakeup_locked(so);
2848 m_freem(mfree);
2849 /* Detect una wraparound. */
2850 if (!IN_RECOVERY(tp->t_flags) &&
2851 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2852 SEQ_LEQ(th->th_ack, tp->snd_recover))
2853 tp->snd_recover = th->th_ack - 1;
2854 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2855 if (IN_RECOVERY(tp->t_flags) &&
2856 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2857 EXIT_RECOVERY(tp->t_flags);
2858 }
2859 tp->snd_una = th->th_ack;
2860 if (tp->t_flags & TF_SACK_PERMIT) {
2861 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2862 tp->snd_recover = tp->snd_una;
2863 }
2864 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2865 tp->snd_nxt = tp->snd_una;
2866
2867 switch (tp->t_state) {
2868
2869 /*
2870 * In FIN_WAIT_1 STATE in addition to the processing
2871 * for the ESTABLISHED state if our FIN is now acknowledged
2872 * then enter FIN_WAIT_2.
2873 */
2874 case TCPS_FIN_WAIT_1:
2875 if (ourfinisacked) {
2876 /*
2877 * If we can't receive any more
2878 * data, then closing user can proceed.
2879 * Starting the timer is contrary to the
2880 * specification, but if we don't get a FIN
2881 * we'll hang forever.
2882 *
2883 * XXXjl:
2884 * we should release the tp also, and use a
2885 * compressed state.
2886 */
2887 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2888 soisdisconnected(so);
2889 tcp_timer_activate(tp, TT_2MSL,
2890 (tcp_fast_finwait2_recycle ?
2891 tcp_finwait2_timeout :
2892 TP_MAXIDLE(tp)));
2893 }
2894 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2895 }
2896 break;
2897
2898 /*
2899 * In CLOSING STATE in addition to the processing for
2900 * the ESTABLISHED state if the ACK acknowledges our FIN
2901 * then enter the TIME-WAIT state, otherwise ignore
2902 * the segment.
2903 */
2904 case TCPS_CLOSING:
2905 if (ourfinisacked) {
2906 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2907 tcp_twstart(tp);
2908 INP_INFO_RUNLOCK(&V_tcbinfo);
2909 m_freem(m);
2910 return;
2911 }
2912 break;
2913
2914 /*
2915 * In LAST_ACK, we may still be waiting for data to drain
2916 * and/or to be acked, as well as for the ack of our FIN.
2917 * If our FIN is now acknowledged, delete the TCB,
2918 * enter the closed state and return.
2919 */
2920 case TCPS_LAST_ACK:
2921 if (ourfinisacked) {
2922 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2923 tp = tcp_close(tp);
2924 goto drop;
2925 }
2926 break;
2927 }
2928 }
2929
2930 step6:
2931 INP_WLOCK_ASSERT(tp->t_inpcb);
2932
2933 /*
2934 * Update window information.
2935	 * Don't look at window if no ACK: TACs send garbage on first SYN.
2936 */
2937 if ((thflags & TH_ACK) &&
2938 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2939 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2940 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2941 /* keep track of pure window updates */
2942 if (tlen == 0 &&
2943 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2944 TCPSTAT_INC(tcps_rcvwinupd);
2945 tp->snd_wnd = tiwin;
2946 tp->snd_wl1 = th->th_seq;
2947 tp->snd_wl2 = th->th_ack;
2948 if (tp->snd_wnd > tp->max_sndwnd)
2949 tp->max_sndwnd = tp->snd_wnd;
2950 needoutput = 1;
2951 }
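	/*
	 * The three-part test above reads: newer segment, or the same
	 * segment with a newer ACK, or the same segment and ACK with a
	 * larger window.  An old retransmitted segment (th_seq before
	 * snd_wl1) therefore can never overwrite newer window
	 * information with stale data.
	 */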
2952
2953 /*
2954 * Process segments with URG.
2955 */
2956 if ((thflags & TH_URG) && th->th_urp &&
2957 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2958 /*
2959 * This is a kludge, but if we receive and accept
2960 * random urgent pointers, we'll crash in
2961 * soreceive. It's hard to imagine someone
2962 * actually wanting to send this much urgent data.
2963 */
2964 SOCKBUF_LOCK(&so->so_rcv);
2965 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
2966 th->th_urp = 0; /* XXX */
2967 thflags &= ~TH_URG; /* XXX */
2968 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2969 goto dodata; /* XXX */
2970 }
2971 /*
2972 * If this segment advances the known urgent pointer,
2973 * then mark the data stream. This should not happen
2974 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2975 * a FIN has been received from the remote side.
2976 * In these states we ignore the URG.
2977 *
2978 * According to RFC961 (Assigned Protocols),
2979 * the urgent pointer points to the last octet
2980 * of urgent data. We continue, however,
2981 * to consider it to indicate the first octet
2982 * of data past the urgent section as the original
2983 * spec states (in one of two places).
2984 */
2985 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2986 tp->rcv_up = th->th_seq + th->th_urp;
2987 so->so_oobmark = sbavail(&so->so_rcv) +
2988 (tp->rcv_up - tp->rcv_nxt) - 1;
2989 if (so->so_oobmark == 0)
2990 so->so_rcv.sb_state |= SBS_RCVATMARK;
2991 sohasoutofband(so);
2992 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2993 }
2994 SOCKBUF_UNLOCK(&so->so_rcv);
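		/*
		 * Worked example (illustrative): with 100 bytes already
		 * queued in so_rcv, rcv_nxt = 2000 and an urgent pointer
		 * placing rcv_up at 2050, so_oobmark = 100 + 50 - 1 = 149:
		 * the OOB byte sits 149 bytes ahead of the reader, and
		 * SBS_RCVATMARK is set only when that offset reaches zero.
		 */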
2995 /*
2996		 * Remove out-of-band data so it doesn't get presented to the user.
2997 * This can happen independent of advancing the URG pointer,
2998 * but if two URG's are pending at once, some out-of-band
2999 * data may creep in... ick.
3000 */
3001 if (th->th_urp <= (u_long)tlen &&
3002 !(so->so_options & SO_OOBINLINE)) {
3003 /* hdr drop is delayed */
3004 tcp_pulloutofband(so, th, m, drop_hdrlen);
3005 }
3006 } else {
3007 /*
3008 * If no out of band data is expected,
3009 * pull receive urgent pointer along
3010 * with the receive window.
3011 */
3012 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3013 tp->rcv_up = tp->rcv_nxt;
3014 }
3015 dodata: /* XXX */
3016 INP_WLOCK_ASSERT(tp->t_inpcb);
3017
3018 /*
3019 * Process the segment text, merging it into the TCP sequencing queue,
3020 * and arranging for acknowledgment of receipt if necessary.
3021 * This process logically involves adjusting tp->rcv_wnd as data
3022 * is presented to the user (this happens in tcp_usrreq.c,
3023 * case PRU_RCVD). If a FIN has already been received on this
3024 * connection then we just ignore the text.
3025 */
3026 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3027 (tp->t_flags & TF_FASTOPEN));
3028 if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
3029 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3030 tcp_seq save_start = th->th_seq;
3031 m_adj(m, drop_hdrlen); /* delayed header drop */
3032 /*
3033 * Insert segment which includes th into TCP reassembly queue
3034 * with control block tp. Set thflags to whether reassembly now
3035 * includes a segment with FIN. This handles the common case
3036 * inline (segment is the next to be received on an established
3037 * connection, and the queue is empty), avoiding linkage into
3038 * and removal from the queue and repetition of various
3039 * conversions.
3040 * Set DELACK for segments received in order, but ack
3041 * immediately when segments are out of order (so
3042 * fast retransmit can work).
3043 */
3044 if (th->th_seq == tp->rcv_nxt &&
3045 LIST_EMPTY(&tp->t_segq) &&
3046 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3047 tfo_syn)) {
3048 if (DELAY_ACK(tp, tlen) || tfo_syn)
3049 tp->t_flags |= TF_DELACK;
3050 else
3051 tp->t_flags |= TF_ACKNOW;
3052 tp->rcv_nxt += tlen;
3053 thflags = th->th_flags & TH_FIN;
3054 TCPSTAT_INC(tcps_rcvpack);
3055 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3056 SOCKBUF_LOCK(&so->so_rcv);
3057 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3058 m_freem(m);
3059 else
3060 sbappendstream_locked(&so->so_rcv, m, 0);
3061 /* NB: sorwakeup_locked() does an implicit unlock. */
3062 sorwakeup_locked(so);
3063 } else {
3064 /*
3065 * XXX: Due to the header drop above "th" is
3066 * theoretically invalid by now. Fortunately
3067			 * m_adj() doesn't actually free any mbufs
3068 * when trimming from the head.
3069 */
3070 thflags = tcp_reass(tp, th, &tlen, m);
3071 tp->t_flags |= TF_ACKNOW;
3072 }
3073 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
3074 tcp_update_sack_list(tp, save_start, save_start + tlen);
3075 #if 0
3076 /*
3077 * Note the amount of data that peer has sent into
3078 * our window, in order to estimate the sender's
3079 * buffer size.
3080 * XXX: Unused.
3081 */
3082 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
3083 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
3084 else
3085 len = so->so_rcv.sb_hiwat;
3086 #endif
3087 } else {
3088 m_freem(m);
3089 thflags &= ~TH_FIN;
3090 }
3091
3092 /*
3093 * If FIN is received ACK the FIN and let the user know
3094 * that the connection is closing.
3095 */
3096 if (thflags & TH_FIN) {
3097 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3098 socantrcvmore(so);
3099 /*
3100 * If connection is half-synchronized
3101 * (ie NEEDSYN flag on) then delay ACK,
3102 * so it may be piggybacked when SYN is sent.
3103 * Otherwise, since we received a FIN then no
3104 * more input can be expected, send ACK now.
3105 */
3106 if (tp->t_flags & TF_NEEDSYN)
3107 tp->t_flags |= TF_DELACK;
3108 else
3109 tp->t_flags |= TF_ACKNOW;
3110 tp->rcv_nxt++;
3111 }
3112 switch (tp->t_state) {
3113
3114 /*
3115 * In SYN_RECEIVED and ESTABLISHED STATES
3116 * enter the CLOSE_WAIT state.
3117 */
3118 case TCPS_SYN_RECEIVED:
3119 tp->t_starttime = ticks;
3120 /* FALLTHROUGH */
3121 case TCPS_ESTABLISHED:
3122 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3123 break;
3124
3125 /*
3126 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3127 * enter the CLOSING state.
3128 */
3129 case TCPS_FIN_WAIT_1:
3130 tcp_state_change(tp, TCPS_CLOSING);
3131 break;
3132
3133 /*
3134 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3135 * starting the time-wait timer, turning off the other
3136 * standard timers.
3137 */
3138 case TCPS_FIN_WAIT_2:
3139 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
3140 KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata "
3141 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
3142 ti_locked));
3143
3144 tcp_twstart(tp);
3145 INP_INFO_RUNLOCK(&V_tcbinfo);
3146 return;
3147 }
3148 }
3149 if (ti_locked == TI_RLOCKED)
3150 INP_INFO_RUNLOCK(&V_tcbinfo);
3151 ti_locked = TI_UNLOCKED;
3152
3153 #ifdef TCPDEBUG
3154 if (so->so_options & SO_DEBUG)
3155 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3156 &tcp_savetcp, 0);
3157 #endif
3158 TCP_PROBE3(debug__input, tp, th, m);
3159
3160 /*
3161 * Return any desired output.
3162 */
3163 if (needoutput || (tp->t_flags & TF_ACKNOW))
3164 (void) tp->t_fb->tfb_tcp_output(tp);
3165
3166 check_delack:
3167 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3168 __func__, ti_locked));
3169 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3170 INP_WLOCK_ASSERT(tp->t_inpcb);
3171
3172 if (tp->t_flags & TF_DELACK) {
3173 tp->t_flags &= ~TF_DELACK;
3174 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3175 }
3176 INP_WUNLOCK(tp->t_inpcb);
3177 return;
3178
3179 dropafterack:
3180 /*
3181 * Generate an ACK dropping incoming segment if it occupies
3182 * sequence space, where the ACK reflects our state.
3183 *
3184 * We can now skip the test for the RST flag since all
3185 * paths to this code happen after packets containing
3186 * RST have been dropped.
3187 *
3188 * In the SYN-RECEIVED state, don't send an ACK unless the
3189 * segment we received passes the SYN-RECEIVED ACK test.
3190 * If it fails send a RST. This breaks the loop in the
3191 * "LAND" DoS attack, and also prevents an ACK storm
3192 * between two listening ports that have been sent forged
3193 * SYN segments, each with the source address of the other.
3194 */
3195 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3196 (SEQ_GT(tp->snd_una, th->th_ack) ||
3197 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3198 rstreason = BANDLIM_RST_OPENPORT;
3199 goto dropwithreset;
3200 }
3201 #ifdef TCPDEBUG
3202 if (so->so_options & SO_DEBUG)
3203 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3204 &tcp_savetcp, 0);
3205 #endif
3206 TCP_PROBE3(debug__input, tp, th, m);
3207 if (ti_locked == TI_RLOCKED)
3208 INP_INFO_RUNLOCK(&V_tcbinfo);
3209 ti_locked = TI_UNLOCKED;
3210
3211 tp->t_flags |= TF_ACKNOW;
3212 (void) tp->t_fb->tfb_tcp_output(tp);
3213 INP_WUNLOCK(tp->t_inpcb);
3214 m_freem(m);
3215 return;
3216
3217 dropwithreset:
3218 if (ti_locked == TI_RLOCKED)
3219 INP_INFO_RUNLOCK(&V_tcbinfo);
3220 ti_locked = TI_UNLOCKED;
3221
3222 if (tp != NULL) {
3223 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3224 INP_WUNLOCK(tp->t_inpcb);
3225 } else
3226 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3227 return;
3228
3229 drop:
3230 if (ti_locked == TI_RLOCKED) {
3231 INP_INFO_RUNLOCK(&V_tcbinfo);
3232 ti_locked = TI_UNLOCKED;
3233 }
3234 #ifdef INVARIANTS
3235 else
3236 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3237 #endif
3238
3239 /*
3240 * Drop space held by incoming segment and return.
3241 */
3242 #ifdef TCPDEBUG
3243 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3244 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3245 &tcp_savetcp, 0);
3246 #endif
3247 TCP_PROBE3(debug__input, tp, th, m);
3248 if (tp != NULL)
3249 INP_WUNLOCK(tp->t_inpcb);
3250 m_freem(m);
3251 }
3252
3253 /*
3254 * Issue RST and make ACK acceptable to originator of segment.
3255 * The mbuf must still include the original packet header.
3256 * tp may be NULL.
3257 */
3258 void
3259 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3260 int tlen, int rstreason)
3261 {
3262 #ifdef INET
3263 struct ip *ip;
3264 #endif
3265 #ifdef INET6
3266 struct ip6_hdr *ip6;
3267 #endif
3268
3269 if (tp != NULL) {
3270 INP_WLOCK_ASSERT(tp->t_inpcb);
3271 }
3272
3273 /* Don't bother if destination was broadcast/multicast. */
3274 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3275 goto drop;
3276 #ifdef INET6
3277 if (mtod(m, struct ip *)->ip_v == 6) {
3278 ip6 = mtod(m, struct ip6_hdr *);
3279 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3280 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3281 goto drop;
3282 /* IPv6 anycast check is done at tcp6_input() */
3283 }
3284 #endif
3285 #if defined(INET) && defined(INET6)
3286 else
3287 #endif
3288 #ifdef INET
3289 {
3290 ip = mtod(m, struct ip *);
3291 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3292 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3293 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3294 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3295 goto drop;
3296 }
3297 #endif
3298
3299 /* Perform bandwidth limiting. */
3300 if (badport_bandlim(rstreason) < 0)
3301 goto drop;
3302
3303 /* tcp_respond consumes the mbuf chain. */
3304 if (th->th_flags & TH_ACK) {
3305 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3306 th->th_ack, TH_RST);
3307 } else {
3308 if (th->th_flags & TH_SYN)
3309 tlen++;
3310 if (th->th_flags & TH_FIN)
3311 tlen++;
3312 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3313 (tcp_seq)0, TH_RST|TH_ACK);
3314 }
3315 return;
3316 drop:
3317 m_freem(m);
3318 }
3319
3320 /*
3321 * Parse TCP options and place in tcpopt.
3322 */
3323 void
3324 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3325 {
3326 int opt, optlen;
3327
3328 to->to_flags = 0;
3329 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3330 opt = cp[0];
3331 if (opt == TCPOPT_EOL)
3332 break;
3333 if (opt == TCPOPT_NOP)
3334 optlen = 1;
3335 else {
3336 if (cnt < 2)
3337 break;
3338 optlen = cp[1];
3339 if (optlen < 2 || optlen > cnt)
3340 break;
3341 }
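		/*
		 * TCP options are type-length-value encoded: cp[0] is the
		 * option kind and cp[1] the total length including both
		 * header bytes, e.g. 02 04 05 b4 is an MSS option
		 * advertising 1460.  EOL and NOP are the only single-byte
		 * kinds, which is why they are special-cased above.
		 */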
3342 switch (opt) {
3343 case TCPOPT_MAXSEG:
3344 if (optlen != TCPOLEN_MAXSEG)
3345 continue;
3346 if (!(flags & TO_SYN))
3347 continue;
3348 to->to_flags |= TOF_MSS;
3349 bcopy((char *)cp + 2,
3350 (char *)&to->to_mss, sizeof(to->to_mss));
3351 to->to_mss = ntohs(to->to_mss);
3352 break;
3353 case TCPOPT_WINDOW:
3354 if (optlen != TCPOLEN_WINDOW)
3355 continue;
3356 if (!(flags & TO_SYN))
3357 continue;
3358 to->to_flags |= TOF_SCALE;
3359 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3360 break;
3361 case TCPOPT_TIMESTAMP:
3362 if (optlen != TCPOLEN_TIMESTAMP)
3363 continue;
3364 to->to_flags |= TOF_TS;
3365 bcopy((char *)cp + 2,
3366 (char *)&to->to_tsval, sizeof(to->to_tsval));
3367 to->to_tsval = ntohl(to->to_tsval);
3368 bcopy((char *)cp + 6,
3369 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3370 to->to_tsecr = ntohl(to->to_tsecr);
3371 break;
3372 case TCPOPT_SIGNATURE:
3373 /*
3374 * In order to reply to a host which has set the
3375 * TCP_SIGNATURE option in its initial SYN, we have
3376 * to record the fact that the option was observed
3377 * here for the syncache code to perform the correct
3378 * response.
3379 */
3380 if (optlen != TCPOLEN_SIGNATURE)
3381 continue;
3382 to->to_flags |= TOF_SIGNATURE;
3383 to->to_signature = cp + 2;
3384 break;
3385 case TCPOPT_SACK_PERMITTED:
3386 if (optlen != TCPOLEN_SACK_PERMITTED)
3387 continue;
3388 if (!(flags & TO_SYN))
3389 continue;
3390 if (!V_tcp_do_sack)
3391 continue;
3392 to->to_flags |= TOF_SACKPERM;
3393 break;
3394 case TCPOPT_SACK:
3395 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3396 continue;
3397 if (flags & TO_SYN)
3398 continue;
3399 to->to_flags |= TOF_SACK;
3400 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3401 to->to_sacks = cp + 2;
3402 TCPSTAT_INC(tcps_sack_rcv_blocks);
3403 break;
3404 #ifdef TCP_RFC7413
3405 case TCPOPT_FAST_OPEN:
3406			if ((optlen != TCPOLEN_FAST_OPEN_EMPTY) &&
3407			    ((optlen < TCPOLEN_FAST_OPEN_MIN) ||
3408			    (optlen > TCPOLEN_FAST_OPEN_MAX)))
3409 continue;
3410 if (!(flags & TO_SYN))
3411 continue;
3412 if (!V_tcp_fastopen_enabled)
3413 continue;
3414 to->to_flags |= TOF_FASTOPEN;
3415 to->to_tfo_len = optlen - 2;
3416 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL;
3417 break;
3418 #endif
3419 default:
3420 continue;
3421 }
3422 }
3423 }
3424
3425 /*
3426 * Pull out of band byte out of a segment so
3427 * it doesn't appear in the user's data queue.
3428 * It is still reflected in the segment length for
3429 * sequencing purposes.
3430 */
3431 void
3432 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3433 int off)
3434 {
3435 int cnt = off + th->th_urp - 1;
3436
3437 while (cnt >= 0) {
3438 if (m->m_len > cnt) {
3439 char *cp = mtod(m, caddr_t) + cnt;
3440 struct tcpcb *tp = sototcpcb(so);
3441
3442 INP_WLOCK_ASSERT(tp->t_inpcb);
3443
3444 tp->t_iobc = *cp;
3445 tp->t_oobflags |= TCPOOB_HAVEDATA;
3446 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3447 m->m_len--;
3448 if (m->m_flags & M_PKTHDR)
3449 m->m_pkthdr.len--;
3450 return;
3451 }
3452 cnt -= m->m_len;
3453 m = m->m_next;
3454 if (m == NULL)
3455 break;
3456 }
3457 panic("tcp_pulloutofband");
3458 }
3459
3460 /*
3461 * Collect new round-trip time estimate
3462 * and update averages and current timeout.
3463 */
3464 void
3465 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3466 {
3467 int delta;
3468
3469 INP_WLOCK_ASSERT(tp->t_inpcb);
3470
3471 TCPSTAT_INC(tcps_rttupdated);
3472 tp->t_rttupdated++;
3473 if (tp->t_srtt != 0) {
3474 /*
3475 * srtt is stored as fixed point with 5 bits after the
3476		 * binary point (i.e., scaled by 32). The following magic
3477 * is equivalent to the smoothing algorithm in rfc793 with
3478 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3479 * point). Adjust rtt to origin 0.
3480 */
3481 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3482 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3483
3484 if ((tp->t_srtt += delta) <= 0)
3485 tp->t_srtt = 1;
3486
3487 /*
3488 * We accumulate a smoothed rtt variance (actually, a
3489 * smoothed mean difference), then set the retransmit
3490 * timer to smoothed rtt + 4 times the smoothed variance.
3491 * rttvar is stored as fixed point with 4 bits after the
3492 * binary point (scaled by 16). The following is
3493 * equivalent to rfc793 smoothing with an alpha of .75
3494 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3495 * rfc793's wired-in beta.
3496 */
3497 if (delta < 0)
3498 delta = -delta;
3499 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3500 if ((tp->t_rttvar += delta) <= 0)
3501 tp->t_rttvar = 1;
3502 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3503 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3504 } else {
3505 /*
3506 * No rtt measurement yet - use the unsmoothed rtt.
3507 * Set the variance to half the rtt (so our first
3508 * retransmit happens at 3*rtt).
3509 */
3510 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3511 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3512 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3513 }
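	/*
	 * Worked numbers (illustrative, with the stock shifts
	 * TCP_RTT_SHIFT = 5, TCP_DELTA_SHIFT = 2): a smoothed srtt of
	 * 10 ticks is stored as 320.  A new measurement rtt = 14 gives
	 * delta = (13 << 2) - (320 >> 3) = 52 - 40 = 12, so srtt
	 * becomes 332, i.e. 10.375 ticks: the old value plus 1/8 of the
	 * (origin-adjusted) error, as described above.
	 */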
3514 tp->t_rtttime = 0;
3515 tp->t_rxtshift = 0;
3516
3517 /*
3518 * the retransmit should happen at rtt + 4 * rttvar.
3519 * Because of the way we do the smoothing, srtt and rttvar
3520 * will each average +1/2 tick of bias. When we compute
3521 * the retransmit timer, we want 1/2 tick of rounding and
3522 * 1 extra tick because of +-1/2 tick uncertainty in the
3523 * firing of the timer. The bias will give us exactly the
3524 * 1.5 tick we need. But, because the bias is
3525 * statistical, we have to test that we don't drop below
3526 * the minimum feasible timer (which is 2 ticks).
3527 */
3528 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3529 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3530
3531 /*
3532 * We received an ack for a packet that wasn't retransmitted;
3533 * it is probably safe to discard any error indications we've
3534 * received recently. This isn't quite right, but close enough
3535 * for now (a route might have failed after we sent a segment,
3536 * and the return path might not be symmetrical).
3537 */
3538 tp->t_softerror = 0;
3539 }
3540
3541 /*
3542 * Determine a reasonable value for maxseg size.
3543 * If the route is known, check route for mtu.
3544 * If none, use an mss that can be handled on the outgoing interface
3545 * without forcing IP to fragment. If no route is found, route has no mtu,
3546 * or the destination isn't local, use a default, hopefully conservative
3547 * size (usually 512 or the default IP max size, but no more than the mtu
3548 * of the interface), as we can't discover anything about intervening
3549 * gateways or networks. We also initialize the congestion/slow start
3550 * window to be a single segment if the destination isn't local.
3551 * While looking at the routing entry, we also initialize other path-dependent
3552 * parameters from pre-set or cached values in the routing entry.
3553 *
3554 * NOTE that resulting t_maxseg doesn't include space for TCP options or
3555 * IP options, e.g. IPSEC data, since the length of this data may vary, and
3556 * thus it is calculated for every segment separately in tcp_output().
3557 *
3558 * NOTE that this routine is only called when we process an incoming
3559 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3560 * settings are handled in tcp_mssopt().
3561 */
3562 void
3563 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3564 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3565 {
3566 int mss = 0;
3567 u_long maxmtu = 0;
3568 struct inpcb *inp = tp->t_inpcb;
3569 struct hc_metrics_lite metrics;
3570 #ifdef INET6
3571 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3572 size_t min_protoh = isipv6 ?
3573 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3574 sizeof (struct tcpiphdr);
3575 #else
3576 const size_t min_protoh = sizeof(struct tcpiphdr);
3577 #endif
3578
3579 INP_WLOCK_ASSERT(tp->t_inpcb);
3580
3581 if (mtuoffer != -1) {
3582 KASSERT(offer == -1, ("%s: conflict", __func__));
3583 offer = mtuoffer - min_protoh;
3584 }
3585
3586 /* Initialize. */
3587 #ifdef INET6
3588 if (isipv6) {
3589 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3590 tp->t_maxseg = V_tcp_v6mssdflt;
3591 }
3592 #endif
3593 #if defined(INET) && defined(INET6)
3594 else
3595 #endif
3596 #ifdef INET
3597 {
3598 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3599 tp->t_maxseg = V_tcp_mssdflt;
3600 }
3601 #endif

	/*
	 * No route to the sender; stay with the default mss and return.
	 */
	if (maxmtu == 0) {
		/*
		 * In case we return early, we need to initialize the
		 * metrics to a defined state, as tcp_hc_get() would do
		 * for us if there was no cache hit.
		 */
		if (metricptr != NULL)
			bzero(metricptr, sizeof(struct hc_metrics_lite));
		return;
	}

	/* What have we got? */
	switch (offer) {
	case 0:
		/*
		 * Offer == 0 means that there was no MSS on the SYN
		 * segment; in this case we use tcp_mssdflt as already
		 * assigned to t_maxseg above.
		 */
		offer = tp->t_maxseg;
		break;

	case -1:
		/*
		 * Offer == -1 means that we didn't receive a SYN yet.
		 */
		/* FALLTHROUGH */

	default:
		/*
		 * Prevent a DoS attack with a too-small MSS.  Round up
		 * to at least minmss.
		 */
		offer = max(offer, V_tcp_minmss);
	}

	/*
	 * rmx information is now retrieved from tcp_hostcache.
	 */
	tcp_hc_get(&inp->inp_inc, &metrics);
	if (metricptr != NULL)
		bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));

	/*
	 * If there's a discovered mtu in the tcp hostcache, use it;
	 * otherwise, use the link mtu.
	 */
	if (metrics.rmx_mtu)
		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
	else {
#ifdef INET6
		if (isipv6) {
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in6_localaddr(&inp->in6p_faddr))
				mss = min(mss, V_tcp_v6mssdflt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in_localaddr(inp->inp_faddr))
				mss = min(mss, V_tcp_mssdflt);
		}
#endif
		/*
		 * XXX - The above conditional (mss = maxmtu - min_protoh)
		 * probably violates the TCP spec.
		 * The problem is that, since we don't know the other
		 * end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
	}
3695
3696 /*
3697 * Sanity check: make sure that maxseg will be large
3698 * enough to allow some data on segments even if the
3699 * all the option space is used (40bytes). Otherwise
3700 * funny things may happen in tcp_output.
3701 *
3702 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
3703 */
3704 mss = max(mss, 64);
3705
3706 tp->t_maxseg = mss;
3707 }
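
/*
 * Illustrative sketch (editor's addition): the essence of the mss
 * derivation performed by tcp_mss_update() above, with the hostcache,
 * locking and INET/INET6 branching stripped away.  The helper name
 * mss_from_path and all of its parameters are hypothetical.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static int
mss_from_path(u_long maxmtu, u_long hc_mtu, int offer, size_t min_protoh)
{
	int mss;

	/* Prefer a discovered path MTU from the hostcache over the link MTU. */
	if (hc_mtu != 0 && hc_mtu < maxmtu)
		mss = hc_mtu - min_protoh;
	else
		mss = maxmtu - min_protoh;
	/* Never send more per segment than the peer offered... */
	if (mss > offer)
		mss = offer;
	/* ...but keep room for data even with a full 40 bytes of options. */
	if (mss < 64)
		mss = 64;
	return (mss);
}
#endif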

void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	u_long bufsize;
	struct inpcb *inp;
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;
	inp = tp->t_inpcb;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat is different from the default (then
	 * it has been changed on purpose with setsockopt()).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large enough to
	 * allow some data on segments even if all the option space
	 * (40 bytes) is used.  Otherwise funny things may happen in
	 * tcp_output().
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}
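
/*
 * Illustrative sketch (editor's addition): the socket-buffer sizing rule
 * tcp_mss() applies above, i.e. round the buffer up to a whole number of
 * mss-sized segments, capped at the global sb_max limit.  The helper name
 * sbsize_for_mss and its parameters are hypothetical.  Kept under "#if 0"
 * so it is never compiled.
 */
#if 0
static u_long
sbsize_for_mss(u_long bufsize, u_long mss, u_long maxsb)
{
	/* Round up to an integral number of mss units (cf. roundup()). */
	bufsize = ((bufsize + mss - 1) / mss) * mss;
	/* Never grow past the administrative socket buffer limit. */
	if (bufsize > maxsb)
		bufsize = maxsb;
	return (bufsize);
}
#endif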

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	u_long maxmtu = 0;
	u_long thcmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}
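
/*
 * Illustrative sketch (editor's addition): the min()/max() idiom at the
 * end of tcp_mssopt() above.  When both the interface MTU and the
 * hostcache MTU are known, the smaller (more conservative) one wins;
 * when only one is known, max() conveniently selects the nonzero value.
 * The helper name mssopt_pick and its parameters are hypothetical.  Kept
 * under "#if 0" so it is never compiled.
 */
#if 0
static int
mssopt_pick(u_long maxmtu, u_long thcmtu, size_t min_protoh, int mssdflt)
{
	if (maxmtu != 0 && thcmtu != 0)		/* both known: take smaller */
		return ((maxmtu < thcmtu ? maxmtu : thcmtu) - min_protoh);
	if (maxmtu != 0 || thcmtu != 0)		/* one known: take nonzero */
		return ((maxmtu > thcmtu ? maxmtu : thcmtu) - min_protoh);
	return (mssdflt);			/* neither known: default */
}
#endif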

/*
 * When a partial ack arrives, force the retransmission of the next
 * unacknowledged segment.  Do not clear tp->t_dupacks.  By setting
 * snd_nxt to th_ack, this forces the retransmission timer to be
 * started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond the acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function
	 * is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on the fact that
	 * tp->snd_una has not been updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}
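
/*
 * Illustrative sketch (editor's addition): the partial window deflation
 * arithmetic from the tail of tcp_newreno_partial_ack() above (cf. the
 * response to partial acknowledgments in the NewReno algorithm, RFC 6582).
 * The helper name newreno_deflate_cwnd and its parameters are
 * hypothetical.  Kept under "#if 0" so it is never compiled.
 */
#if 0
static u_long
newreno_deflate_cwnd(u_long cwnd, u_long bytes_acked, u_int maxseg)
{
	/* Deflate the window by the amount of new data acknowledged... */
	cwnd = (cwnd > bytes_acked) ? cwnd - bytes_acked : 0;
	/* ...then add back one maxseg so the retransmission can proceed. */
	return (cwnd + maxseg);
}
#endif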
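/*
 * Compute an estimate of the amount of data currently in flight: bytes
 * sent but not yet cumulatively acked, plus bytes retransmitted during
 * SACK loss recovery, minus bytes the peer has reported as SACKed
 * (cf. the "pipe" estimate of RFC 6675).
 */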
int
tcp_compute_pipe(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una +
		tp->sackhint.sack_bytes_rexmit -
		tp->sackhint.sacked_bytes);
}