1 /*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (c) 2007-2008,2010
5 * Swinburne University of Technology, Melbourne, Australia.
6 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
7 * Copyright (c) 2010 The FreeBSD Foundation
8 * Copyright (c) 2010-2011 Juniper Networks, Inc.
9 * All rights reserved.
10 *
11 * Portions of this software were developed at the Centre for Advanced Internet
12 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
13 * James Healy and David Hayes, made possible in part by a grant from the Cisco
14 * University Research Program Fund at Community Foundation Silicon Valley.
15 *
16 * Portions of this software were developed at the Centre for Advanced
17 * Internet Architectures, Swinburne University of Technology, Melbourne,
18 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
19 *
20 * Portions of this software were developed by Robert N. M. Watson under
21 * contract to Juniper Networks, Inc.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 4. Neither the name of the University nor the names of its contributors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 *
47 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52
53 #include "opt_inet.h"
54 #include "opt_inet6.h"
55 #include "opt_ipsec.h"
56 #include "opt_tcpdebug.h"
57
58 #include <sys/param.h>
59 #include <sys/kernel.h>
60 #include <sys/hhook.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/proc.h> /* for proc0 declaration */
64 #include <sys/protosw.h>
65 #include <sys/sdt.h>
66 #include <sys/signalvar.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/sysctl.h>
70 #include <sys/syslog.h>
71 #include <sys/systm.h>
72
73 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
74
75 #include <vm/uma.h>
76
77 #include <net/if.h>
78 #include <net/if_var.h>
79 #include <net/route.h>
80 #include <net/vnet.h>
81
82 #define TCPSTATES /* for logging */
83
84 #include <netinet/in.h>
85 #include <netinet/in_kdtrace.h>
86 #include <netinet/in_pcb.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
90 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
91 #include <netinet/ip_var.h>
92 #include <netinet/ip_options.h>
93 #include <netinet/ip6.h>
94 #include <netinet/icmp6.h>
95 #include <netinet6/in6_pcb.h>
96 #include <netinet6/in6_var.h>
97 #include <netinet6/ip6_var.h>
98 #include <netinet6/nd6.h>
99 #ifdef TCP_RFC7413
100 #include <netinet/tcp_fastopen.h>
101 #endif
102 #include <netinet/tcp.h>
103 #include <netinet/tcp_fsm.h>
104 #include <netinet/tcp_seq.h>
105 #include <netinet/tcp_timer.h>
106 #include <netinet/tcp_var.h>
107 #include <netinet6/tcp6_var.h>
108 #include <netinet/tcpip.h>
109 #include <netinet/cc/cc.h>
110 #ifdef TCPPCAP
111 #include <netinet/tcp_pcap.h>
112 #endif
113 #include <netinet/tcp_syncache.h>
114 #ifdef TCPDEBUG
115 #include <netinet/tcp_debug.h>
116 #endif /* TCPDEBUG */
117 #ifdef TCP_OFFLOAD
118 #include <netinet/tcp_offload.h>
119 #endif
120
121 #include <netipsec/ipsec_support.h>
122
123 #include <machine/in_cksum.h>
124
125 #include <security/mac/mac_framework.h>
126
127 const int tcprexmtthresh = 3;
128
129 int tcp_log_in_vain = 0;
130 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
131 &tcp_log_in_vain, 0,
132 "Log all incoming TCP segments to closed ports");
133
134 VNET_DEFINE(int, blackhole) = 0;
135 #define V_blackhole VNET(blackhole)
136 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
137 &VNET_NAME(blackhole), 0,
138 "Do not send RST on segments to closed ports");
139
140 VNET_DEFINE(int, tcp_delack_enabled) = 1;
141 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
142 &VNET_NAME(tcp_delack_enabled), 0,
143 "Delay ACK to try and piggyback it onto a data packet");
144
145 VNET_DEFINE(int, drop_synfin) = 0;
146 #define V_drop_synfin VNET(drop_synfin)
147 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
148 &VNET_NAME(drop_synfin), 0,
149 "Drop TCP packets with SYN+FIN set");
150
151 VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
152 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
153 &VNET_NAME(tcp_do_rfc6675_pipe), 0,
154 "Use calculated pipe/in-flight bytes per RFC 6675");
155
156 VNET_DEFINE(int, tcp_do_rfc3042) = 1;
157 #define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042)
158 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
159 &VNET_NAME(tcp_do_rfc3042), 0,
160 "Enable RFC 3042 (Limited Transmit)");
161
162 VNET_DEFINE(int, tcp_do_rfc3390) = 1;
163 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
164 &VNET_NAME(tcp_do_rfc3390), 0,
165 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
166
167 VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
168 SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
169 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
170 "Slow-start flight size (initial congestion window) in number of segments");
171
172 VNET_DEFINE(int, tcp_do_rfc3465) = 1;
173 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
174 &VNET_NAME(tcp_do_rfc3465), 0,
175 "Enable RFC 3465 (Appropriate Byte Counting)");
176
177 VNET_DEFINE(int, tcp_abc_l_var) = 2;
178 SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
179 &VNET_NAME(tcp_abc_l_var), 2,
180 "Cap the max cwnd increment during slow-start to this number of segments");
181
182 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
183
184 VNET_DEFINE(int, tcp_do_ecn) = 2;
185 SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
186 &VNET_NAME(tcp_do_ecn), 0,
187 "TCP ECN support");
188
189 VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
190 SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
191 &VNET_NAME(tcp_ecn_maxretries), 0,
192 "Max retries before giving up on ECN");
193
194 VNET_DEFINE(int, tcp_insecure_syn) = 0;
195 #define V_tcp_insecure_syn VNET(tcp_insecure_syn)
196 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
197 &VNET_NAME(tcp_insecure_syn), 0,
198 "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");
199
200 VNET_DEFINE(int, tcp_insecure_rst) = 0;
201 #define V_tcp_insecure_rst VNET(tcp_insecure_rst)
202 SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
203 &VNET_NAME(tcp_insecure_rst), 0,
204 "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");
205
206 VNET_DEFINE(int, tcp_recvspace) = 1024*64;
207 #define V_tcp_recvspace VNET(tcp_recvspace)
208 SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
209 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
210
211 VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
212 #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
213 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
214 &VNET_NAME(tcp_do_autorcvbuf), 0,
215 "Enable automatic receive buffer sizing");
216
217 VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
218 #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
219 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
220 &VNET_NAME(tcp_autorcvbuf_inc), 0,
221 "Incrementor step size of automatic receive buffer");
222
223 VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
224 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
225 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
226 &VNET_NAME(tcp_autorcvbuf_max), 0,
227 "Max size of automatic receive buffer");
228
229 VNET_DEFINE(struct inpcbhead, tcb);
230 #define tcb6 tcb /* for KAME src sync over BSD*'s */
231 VNET_DEFINE(struct inpcbinfo, tcbinfo);
232
233 /*
234 * TCP statistics are stored in an array of counter(9)s, which size matches
235 * size of struct tcpstat. TCP running connection count is a regular array.
236 */
237 VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
238 SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
239 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
240 VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
241 SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
242 CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
243 "TCP connection counts by TCP state");
244
245 static void
246 tcp_vnet_init(const void *unused)
247 {
248
249 COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
250 VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
251 }
252 VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
253 tcp_vnet_init, NULL);
254
255 #ifdef VIMAGE
256 static void
257 tcp_vnet_uninit(const void *unused)
258 {
259
260 COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
261 VNET_PCPUSTAT_FREE(tcpstat);
262 }
263 VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
264 tcp_vnet_uninit, NULL);
265 #endif /* VIMAGE */
266
267 /*
268 * Kernel module interface for updating tcpstat. The argument is an index
269 * into tcpstat treated as an array.
270 */
271 void
272 kmod_tcpstat_inc(int statnum)
273 {
274
275 counter_u64_add(VNET(tcpstat)[statnum], 1);
276 }
277
278 /*
279 * Wrapper for the TCP established input helper hook.
280 */
281 void
282 hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
283 {
284 struct tcp_hhook_data hhook_data;
285
286 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
287 hhook_data.tp = tp;
288 hhook_data.th = th;
289 hhook_data.to = to;
290
291 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
292 tp->osd);
293 }
294 }
295
296 /*
297 * CC wrapper hook functions
298 */
299 void
300 cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
301 {
302 INP_WLOCK_ASSERT(tp->t_inpcb);
303
304 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
305 if (tp->snd_cwnd <= tp->snd_wnd)
306 tp->ccv->flags |= CCF_CWND_LIMITED;
307 else
308 tp->ccv->flags &= ~CCF_CWND_LIMITED;
309
310 if (type == CC_ACK) {
311 if (tp->snd_cwnd > tp->snd_ssthresh) {
312 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
313 V_tcp_abc_l_var * tcp_maxseg(tp));
314 if (tp->t_bytes_acked >= tp->snd_cwnd) {
315 tp->t_bytes_acked -= tp->snd_cwnd;
316 tp->ccv->flags |= CCF_ABC_SENTAWND;
317 }
318 } else {
319 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
320 tp->t_bytes_acked = 0;
321 }
322 }
323
324 if (CC_ALGO(tp)->ack_received != NULL) {
325 /* XXXLAS: Find a way to live without this */
326 tp->ccv->curack = th->th_ack;
327 CC_ALGO(tp)->ack_received(tp->ccv, type);
328 }
329 }
330
331 void
332 cc_conn_init(struct tcpcb *tp)
333 {
334 struct hc_metrics_lite metrics;
335 struct inpcb *inp = tp->t_inpcb;
336 u_int maxseg;
337 int rtt;
338
339 INP_WLOCK_ASSERT(tp->t_inpcb);
340
341 tcp_hc_get(&inp->inp_inc, &metrics);
342 maxseg = tcp_maxseg(tp);
343
344 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
345 tp->t_srtt = rtt;
346 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
347 TCPSTAT_INC(tcps_usedrtt);
348 if (metrics.rmx_rttvar) {
349 tp->t_rttvar = metrics.rmx_rttvar;
350 TCPSTAT_INC(tcps_usedrttvar);
351 } else {
352 /* default variation is +- 1 rtt */
353 tp->t_rttvar =
354 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
355 }
356 TCPT_RANGESET(tp->t_rxtcur,
357 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
358 tp->t_rttmin, TCPTV_REXMTMAX);
359 }
360 if (metrics.rmx_ssthresh) {
361 /*
362 * There's some sort of gateway or interface
363 * buffer limit on the path. Use this to set
364 * the slow start threshold, but set the
365 * threshold to no less than 2*mss.
366 */
367 tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
368 TCPSTAT_INC(tcps_usedssthresh);
369 }
370
371 /*
372 * Set the initial slow-start flight size.
373 *
374 * RFC5681 Section 3.1 specifies the default conservative values.
375 * RFC3390 specifies slightly more aggressive values.
376 * RFC6928 increases it to ten segments.
377 * Support for user specified value for initial flight size.
378 *
379 * If a SYN or SYN/ACK was lost and retransmitted, we have to
380 * reduce the initial CWND to one segment as congestion is likely
381 * requiring us to be cautious.
382 */
383 if (tp->snd_cwnd == 1)
384 tp->snd_cwnd = maxseg; /* SYN(-ACK) lost */
385 else if (V_tcp_initcwnd_segments)
386 tp->snd_cwnd = min(V_tcp_initcwnd_segments * maxseg,
387 max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
388 else if (V_tcp_do_rfc3390)
389 tp->snd_cwnd = min(4 * maxseg, max(2 * maxseg, 4380));
390 else {
391 /* Per RFC5681 Section 3.1 */
392 if (maxseg > 2190)
393 tp->snd_cwnd = 2 * maxseg;
394 else if (maxseg > 1095)
395 tp->snd_cwnd = 3 * maxseg;
396 else
397 tp->snd_cwnd = 4 * maxseg;
398 }
399
400 if (CC_ALGO(tp)->conn_init != NULL)
401 CC_ALGO(tp)->conn_init(tp->ccv);
402 }
403
404 void inline
405 cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
406 {
407 u_int maxseg;
408
409 INP_WLOCK_ASSERT(tp->t_inpcb);
410
411 switch(type) {
412 case CC_NDUPACK:
413 if (!IN_FASTRECOVERY(tp->t_flags)) {
414 tp->snd_recover = tp->snd_max;
415 if (tp->t_flags & TF_ECN_PERMIT)
416 tp->t_flags |= TF_ECN_SND_CWR;
417 }
418 break;
419 case CC_ECN:
420 if (!IN_CONGRECOVERY(tp->t_flags) ||
421 /*
422 * Allow ECN reaction on ACK to CWR, if
423 * that data segment was also CE marked.
424 */
425 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
426 EXIT_CONGRECOVERY(tp->t_flags);
427 TCPSTAT_INC(tcps_ecn_rcwnd);
428 tp->snd_recover = tp->snd_max + 1;
429 if (tp->t_flags & TF_ECN_PERMIT)
430 tp->t_flags |= TF_ECN_SND_CWR;
431 }
432 break;
433 case CC_RTO:
434 maxseg = tcp_maxseg(tp);
435 tp->t_dupacks = 0;
436 tp->t_bytes_acked = 0;
437 EXIT_RECOVERY(tp->t_flags);
438 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
439 maxseg) * maxseg;
440 tp->snd_cwnd = maxseg;
441 break;
442 case CC_RTO_ERR:
443 TCPSTAT_INC(tcps_sndrexmitbad);
444 /* RTO was unnecessary, so reset everything. */
445 tp->snd_cwnd = tp->snd_cwnd_prev;
446 tp->snd_ssthresh = tp->snd_ssthresh_prev;
447 tp->snd_recover = tp->snd_recover_prev;
448 if (tp->t_flags & TF_WASFRECOVERY)
449 ENTER_FASTRECOVERY(tp->t_flags);
450 if (tp->t_flags & TF_WASCRECOVERY)
451 ENTER_CONGRECOVERY(tp->t_flags);
452 tp->snd_nxt = tp->snd_max;
453 tp->t_flags &= ~TF_PREVVALID;
454 tp->t_badrxtwin = 0;
455 break;
456 }
457
458 if (CC_ALGO(tp)->cong_signal != NULL) {
459 if (th != NULL)
460 tp->ccv->curack = th->th_ack;
461 CC_ALGO(tp)->cong_signal(tp->ccv, type);
462 }
463 }
464
465 void inline
466 cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
467 {
468 INP_WLOCK_ASSERT(tp->t_inpcb);
469
470 /* XXXLAS: KASSERT that we're in recovery? */
471
472 if (CC_ALGO(tp)->post_recovery != NULL) {
473 tp->ccv->curack = th->th_ack;
474 CC_ALGO(tp)->post_recovery(tp->ccv);
475 }
476 /* XXXLAS: EXIT_RECOVERY ? */
477 tp->t_bytes_acked = 0;
478 }
479
480 /*
481 * Indicate whether this ack should be delayed. We can delay the ack if
482 * following conditions are met:
483 * - There is no delayed ack timer in progress.
484 * - Our last ack wasn't a 0-sized window. We never want to delay
485 * the ack that opens up a 0-sized window.
486 * - LRO wasn't used for this segment. We make sure by checking that the
487 * segment size is not larger than the MSS.
488 */
489 #define DELAY_ACK(tp, tlen) \
490 ((!tcp_timer_active(tp, TT_DELACK) && \
491 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
492 (tlen <= tp->t_maxseg) && \
493 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
494
495 static void inline
496 cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
497 {
498 INP_WLOCK_ASSERT(tp->t_inpcb);
499
500 if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
501 switch (iptos & IPTOS_ECN_MASK) {
502 case IPTOS_ECN_CE:
503 tp->ccv->flags |= CCF_IPHDR_CE;
504 break;
505 case IPTOS_ECN_ECT0:
506 tp->ccv->flags &= ~CCF_IPHDR_CE;
507 break;
508 case IPTOS_ECN_ECT1:
509 tp->ccv->flags &= ~CCF_IPHDR_CE;
510 break;
511 }
512
513 if (th->th_flags & TH_CWR)
514 tp->ccv->flags |= CCF_TCPHDR_CWR;
515 else
516 tp->ccv->flags &= ~CCF_TCPHDR_CWR;
517
518 if (tp->t_flags & TF_DELACK)
519 tp->ccv->flags |= CCF_DELACK;
520 else
521 tp->ccv->flags &= ~CCF_DELACK;
522
523 CC_ALGO(tp)->ecnpkt_handler(tp->ccv);
524
525 if (tp->ccv->flags & CCF_ACKNOW)
526 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
527 }
528 }
529
530 /*
531 * TCP input handling is split into multiple parts:
532 * tcp6_input is a thin wrapper around tcp_input for the extended
533 * ip6_protox[] call format in ip6_input
534 * tcp_input handles primary segment validation, inpcb lookup and
535 * SYN processing on listen sockets
536 * tcp_do_segment processes the ACK and text of the segment for
537 * establishing, established and closing connections
538 */
539 #ifdef INET6
540 int
541 tcp6_input(struct mbuf **mp, int *offp, int proto)
542 {
543 struct mbuf *m = *mp;
544 struct in6_ifaddr *ia6;
545 struct ip6_hdr *ip6;
546
547 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
548
549 /*
550 * draft-itojun-ipv6-tcp-to-anycast
551 * better place to put this in?
552 */
553 ip6 = mtod(m, struct ip6_hdr *);
554 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
555 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
556 struct ip6_hdr *ip6;
557
558 ifa_free(&ia6->ia_ifa);
559 ip6 = mtod(m, struct ip6_hdr *);
560 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
561 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
562 return (IPPROTO_DONE);
563 }
564 if (ia6)
565 ifa_free(&ia6->ia_ifa);
566
567 *mp = m;
568 return (tcp_input(mp, offp, proto));
569 }
570 #endif /* INET6 */
571
572 int
573 tcp_input(struct mbuf **mp, int *offp, int proto)
574 {
575 struct mbuf *m = *mp;
576 struct tcphdr *th = NULL;
577 struct ip *ip = NULL;
578 struct inpcb *inp = NULL;
579 struct tcpcb *tp = NULL;
580 struct socket *so = NULL;
581 u_char *optp = NULL;
582 int off0;
583 int optlen = 0;
584 #ifdef INET
585 int len;
586 uint8_t ipttl;
587 #endif
588 int tlen = 0, off;
589 int drop_hdrlen;
590 int thflags;
591 int rstreason = 0; /* For badport_bandlim accounting purposes */
592 uint8_t iptos;
593 struct m_tag *fwd_tag = NULL;
594 #ifdef INET6
595 struct ip6_hdr *ip6 = NULL;
596 int isipv6;
597 #else
598 const void *ip6 = NULL;
599 #endif /* INET6 */
600 struct tcpopt to; /* options in this segment */
601 char *s = NULL; /* address and port logging */
602 int ti_locked;
603 #ifdef TCPDEBUG
604 /*
605 * The size of tcp_saveipgen must be the size of the max ip header,
606 * now IPv6.
607 */
608 u_char tcp_saveipgen[IP6_HDR_LEN];
609 struct tcphdr tcp_savetcp;
610 short ostate = 0;
611 #endif
612
613 #ifdef INET6
614 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
615 #endif
616
617 off0 = *offp;
618 m = *mp;
619 *mp = NULL;
620 to.to_flags = 0;
621 TCPSTAT_INC(tcps_rcvtotal);
622
623 #ifdef INET6
624 if (isipv6) {
625 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
626
627 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
628 m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
629 if (m == NULL) {
630 TCPSTAT_INC(tcps_rcvshort);
631 return (IPPROTO_DONE);
632 }
633 }
634
635 ip6 = mtod(m, struct ip6_hdr *);
636 th = (struct tcphdr *)((caddr_t)ip6 + off0);
637 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
638 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
639 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
640 th->th_sum = m->m_pkthdr.csum_data;
641 else
642 th->th_sum = in6_cksum_pseudo(ip6, tlen,
643 IPPROTO_TCP, m->m_pkthdr.csum_data);
644 th->th_sum ^= 0xffff;
645 } else
646 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
647 if (th->th_sum) {
648 TCPSTAT_INC(tcps_rcvbadsum);
649 goto drop;
650 }
651
652 /*
653 * Be proactive about unspecified IPv6 address in source.
654 * As we use all-zero to indicate unbounded/unconnected pcb,
655 * unspecified IPv6 address can be used to confuse us.
656 *
657 * Note that packets with unspecified IPv6 destination is
658 * already dropped in ip6_input.
659 */
660 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
661 /* XXX stat */
662 goto drop;
663 }
664 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
665 }
666 #endif
667 #if defined(INET) && defined(INET6)
668 else
669 #endif
670 #ifdef INET
671 {
672 /*
673 * Get IP and TCP header together in first mbuf.
674 * Note: IP leaves IP header in first mbuf.
675 */
676 if (off0 > sizeof (struct ip)) {
677 ip_stripoptions(m);
678 off0 = sizeof(struct ip);
679 }
680 if (m->m_len < sizeof (struct tcpiphdr)) {
681 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
682 == NULL) {
683 TCPSTAT_INC(tcps_rcvshort);
684 return (IPPROTO_DONE);
685 }
686 }
687 ip = mtod(m, struct ip *);
688 th = (struct tcphdr *)((caddr_t)ip + off0);
689 tlen = ntohs(ip->ip_len) - off0;
690
691 iptos = ip->ip_tos;
692 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
693 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
694 th->th_sum = m->m_pkthdr.csum_data;
695 else
696 th->th_sum = in_pseudo(ip->ip_src.s_addr,
697 ip->ip_dst.s_addr,
698 htonl(m->m_pkthdr.csum_data + tlen +
699 IPPROTO_TCP));
700 th->th_sum ^= 0xffff;
701 } else {
702 struct ipovly *ipov = (struct ipovly *)ip;
703
704 /*
705 * Checksum extended TCP header and data.
706 */
707 len = off0 + tlen;
708 ipttl = ip->ip_ttl;
709 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
710 ipov->ih_len = htons(tlen);
711 th->th_sum = in_cksum(m, len);
712 /* Reset length for SDT probes. */
713 ip->ip_len = htons(len);
714 /* Reset TOS bits */
715 ip->ip_tos = iptos;
716 /* Re-initialization for later version check */
717 ip->ip_ttl = ipttl;
718 ip->ip_v = IPVERSION;
719 ip->ip_hl = off0 >> 2;
720 }
721
722 if (th->th_sum) {
723 TCPSTAT_INC(tcps_rcvbadsum);
724 goto drop;
725 }
726 }
727 #endif /* INET */
728
729 /*
730 * Check that TCP offset makes sense,
731 * pull out TCP options and adjust length. XXX
732 */
733 off = th->th_off << 2;
734 if (off < sizeof (struct tcphdr) || off > tlen) {
735 TCPSTAT_INC(tcps_rcvbadoff);
736 goto drop;
737 }
738 tlen -= off; /* tlen is used instead of ti->ti_len */
739 if (off > sizeof (struct tcphdr)) {
740 #ifdef INET6
741 if (isipv6) {
742 IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
743 ip6 = mtod(m, struct ip6_hdr *);
744 th = (struct tcphdr *)((caddr_t)ip6 + off0);
745 }
746 #endif
747 #if defined(INET) && defined(INET6)
748 else
749 #endif
750 #ifdef INET
751 {
752 if (m->m_len < sizeof(struct ip) + off) {
753 if ((m = m_pullup(m, sizeof (struct ip) + off))
754 == NULL) {
755 TCPSTAT_INC(tcps_rcvshort);
756 return (IPPROTO_DONE);
757 }
758 ip = mtod(m, struct ip *);
759 th = (struct tcphdr *)((caddr_t)ip + off0);
760 }
761 }
762 #endif
763 optlen = off - sizeof (struct tcphdr);
764 optp = (u_char *)(th + 1);
765 }
766 thflags = th->th_flags;
767
768 /*
769 * Convert TCP protocol specific fields to host format.
770 */
771 tcp_fields_to_host(th);
772
773 /*
774 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
775 */
776 drop_hdrlen = off0 + off;
777
778 /*
779 * Locate pcb for segment; if we're likely to add or remove a
780 * connection then first acquire pcbinfo lock. There are three cases
781 * where we might discover later we need a write lock despite the
782 * flags: ACKs moving a connection out of the syncache, ACKs for a
783 * connection in TIMEWAIT and SYNs not targeting a listening socket.
784 */
785 if ((thflags & (TH_FIN | TH_RST)) != 0) {
786 INP_INFO_RLOCK(&V_tcbinfo);
787 ti_locked = TI_RLOCKED;
788 } else
789 ti_locked = TI_UNLOCKED;
790
791 /*
792 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
793 */
794 if (
795 #ifdef INET6
796 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
797 #ifdef INET
798 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
799 #endif
800 #endif
801 #if defined(INET) && !defined(INET6)
802 (m->m_flags & M_IP_NEXTHOP)
803 #endif
804 )
805 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
806
807 findpcb:
808 #ifdef INVARIANTS
809 if (ti_locked == TI_RLOCKED) {
810 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
811 } else {
812 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
813 }
814 #endif
815 #ifdef INET6
816 if (isipv6 && fwd_tag != NULL) {
817 struct sockaddr_in6 *next_hop6;
818
819 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
820 /*
821 * Transparently forwarded. Pretend to be the destination.
822 * Already got one like this?
823 */
824 inp = in6_pcblookup_mbuf(&V_tcbinfo,
825 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
826 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
827 if (!inp) {
828 /*
829 * It's new. Try to find the ambushing socket.
830 * Because we've rewritten the destination address,
831 * any hardware-generated hash is ignored.
832 */
833 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
834 th->th_sport, &next_hop6->sin6_addr,
835 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
836 th->th_dport, INPLOOKUP_WILDCARD |
837 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
838 }
839 } else if (isipv6) {
840 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
841 th->th_sport, &ip6->ip6_dst, th->th_dport,
842 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
843 m->m_pkthdr.rcvif, m);
844 }
845 #endif /* INET6 */
846 #if defined(INET6) && defined(INET)
847 else
848 #endif
849 #ifdef INET
850 if (fwd_tag != NULL) {
851 struct sockaddr_in *next_hop;
852
853 next_hop = (struct sockaddr_in *)(fwd_tag+1);
854 /*
855 * Transparently forwarded. Pretend to be the destination.
856 * already got one like this?
857 */
858 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
859 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
860 m->m_pkthdr.rcvif, m);
861 if (!inp) {
862 /*
863 * It's new. Try to find the ambushing socket.
864 * Because we've rewritten the destination address,
865 * any hardware-generated hash is ignored.
866 */
867 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
868 th->th_sport, next_hop->sin_addr,
869 next_hop->sin_port ? ntohs(next_hop->sin_port) :
870 th->th_dport, INPLOOKUP_WILDCARD |
871 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
872 }
873 } else
874 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
875 th->th_sport, ip->ip_dst, th->th_dport,
876 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
877 m->m_pkthdr.rcvif, m);
878 #endif /* INET */
879
880 /*
881 * If the INPCB does not exist then all data in the incoming
882 * segment is discarded and an appropriate RST is sent back.
883 * XXX MRT Send RST using which routing table?
884 */
885 if (inp == NULL) {
886 /*
887 * Log communication attempts to ports that are not
888 * in use.
889 */
890 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
891 tcp_log_in_vain == 2) {
892 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
893 log(LOG_INFO, "%s; %s: Connection attempt "
894 "to closed port\n", s, __func__);
895 }
896 /*
897 * When blackholing do not respond with a RST but
898 * completely ignore the segment and drop it.
899 */
900 if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
901 V_blackhole == 2)
902 goto dropunlock;
903
904 rstreason = BANDLIM_RST_CLOSEDPORT;
905 goto dropwithreset;
906 }
907 INP_WLOCK_ASSERT(inp);
908 /*
909 * While waiting for inp lock during the lookup, another thread
910 * can have dropped the inpcb, in which case we need to loop back
911 * and try to find a new inpcb to deliver to.
912 */
913 if (inp->inp_flags & INP_DROPPED) {
914 INP_WUNLOCK(inp);
915 inp = NULL;
916 goto findpcb;
917 }
918 if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
919 (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
920 ((inp->inp_socket == NULL) ||
921 (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
922 inp->inp_flowid = m->m_pkthdr.flowid;
923 inp->inp_flowtype = M_HASHTYPE_GET(m);
924 }
925 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
926 #ifdef INET6
927 if (isipv6 && IPSEC_ENABLED(ipv6) &&
928 IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
929 goto dropunlock;
930 }
931 #ifdef INET
932 else
933 #endif
934 #endif /* INET6 */
935 #ifdef INET
936 if (IPSEC_ENABLED(ipv4) &&
937 IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
938 goto dropunlock;
939 }
940 #endif /* INET */
941 #endif /* IPSEC */
942
943 /*
944 * Check the minimum TTL for socket.
945 */
946 if (inp->inp_ip_minttl != 0) {
947 #ifdef INET6
948 if (isipv6) {
949 if (inp->inp_ip_minttl > ip6->ip6_hlim)
950 goto dropunlock;
951 } else
952 #endif
953 if (inp->inp_ip_minttl > ip->ip_ttl)
954 goto dropunlock;
955 }
956
957 /*
958 * A previous connection in TIMEWAIT state is supposed to catch stray
959 * or duplicate segments arriving late. If this segment was a
960 * legitimate new connection attempt, the old INPCB gets removed and
961 * we can try again to find a listening socket.
962 *
963 * At this point, due to earlier optimism, we may hold only an inpcb
964 * lock, and not the inpcbinfo write lock. If so, we need to try to
965 * acquire it, or if that fails, acquire a reference on the inpcb,
966 * drop all locks, acquire a global write lock, and then re-acquire
967 * the inpcb lock. We may at that point discover that another thread
968 * has tried to free the inpcb, in which case we need to loop back
969 * and try to find a new inpcb to deliver to.
970 *
971 * XXXRW: It may be time to rethink timewait locking.
972 */
973 relocked:
974 if (inp->inp_flags & INP_TIMEWAIT) {
975 if (ti_locked == TI_UNLOCKED) {
976 if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
977 in_pcbref(inp);
978 INP_WUNLOCK(inp);
979 INP_INFO_RLOCK(&V_tcbinfo);
980 ti_locked = TI_RLOCKED;
981 INP_WLOCK(inp);
982 if (in_pcbrele_wlocked(inp)) {
983 inp = NULL;
984 goto findpcb;
985 } else if (inp->inp_flags & INP_DROPPED) {
986 INP_WUNLOCK(inp);
987 inp = NULL;
988 goto findpcb;
989 }
990 } else
991 ti_locked = TI_RLOCKED;
992 }
993 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
994
995 tcp_dooptions(&to, optp, optlen,
996 (thflags & TH_SYN) ? TO_SYN : 0);
997 /*
998 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
999 */
1000 if (tcp_twcheck(inp, &to, th, m, tlen))
1001 goto findpcb;
1002 INP_INFO_RUNLOCK(&V_tcbinfo);
1003 return (IPPROTO_DONE);
1004 }
1005 /*
1006 * The TCPCB may no longer exist if the connection is winding
1007 * down or it is in the CLOSED state. Either way we drop the
1008 * segment and send an appropriate response.
1009 */
1010 tp = intotcpcb(inp);
1011 if (tp == NULL || tp->t_state == TCPS_CLOSED) {
1012 rstreason = BANDLIM_RST_CLOSEDPORT;
1013 goto dropwithreset;
1014 }
1015
1016 #ifdef TCP_OFFLOAD
1017 if (tp->t_flags & TF_TOE) {
1018 tcp_offload_input(tp, m);
1019 m = NULL; /* consumed by the TOE driver */
1020 goto dropunlock;
1021 }
1022 #endif
1023
1024 /*
1025 * We've identified a valid inpcb, but it could be that we need an
1026 * inpcbinfo write lock but don't hold it. In this case, attempt to
1027 * acquire using the same strategy as the TIMEWAIT case above. If we
1028 * relock, we have to jump back to 'relocked' as the connection might
1029 * now be in TIMEWAIT.
1030 */
1031 #ifdef INVARIANTS
1032 if ((thflags & (TH_FIN | TH_RST)) != 0)
1033 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1034 #endif
1035 if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
1036 (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) &&
1037 !(tp->t_flags & TF_FASTOPEN)))) {
1038 if (ti_locked == TI_UNLOCKED) {
1039 if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
1040 in_pcbref(inp);
1041 INP_WUNLOCK(inp);
1042 INP_INFO_RLOCK(&V_tcbinfo);
1043 ti_locked = TI_RLOCKED;
1044 INP_WLOCK(inp);
1045 if (in_pcbrele_wlocked(inp)) {
1046 inp = NULL;
1047 goto findpcb;
1048 } else if (inp->inp_flags & INP_DROPPED) {
1049 INP_WUNLOCK(inp);
1050 inp = NULL;
1051 goto findpcb;
1052 }
1053 goto relocked;
1054 } else
1055 ti_locked = TI_RLOCKED;
1056 }
1057 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1058 }
1059
1060 #ifdef MAC
1061 INP_WLOCK_ASSERT(inp);
1062 if (mac_inpcb_check_deliver(inp, m))
1063 goto dropunlock;
1064 #endif
1065 so = inp->inp_socket;
1066 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1067 #ifdef TCPDEBUG
1068 if (so->so_options & SO_DEBUG) {
1069 ostate = tp->t_state;
1070 #ifdef INET6
1071 if (isipv6) {
1072 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
1073 } else
1074 #endif
1075 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1076 tcp_savetcp = *th;
1077 }
1078 #endif /* TCPDEBUG */
1079 /*
1080 * When the socket is accepting connections (the INPCB is in LISTEN
1081 * state) we look into the SYN cache if this is a new connection
1082 * attempt or the completion of a previous one.
1083 */
1084 KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN),
1085 ("%s: so accepting but tp %p not listening", __func__, tp));
1086 if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) {
1087 struct in_conninfo inc;
1088
1089 bzero(&inc, sizeof(inc));
1090 #ifdef INET6
1091 if (isipv6) {
1092 inc.inc_flags |= INC_ISIPV6;
1093 if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
1094 inc.inc_flags |= INC_IPV6MINMTU;
1095 inc.inc6_faddr = ip6->ip6_src;
1096 inc.inc6_laddr = ip6->ip6_dst;
1097 } else
1098 #endif
1099 {
1100 inc.inc_faddr = ip->ip_src;
1101 inc.inc_laddr = ip->ip_dst;
1102 }
1103 inc.inc_fport = th->th_sport;
1104 inc.inc_lport = th->th_dport;
1105 inc.inc_fibnum = so->so_fibnum;
1106
1107 /*
1108 * Check for an existing connection attempt in syncache if
1109 * the flag is only ACK. A successful lookup creates a new
1110 * socket appended to the listen queue in SYN_RECEIVED state.
1111 */
1112 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1113
1114 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1115 /*
1116 * Parse the TCP options here because
1117 * syncookies need access to the reflected
1118 * timestamp.
1119 */
1120 tcp_dooptions(&to, optp, optlen, 0);
1121 /*
1122 * NB: syncache_expand() doesn't unlock
1123 * inp and tcpinfo locks.
1124 */
1125 rstreason = syncache_expand(&inc, &to, th, &so, m);
1126 if (rstreason < 0) {
1127 /*
1128 * A failing TCP MD5 signature comparison
1129 * must result in the segment being dropped
1130 * and must not produce any response back
1131 * to the sender.
1132 */
1133 goto dropunlock;
1134 } else if (rstreason == 0) {
1135 /*
1136 * No syncache entry or ACK was not
1137 * for our SYN/ACK. Send a RST.
1138 * NB: syncache did its own logging
1139 * of the failure cause.
1140 */
1141 rstreason = BANDLIM_RST_OPENPORT;
1142 goto dropwithreset;
1143 }
1144 #ifdef TCP_RFC7413
1145 new_tfo_socket:
1146 #endif
1147 if (so == NULL) {
1148 /*
1149 * We completed the 3-way handshake
1150 * but could not allocate a socket
1151 * either due to memory shortage,
1152 * listen queue length limits or
1153 * global socket limits. Send RST
1154 * or wait and have the remote end
1155 * retransmit the ACK for another
1156 * try.
1157 */
1158 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1159 log(LOG_DEBUG, "%s; %s: Listen socket: "
1160 "Socket allocation failed due to "
1161 "limits or memory shortage, %s\n",
1162 s, __func__,
1163 V_tcp_sc_rst_sock_fail ?
1164 "sending RST" : "try again");
1165 if (V_tcp_sc_rst_sock_fail) {
1166 rstreason = BANDLIM_UNLIMITED;
1167 goto dropwithreset;
1168 } else
1169 goto dropunlock;
1170 }
1171 /*
1172 * Socket is created in state SYN_RECEIVED.
1173 * Unlock the listen socket, lock the newly
1174 * created socket and update the tp variable.
1175 */
1176 INP_WUNLOCK(inp); /* listen socket */
1177 inp = sotoinpcb(so);
1178 /*
1179 * New connection inpcb is already locked by
1180 * syncache_expand().
1181 */
1182 INP_WLOCK_ASSERT(inp);
1183 tp = intotcpcb(inp);
1184 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1185 ("%s: ", __func__));
1186 /*
1187 * Process the segment and the data it
1188 * contains. tcp_do_segment() consumes
1189 * the mbuf chain and unlocks the inpcb.
1190 */
1191 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1192 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1193 iptos, ti_locked);
1194 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1195 return (IPPROTO_DONE);
1196 }
1197 /*
1198 * Segment flag validation for new connection attempts:
1199 *
1200 * Our (SYN|ACK) response was rejected.
1201 * Check with syncache and remove entry to prevent
1202 * retransmits.
1203 *
1204 * NB: syncache_chkrst does its own logging of failure
1205 * causes.
1206 */
1207 if (thflags & TH_RST) {
1208 syncache_chkrst(&inc, th);
1209 goto dropunlock;
1210 }
1211 /*
1212 * We can't do anything without SYN.
1213 */
1214 if ((thflags & TH_SYN) == 0) {
1215 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1216 log(LOG_DEBUG, "%s; %s: Listen socket: "
1217 "SYN is missing, segment ignored\n",
1218 s, __func__);
1219 TCPSTAT_INC(tcps_badsyn);
1220 goto dropunlock;
1221 }
1222 /*
1223 * (SYN|ACK) is bogus on a listen socket.
1224 */
1225 if (thflags & TH_ACK) {
1226 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1227 log(LOG_DEBUG, "%s; %s: Listen socket: "
1228 "SYN|ACK invalid, segment rejected\n",
1229 s, __func__);
1230 syncache_badack(&inc); /* XXX: Not needed! */
1231 TCPSTAT_INC(tcps_badsyn);
1232 rstreason = BANDLIM_RST_OPENPORT;
1233 goto dropwithreset;
1234 }
1235 /*
1236 * If the drop_synfin option is enabled, drop all
1237 * segments with both the SYN and FIN bits set.
1238 * This prevents e.g. nmap from identifying the
1239 * TCP/IP stack.
1240 * XXX: Poor reasoning. nmap has other methods
1241 * and is constantly refining its stack detection
1242 * strategies.
1243 * XXX: This is a violation of the TCP specification
1244 * and was used by RFC1644.
1245 */
1246 if ((thflags & TH_FIN) && V_drop_synfin) {
1247 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1248 log(LOG_DEBUG, "%s; %s: Listen socket: "
1249 "SYN|FIN segment ignored (based on "
1250 "sysctl setting)\n", s, __func__);
1251 TCPSTAT_INC(tcps_badsyn);
1252 goto dropunlock;
1253 }
1254 /*
1255 * Segment's flags are (SYN) or (SYN|FIN).
1256 *
1257 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1258 * as they do not affect the state of the TCP FSM.
1259 * The data pointed to by TH_URG and th_urp is ignored.
1260 */
1261 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1262 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1263 KASSERT(thflags & (TH_SYN),
1264 ("%s: Listen socket: TH_SYN not set", __func__));
1265 #ifdef INET6
1266 /*
1267 * If deprecated address is forbidden,
1268 * we do not accept SYN to deprecated interface
1269 * address to prevent any new inbound connection from
1270 * getting established.
1271 * When we do not accept SYN, we send a TCP RST,
1272 * with deprecated source address (instead of dropping
1273 * it). We compromise it as it is much better for peer
1274 * to send a RST, and RST will be the final packet
1275 * for the exchange.
1276 *
1277 * If we do not forbid deprecated addresses, we accept
1278 * the SYN packet. RFC2462 does not suggest dropping
1279 * SYN in this case.
1280 * If we decipher RFC2462 5.5.4, it says like this:
1281 * 1. use of deprecated addr with existing
1282 * communication is okay - "SHOULD continue to be
1283 * used"
1284 * 2. use of it with new communication:
1285 * (2a) "SHOULD NOT be used if alternate address
1286 * with sufficient scope is available"
1287 * (2b) nothing mentioned otherwise.
1288 * Here we fall into (2b) case as we have no choice in
1289 * our source address selection - we must obey the peer.
1290 *
1291 * The wording in RFC2462 is confusing, and there are
1292 * multiple description text for deprecated address
1293 * handling - worse, they are not exactly the same.
1294 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1295 */
1296 if (isipv6 && !V_ip6_use_deprecated) {
1297 struct in6_ifaddr *ia6;
1298
1299 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
1300 if (ia6 != NULL &&
1301 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1302 ifa_free(&ia6->ia_ifa);
1303 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1304 log(LOG_DEBUG, "%s; %s: Listen socket: "
1305 "Connection attempt to deprecated "
1306 "IPv6 address rejected\n",
1307 s, __func__);
1308 rstreason = BANDLIM_RST_OPENPORT;
1309 goto dropwithreset;
1310 }
1311 if (ia6)
1312 ifa_free(&ia6->ia_ifa);
1313 }
1314 #endif /* INET6 */
1315 /*
1316 * Basic sanity checks on incoming SYN requests:
1317 * Don't respond if the destination is a link layer
1318 * broadcast according to RFC1122 4.2.3.10, p. 104.
1319 * If it is from this socket it must be forged.
1320 * Don't respond if the source or destination is a
1321 * global or subnet broad- or multicast address.
1322 * Note that it is quite possible to receive unicast
1323 * link-layer packets with a broadcast IP address. Use
1324 * in_broadcast() to find them.
1325 */
1326 if (m->m_flags & (M_BCAST|M_MCAST)) {
1327 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1328 log(LOG_DEBUG, "%s; %s: Listen socket: "
1329 "Connection attempt from broad- or multicast "
1330 "link layer address ignored\n", s, __func__);
1331 goto dropunlock;
1332 }
1333 #ifdef INET6
1334 if (isipv6) {
1335 if (th->th_dport == th->th_sport &&
1336 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1337 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1338 log(LOG_DEBUG, "%s; %s: Listen socket: "
1339 "Connection attempt to/from self "
1340 "ignored\n", s, __func__);
1341 goto dropunlock;
1342 }
1343 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1344 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1345 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1346 log(LOG_DEBUG, "%s; %s: Listen socket: "
1347 "Connection attempt from/to multicast "
1348 "address ignored\n", s, __func__);
1349 goto dropunlock;
1350 }
1351 }
1352 #endif
1353 #if defined(INET) && defined(INET6)
1354 else
1355 #endif
1356 #ifdef INET
1357 {
1358 if (th->th_dport == th->th_sport &&
1359 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1360 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1361 log(LOG_DEBUG, "%s; %s: Listen socket: "
1362 "Connection attempt from/to self "
1363 "ignored\n", s, __func__);
1364 goto dropunlock;
1365 }
1366 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1367 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1368 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1369 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1370 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1371 log(LOG_DEBUG, "%s; %s: Listen socket: "
1372 "Connection attempt from/to broad- "
1373 "or multicast address ignored\n",
1374 s, __func__);
1375 goto dropunlock;
1376 }
1377 }
1378 #endif
1379 /*
1380 * SYN appears to be valid. Create compressed TCP state
1381 * for syncache.
1382 */
1383 #ifdef TCPDEBUG
1384 if (so->so_options & SO_DEBUG)
1385 tcp_trace(TA_INPUT, ostate, tp,
1386 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1387 #endif
1388 TCP_PROBE3(debug__input, tp, th, m);
1389 tcp_dooptions(&to, optp, optlen, TO_SYN);
1390 #ifdef TCP_RFC7413
1391 if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL))
1392 goto new_tfo_socket;
1393 #else
1394 syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
1395 #endif
1396 /*
1397 * Entry added to syncache and mbuf consumed.
1398 * Only the listen socket is unlocked by syncache_add().
1399 */
1400 if (ti_locked == TI_RLOCKED) {
1401 INP_INFO_RUNLOCK(&V_tcbinfo);
1402 ti_locked = TI_UNLOCKED;
1403 }
1404 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1405 return (IPPROTO_DONE);
1406 } else if (tp->t_state == TCPS_LISTEN) {
1407 /*
1408 * When a listen socket is torn down the SO_ACCEPTCONN
1409 * flag is removed first while connections are drained
1410 * from the accept queue in a unlock/lock cycle of the
1411 * ACCEPT_LOCK, opening a race condition allowing a SYN
1412 * attempt go through unhandled.
1413 */
1414 goto dropunlock;
1415 }
1416 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1417 if (tp->t_flags & TF_SIGNATURE) {
1418 tcp_dooptions(&to, optp, optlen, thflags);
1419 if ((to.to_flags & TOF_SIGNATURE) == 0) {
1420 TCPSTAT_INC(tcps_sig_err_nosigopt);
1421 goto dropunlock;
1422 }
1423 if (!TCPMD5_ENABLED() ||
1424 TCPMD5_INPUT(m, th, to.to_signature) != 0)
1425 goto dropunlock;
1426 }
1427 #endif
1428 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1429
1430 /*
1431 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1432 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1433 * the inpcb, and unlocks pcbinfo.
1434 */
1435 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
1436 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1437 return (IPPROTO_DONE);
1438
1439 dropwithreset:
1440 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1441
1442 if (ti_locked == TI_RLOCKED) {
1443 INP_INFO_RUNLOCK(&V_tcbinfo);
1444 ti_locked = TI_UNLOCKED;
1445 }
1446 #ifdef INVARIANTS
1447 else {
1448 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
1449 "ti_locked: %d", __func__, ti_locked));
1450 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1451 }
1452 #endif
1453
1454 if (inp != NULL) {
1455 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1456 INP_WUNLOCK(inp);
1457 } else
1458 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1459 m = NULL; /* mbuf chain got consumed. */
1460 goto drop;
1461
1462 dropunlock:
1463 if (m != NULL)
1464 TCP_PROBE5(receive, NULL, tp, m, tp, th);
1465
1466 if (ti_locked == TI_RLOCKED) {
1467 INP_INFO_RUNLOCK(&V_tcbinfo);
1468 ti_locked = TI_UNLOCKED;
1469 }
1470 #ifdef INVARIANTS
1471 else {
1472 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
1473 "ti_locked: %d", __func__, ti_locked));
1474 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1475 }
1476 #endif
1477
1478 if (inp != NULL)
1479 INP_WUNLOCK(inp);
1480
1481 drop:
1482 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1483 if (s != NULL)
1484 free(s, M_TCPLOG);
1485 if (m != NULL)
1486 m_freem(m);
1487 return (IPPROTO_DONE);
1488 }
1489
1490 /*
1491 * Automatic sizing of receive socket buffer. Often the send
1492 * buffer size is not optimally adjusted to the actual network
1493 * conditions at hand (delay bandwidth product). Setting the
1494 * buffer size too small limits throughput on links with high
1495 * bandwidth and high delay (eg. trans-continental/oceanic links).
1496 *
1497 * On the receive side the socket buffer memory is only rarely
1498 * used to any significant extent. This allows us to be much
1499 * more aggressive in scaling the receive socket buffer. For
1500 * the case that the buffer space is actually used to a large
1501 * extent and we run out of kernel memory we can simply drop
1502 * the new segments; TCP on the sender will just retransmit it
1503 * later. Setting the buffer size too big may only consume too
1504 * much kernel memory if the application doesn't read() from
1505 * the socket or packet loss or reordering makes use of the
1506 * reassembly queue.
1507 *
1508 * The criteria to step up the receive buffer one notch are:
1509 * 1. Application has not set receive buffer size with
1510 * SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
1511 * 2. the number of bytes received during the time it takes
1512 * one timestamp to be reflected back to us (the RTT);
1513 * 3. received bytes per RTT is within seven eighth of the
1514 * current socket buffer size;
1515 * 4. receive buffer size has not hit maximal automatic size;
1516 *
1517 * This algorithm does one step per RTT at most and only if
1518 * we receive a bulk stream w/o packet losses or reorderings.
1519 * Shrinking the buffer during idle times is not necessary as
1520 * it doesn't consume any memory when idle.
1521 *
1522 * TODO: Only step up if the application is actually serving
1523 * the buffer to better manage the socket buffer resources.
1524 */
1525 int
1526 tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
1527 struct tcpcb *tp, int tlen)
1528 {
1529 int newsize = 0;
1530
1531 if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
1532 tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
1533 TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
1534 (tp->t_srtt >> TCP_RTT_SHIFT)) {
1535 if (tp->rfbuf_cnt > (so->so_rcv.sb_hiwat / 8 * 7) &&
1536 so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
1537 newsize = min(so->so_rcv.sb_hiwat +
1538 V_tcp_autorcvbuf_inc, V_tcp_autorcvbuf_max);
1539 }
1540 TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);
1541
1542 /* Start over with next RTT. */
1543 tp->rfbuf_ts = 0;
1544 tp->rfbuf_cnt = 0;
1545 } else {
1546 tp->rfbuf_cnt += tlen; /* add up */
1547 }
1548 return (newsize);
1549 }
1550
1551 void
1552 tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
1553 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
1554 int ti_locked)
1555 {
1556 int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
1557 int rstreason, todrop, win;
1558 u_long tiwin;
1559 char *s;
1560 struct in_conninfo *inc;
1561 struct mbuf *mfree;
1562 struct tcpopt to;
1563 int tfo_syn;
1564
1565 #ifdef TCPDEBUG
1566 /*
1567 * The size of tcp_saveipgen must be the size of the max ip header,
1568 * now IPv6.
1569 */
1570 u_char tcp_saveipgen[IP6_HDR_LEN];
1571 struct tcphdr tcp_savetcp;
1572 short ostate = 0;
1573 #endif
1574 thflags = th->th_flags;
1575 inc = &tp->t_inpcb->inp_inc;
1576 tp->sackhint.last_sack_ack = 0;
1577 sack_changed = 0;
1578
1579 /*
1580 * If this is either a state-changing packet or current state isn't
1581 * established, we require a write lock on tcbinfo. Otherwise, we
1582 * allow the tcbinfo to be in either alocked or unlocked, as the
1583 * caller may have unnecessarily acquired a write lock due to a race.
1584 */
1585 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
1586 tp->t_state != TCPS_ESTABLISHED) {
1587 KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
1588 "SYN/FIN/RST/!EST", __func__, ti_locked));
1589 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1590 } else {
1591 #ifdef INVARIANTS
1592 if (ti_locked == TI_RLOCKED)
1593 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1594 else {
1595 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
1596 "ti_locked: %d", __func__, ti_locked));
1597 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1598 }
1599 #endif
1600 }
1601 INP_WLOCK_ASSERT(tp->t_inpcb);
1602 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1603 __func__));
1604 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1605 __func__));
1606
1607 #ifdef TCPPCAP
1608 /* Save segment, if requested. */
1609 tcp_pcap_add(th, m, &(tp->t_inpkts));
1610 #endif
1611
1612 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
1613 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1614 log(LOG_DEBUG, "%s; %s: "
1615 "SYN|FIN segment ignored (based on "
1616 "sysctl setting)\n", s, __func__);
1617 free(s, M_TCPLOG);
1618 }
1619 goto drop;
1620 }
1621
1622 /*
1623 * If a segment with the ACK-bit set arrives in the SYN-SENT state
1624 * check SEQ.ACK first.
1625 */
1626 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
1627 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
1628 rstreason = BANDLIM_UNLIMITED;
1629 goto dropwithreset;
1630 }
1631
1632 /*
1633 * Segment received on connection.
1634 * Reset idle time and keep-alive timer.
1635 * XXX: This should be done after segment
1636 * validation to ignore broken/spoofed segs.
1637 */
1638 tp->t_rcvtime = ticks;
1639 if (TCPS_HAVEESTABLISHED(tp->t_state))
1640 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
1641
1642 /*
1643 * Scale up the window into a 32-bit value.
1644 * For the SYN_SENT state the scale is zero.
1645 */
1646 tiwin = th->th_win << tp->snd_scale;
1647
1648 /*
1649 * TCP ECN processing.
1650 */
1651 if (tp->t_flags & TF_ECN_PERMIT) {
1652 if (thflags & TH_CWR)
1653 tp->t_flags &= ~TF_ECN_SND_ECE;
1654 switch (iptos & IPTOS_ECN_MASK) {
1655 case IPTOS_ECN_CE:
1656 tp->t_flags |= TF_ECN_SND_ECE;
1657 TCPSTAT_INC(tcps_ecn_ce);
1658 break;
1659 case IPTOS_ECN_ECT0:
1660 TCPSTAT_INC(tcps_ecn_ect0);
1661 break;
1662 case IPTOS_ECN_ECT1:
1663 TCPSTAT_INC(tcps_ecn_ect1);
1664 break;
1665 }
1666
1667 /* Process a packet differently from RFC3168. */
1668 cc_ecnpkt_handler(tp, th, iptos);
1669
1670 /* Congestion experienced. */
1671 if (thflags & TH_ECE) {
1672 cc_cong_signal(tp, th, CC_ECN);
1673 }
1674 }
1675
1676 /*
1677 * Parse options on any incoming segment.
1678 */
1679 tcp_dooptions(&to, (u_char *)(th + 1),
1680 (th->th_off << 2) - sizeof(struct tcphdr),
1681 (thflags & TH_SYN) ? TO_SYN : 0);
1682
1683 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1684 if ((tp->t_flags & TF_SIGNATURE) != 0 &&
1685 (to.to_flags & TOF_SIGNATURE) == 0) {
1686 TCPSTAT_INC(tcps_sig_err_sigopt);
1687 /* XXX: should drop? */
1688 }
1689 #endif
1690 /*
1691 * If echoed timestamp is later than the current time,
1692 * fall back to non RFC1323 RTT calculation. Normalize
1693 * timestamp if syncookies were used when this connection
1694 * was established.
1695 */
1696 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1697 to.to_tsecr -= tp->ts_offset;
1698 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
1699 to.to_tsecr = 0;
1700 }
1701 /*
1702 * Process options only when we get SYN/ACK back. The SYN case
1703 * for incoming connections is handled in tcp_syncache.
1704 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
1705 * or <SYN,ACK>) segment itself is never scaled.
1706 * XXX this is traditional behavior, may need to be cleaned up.
1707 */
1708 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1709 if ((to.to_flags & TOF_SCALE) &&
1710 (tp->t_flags & TF_REQ_SCALE)) {
1711 tp->t_flags |= TF_RCVD_SCALE;
1712 tp->snd_scale = to.to_wscale;
1713 }
1714 /*
1715 * Initial send window. It will be updated with
1716 * the next incoming segment to the scaled value.
1717 */
1718 tp->snd_wnd = th->th_win;
1719 if (to.to_flags & TOF_TS) {
1720 tp->t_flags |= TF_RCVD_TSTMP;
1721 tp->ts_recent = to.to_tsval;
1722 tp->ts_recent_age = tcp_ts_getticks();
1723 }
1724 if (to.to_flags & TOF_MSS)
1725 tcp_mss(tp, to.to_mss);
1726 if ((tp->t_flags & TF_SACK_PERMIT) &&
1727 (to.to_flags & TOF_SACKPERM) == 0)
1728 tp->t_flags &= ~TF_SACK_PERMIT;
1729 }
1730
1731 /*
1732 * If timestamps were negotiated during SYN/ACK and a
1733 * segment without a timestamp is received, silently drop
1734 * the segment.
1735 * See section 3.2 of RFC 7323.
1736 */
1737 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
1738 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1739 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1740 "segment silently dropped\n", s, __func__);
1741 free(s, M_TCPLOG);
1742 }
1743 goto drop;
1744 }
1745 /*
1746 * If timestamps were not negotiated during SYN/ACK and a
1747 * segment with a timestamp is received, ignore the
1748 * timestamp and process the packet normally.
1749 * See section 3.2 of RFC 7323.
1750 */
1751 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
1752 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1753 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1754 "segment processed normally\n", s, __func__);
1755 free(s, M_TCPLOG);
1756 }
1757 }
1758
1759 /*
1760 * Header prediction: check for the two common cases
1761 * of a uni-directional data xfer. If the packet has
1762 * no control flags, is in-sequence, the window didn't
1763 * change and we're not retransmitting, it's a
1764 * candidate. If the length is zero and the ack moved
1765 * forward, we're the sender side of the xfer. Just
1766 * free the data acked & wake any higher level process
1767 * that was blocked waiting for space. If the length
1768 * is non-zero and the ack didn't move, we're the
1769 * receiver side. If we're getting packets in-order
1770 * (the reassembly queue is empty), add the data to
1771 * the socket buffer and note that we need a delayed ack.
1772 * Make sure that the hidden state-flags are also off.
1773 * Since we check for TCPS_ESTABLISHED first, it can only
1774 * be TH_NEEDSYN.
1775 */
1776 if (tp->t_state == TCPS_ESTABLISHED &&
1777 th->th_seq == tp->rcv_nxt &&
1778 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1779 tp->snd_nxt == tp->snd_max &&
1780 tiwin && tiwin == tp->snd_wnd &&
1781 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1782 SEGQ_EMPTY(tp) &&
1783 ((to.to_flags & TOF_TS) == 0 ||
1784 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1785
1786 /*
1787 * If last ACK falls within this segment's sequence numbers,
1788 * record the timestamp.
1789 * NOTE that the test is modified according to the latest
1790 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1791 */
1792 if ((to.to_flags & TOF_TS) != 0 &&
1793 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1794 tp->ts_recent_age = tcp_ts_getticks();
1795 tp->ts_recent = to.to_tsval;
1796 }
1797
1798 if (tlen == 0) {
1799 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1800 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1801 !IN_RECOVERY(tp->t_flags) &&
1802 (to.to_flags & TOF_SACK) == 0 &&
1803 TAILQ_EMPTY(&tp->snd_holes)) {
1804 /*
1805 * This is a pure ack for outstanding data.
1806 */
1807 if (ti_locked == TI_RLOCKED)
1808 INP_INFO_RUNLOCK(&V_tcbinfo);
1809 ti_locked = TI_UNLOCKED;
1810
1811 TCPSTAT_INC(tcps_predack);
1812
1813 /*
1814 * "bad retransmit" recovery.
1815 */
1816 if (tp->t_rxtshift == 1 &&
1817 tp->t_flags & TF_PREVVALID &&
1818 (int)(ticks - tp->t_badrxtwin) < 0) {
1819 cc_cong_signal(tp, th, CC_RTO_ERR);
1820 }
1821
1822 /*
1823 * Recalculate the transmit timer / rtt.
1824 *
1825 * Some boxes send broken timestamp replies
1826 * during the SYN+ACK phase, ignore
1827 * timestamps of 0 or we could calculate a
1828 * huge RTT and blow up the retransmit timer.
1829 */
1830 if ((to.to_flags & TOF_TS) != 0 &&
1831 to.to_tsecr) {
1832 u_int t;
1833
1834 t = tcp_ts_getticks() - to.to_tsecr;
1835 if (!tp->t_rttlow || tp->t_rttlow > t)
1836 tp->t_rttlow = t;
1837 tcp_xmit_timer(tp,
1838 TCP_TS_TO_TICKS(t) + 1);
1839 } else if (tp->t_rtttime &&
1840 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1841 if (!tp->t_rttlow ||
1842 tp->t_rttlow > ticks - tp->t_rtttime)
1843 tp->t_rttlow = ticks - tp->t_rtttime;
1844 tcp_xmit_timer(tp,
1845 ticks - tp->t_rtttime);
1846 }
1847 acked = BYTES_THIS_ACK(tp, th);
1848
1849 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1850 hhook_run_tcp_est_in(tp, th, &to);
1851
1852 TCPSTAT_INC(tcps_rcvackpack);
1853 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1854 sbdrop(&so->so_snd, acked);
1855 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1856 SEQ_LEQ(th->th_ack, tp->snd_recover))
1857 tp->snd_recover = th->th_ack - 1;
1858
1859 /*
1860 * Let the congestion control algorithm update
1861 * congestion control related information. This
1862 * typically means increasing the congestion
1863 * window.
1864 */
1865 cc_ack_received(tp, th, CC_ACK);
1866
1867 tp->snd_una = th->th_ack;
1868 /*
1869 * Pull snd_wl2 up to prevent seq wrap relative
1870 * to th_ack.
1871 */
1872 tp->snd_wl2 = th->th_ack;
1873 tp->t_dupacks = 0;
1874 m_freem(m);
1875
1876 /*
1877 * If all outstanding data are acked, stop
1878 * retransmit timer, otherwise restart timer
1879 * using current (possibly backed-off) value.
1880 * If process is waiting for space,
1881 * wakeup/selwakeup/signal. If data
1882 * are ready to send, let tcp_output
1883 * decide between more output or persist.
1884 */
1885 #ifdef TCPDEBUG
1886 if (so->so_options & SO_DEBUG)
1887 tcp_trace(TA_INPUT, ostate, tp,
1888 (void *)tcp_saveipgen,
1889 &tcp_savetcp, 0);
1890 #endif
1891 TCP_PROBE3(debug__input, tp, th, m);
1892 if (tp->snd_una == tp->snd_max)
1893 tcp_timer_activate(tp, TT_REXMT, 0);
1894 else if (!tcp_timer_active(tp, TT_PERSIST))
1895 tcp_timer_activate(tp, TT_REXMT,
1896 tp->t_rxtcur);
1897 sowwakeup(so);
1898 if (sbavail(&so->so_snd))
1899 (void) tp->t_fb->tfb_tcp_output(tp);
1900 goto check_delack;
1901 }
1902 } else if (th->th_ack == tp->snd_una &&
1903 tlen <= sbspace(&so->so_rcv)) {
1904 int newsize = 0; /* automatic sockbuf scaling */
1905
1906 /*
1907 * This is a pure, in-sequence data packet with
1908 * nothing on the reassembly queue and we have enough
1909 * buffer space to take it.
1910 */
1911 if (ti_locked == TI_RLOCKED)
1912 INP_INFO_RUNLOCK(&V_tcbinfo);
1913 ti_locked = TI_UNLOCKED;
1914
1915 /* Clean receiver SACK report if present */
1916 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1917 tcp_clean_sackreport(tp);
1918 TCPSTAT_INC(tcps_preddat);
1919 tp->rcv_nxt += tlen;
1920 /*
1921 * Pull snd_wl1 up to prevent seq wrap relative to
1922 * th_seq.
1923 */
1924 tp->snd_wl1 = th->th_seq;
1925 /*
1926 * Pull rcv_up up to prevent seq wrap relative to
1927 * rcv_nxt.
1928 */
1929 tp->rcv_up = tp->rcv_nxt;
1930 TCPSTAT_INC(tcps_rcvpack);
1931 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1932 #ifdef TCPDEBUG
1933 if (so->so_options & SO_DEBUG)
1934 tcp_trace(TA_INPUT, ostate, tp,
1935 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1936 #endif
1937 TCP_PROBE3(debug__input, tp, th, m);
1938
1939 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
1940
1941 /* Add data to socket buffer. */
1942 SOCKBUF_LOCK(&so->so_rcv);
1943 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1944 m_freem(m);
1945 } else {
1946 /*
1947 * Set new socket buffer size.
1948 * Give up when limit is reached.
1949 */
1950 if (newsize)
1951 if (!sbreserve_locked(&so->so_rcv,
1952 newsize, so, NULL))
1953 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1954 m_adj(m, drop_hdrlen); /* delayed header drop */
1955 sbappendstream_locked(&so->so_rcv, m, 0);
1956 }
1957 /* NB: sorwakeup_locked() does an implicit unlock. */
1958 sorwakeup_locked(so);
1959 if (DELAY_ACK(tp, tlen)) {
1960 tp->t_flags |= TF_DELACK;
1961 } else {
1962 tp->t_flags |= TF_ACKNOW;
1963 tp->t_fb->tfb_tcp_output(tp);
1964 }
1965 goto check_delack;
1966 }
1967 }
1968
1969 /*
1970 * Calculate amount of space in receive window,
1971 * and then do TCP input processing.
1972 * Receive window is amount of space in rcv queue,
1973 * but not less than advertised window.
1974 */
1975 win = sbspace(&so->so_rcv);
1976 if (win < 0)
1977 win = 0;
1978 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1979
1980 switch (tp->t_state) {
1981
1982 /*
1983 * If the state is SYN_RECEIVED:
1984 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1985 */
1986 case TCPS_SYN_RECEIVED:
1987 if ((thflags & TH_ACK) &&
1988 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1989 SEQ_GT(th->th_ack, tp->snd_max))) {
1990 rstreason = BANDLIM_RST_OPENPORT;
1991 goto dropwithreset;
1992 }
1993 #ifdef TCP_RFC7413
1994 if (tp->t_flags & TF_FASTOPEN) {
1995 /*
1996 * When a TFO connection is in SYN_RECEIVED, the
1997 * only valid packets are the initial SYN, a
1998 * retransmit/copy of the initial SYN (possibly with
1999 * a subset of the original data), a valid ACK, a
2000 * FIN, or a RST.
2001 */
2002 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
2003 rstreason = BANDLIM_RST_OPENPORT;
2004 goto dropwithreset;
2005 } else if (thflags & TH_SYN) {
2006 /* non-initial SYN is ignored */
2007 if ((tcp_timer_active(tp, TT_DELACK) ||
2008 tcp_timer_active(tp, TT_REXMT)))
2009 goto drop;
2010 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) {
2011 goto drop;
2012 }
2013 }
2014 #endif
2015 break;
2016
2017 /*
2018 * If the state is SYN_SENT:
2019 * if seg contains a RST, then drop the connection.
2020 * if seg does not contain SYN, then drop it.
2021 * Otherwise this is an acceptable SYN segment
2022 * initialize tp->rcv_nxt and tp->irs
2023 * if seg contains ack then advance tp->snd_una
2024 * if seg contains an ECE and ECN support is enabled, the stream
2025 * is ECN capable.
2026 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
2027 * arrange for segment to be acked (eventually)
2028 * continue processing rest of data/controls, beginning with URG
2029 */
2030 case TCPS_SYN_SENT:
2031 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
2032 TCP_PROBE5(connect__refused, NULL, tp,
2033 m, tp, th);
2034 tp = tcp_drop(tp, ECONNREFUSED);
2035 }
2036 if (thflags & TH_RST)
2037 goto drop;
2038 if (!(thflags & TH_SYN))
2039 goto drop;
2040
2041 tp->irs = th->th_seq;
2042 tcp_rcvseqinit(tp);
2043 if (thflags & TH_ACK) {
2044 TCPSTAT_INC(tcps_connects);
2045 soisconnected(so);
2046 #ifdef MAC
2047 mac_socketpeer_set_from_mbuf(m, so);
2048 #endif
2049 /* Do window scaling on this connection? */
2050 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2051 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2052 tp->rcv_scale = tp->request_r_scale;
2053 }
2054 tp->rcv_adv += imin(tp->rcv_wnd,
2055 TCP_MAXWIN << tp->rcv_scale);
2056 tp->snd_una++; /* SYN is acked */
2057 /*
2058 * If there's data, delay ACK; if there's also a FIN
2059 * ACKNOW will be turned on later.
2060 */
2061 if (DELAY_ACK(tp, tlen) && tlen != 0)
2062 tcp_timer_activate(tp, TT_DELACK,
2063 tcp_delacktime);
2064 else
2065 tp->t_flags |= TF_ACKNOW;
2066
2067 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
2068 tp->t_flags |= TF_ECN_PERMIT;
2069 TCPSTAT_INC(tcps_ecn_shs);
2070 }
2071
2072 /*
2073 * Received <SYN,ACK> in SYN_SENT[*] state.
2074 * Transitions:
2075 * SYN_SENT --> ESTABLISHED
2076 * SYN_SENT* --> FIN_WAIT_1
2077 */
2078 tp->t_starttime = ticks;
2079 if (tp->t_flags & TF_NEEDFIN) {
2080 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2081 tp->t_flags &= ~TF_NEEDFIN;
2082 thflags &= ~TH_SYN;
2083 } else {
2084 tcp_state_change(tp, TCPS_ESTABLISHED);
2085 TCP_PROBE5(connect__established, NULL, tp,
2086 m, tp, th);
2087 cc_conn_init(tp);
2088 tcp_timer_activate(tp, TT_KEEP,
2089 TP_KEEPIDLE(tp));
2090 }
2091 } else {
2092 /*
2093 * Received initial SYN in SYN-SENT[*] state =>
2094 * simultaneous open.
2095 * If it succeeds, connection is * half-synchronized.
2096 * Otherwise, do 3-way handshake:
2097 * SYN-SENT -> SYN-RECEIVED
2098 * SYN-SENT* -> SYN-RECEIVED*
2099 */
2100 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
2101 tcp_timer_activate(tp, TT_REXMT, 0);
2102 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2103 }
2104
2105 KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: "
2106 "ti_locked %d", __func__, ti_locked));
2107 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2108 INP_WLOCK_ASSERT(tp->t_inpcb);
2109
2110 /*
2111 * Advance th->th_seq to correspond to first data byte.
2112 * If data, trim to stay within window,
2113 * dropping FIN if necessary.
2114 */
2115 th->th_seq++;
2116 if (tlen > tp->rcv_wnd) {
2117 todrop = tlen - tp->rcv_wnd;
2118 m_adj(m, -todrop);
2119 tlen = tp->rcv_wnd;
2120 thflags &= ~TH_FIN;
2121 TCPSTAT_INC(tcps_rcvpackafterwin);
2122 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2123 }
2124 tp->snd_wl1 = th->th_seq - 1;
2125 tp->rcv_up = th->th_seq;
2126 /*
2127 * Client side of transaction: already sent SYN and data.
2128 * If the remote host used T/TCP to validate the SYN,
2129 * our data will be ACK'd; if so, enter normal data segment
2130 * processing in the middle of step 5, ack processing.
2131 * Otherwise, goto step 6.
2132 */
2133 if (thflags & TH_ACK)
2134 goto process_ACK;
2135
2136 goto step6;
2137
2138 /*
2139 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2140 * do normal processing.
2141 *
2142 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2143 */
2144 case TCPS_LAST_ACK:
2145 case TCPS_CLOSING:
2146 break; /* continue normal processing */
2147 }
2148
2149 /*
2150 * States other than LISTEN or SYN_SENT.
2151 * First check the RST flag and sequence number since reset segments
2152 * are exempt from the timestamp and connection count tests. This
2153 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2154 * below which allowed reset segments in half the sequence space
2155 * to fall though and be processed (which gives forged reset
2156 * segments with a random sequence number a 50 percent chance of
2157 * killing a connection).
2158 * Then check timestamp, if present.
2159 * Then check the connection count, if present.
2160 * Then check that at least some bytes of segment are within
2161 * receive window. If segment begins before rcv_nxt,
2162 * drop leading data (and SYN); if nothing left, just ack.
2163 */
2164 if (thflags & TH_RST) {
2165 /*
2166 * RFC5961 Section 3.2
2167 *
2168 * - RST drops connection only if SEG.SEQ == RCV.NXT.
2169 * - If RST is in window, we send challenge ACK.
2170 *
2171 * Note: to take into account delayed ACKs, we should
2172 * test against last_ack_sent instead of rcv_nxt.
2173 * Note 2: we handle special case of closed window, not
2174 * covered by the RFC.
2175 */
2176 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2177 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2178 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
2179
2180 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2181 KASSERT(ti_locked == TI_RLOCKED,
2182 ("%s: TH_RST ti_locked %d, th %p tp %p",
2183 __func__, ti_locked, th, tp));
2184 KASSERT(tp->t_state != TCPS_SYN_SENT,
2185 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
2186 __func__, th, tp));
2187
2188 if (V_tcp_insecure_rst ||
2189 tp->last_ack_sent == th->th_seq) {
2190 TCPSTAT_INC(tcps_drops);
2191 /* Drop the connection. */
2192 switch (tp->t_state) {
2193 case TCPS_SYN_RECEIVED:
2194 so->so_error = ECONNREFUSED;
2195 goto close;
2196 case TCPS_ESTABLISHED:
2197 case TCPS_FIN_WAIT_1:
2198 case TCPS_FIN_WAIT_2:
2199 case TCPS_CLOSE_WAIT:
2200 case TCPS_CLOSING:
2201 case TCPS_LAST_ACK:
2202 so->so_error = ECONNRESET;
2203 close:
2204 /* FALLTHROUGH */
2205 default:
2206 tp = tcp_close(tp);
2207 }
2208 } else {
2209 TCPSTAT_INC(tcps_badrst);
2210 /* Send challenge ACK. */
2211 tcp_respond(tp, mtod(m, void *), th, m,
2212 tp->rcv_nxt, tp->snd_nxt, TH_ACK);
2213 tp->last_ack_sent = tp->rcv_nxt;
2214 m = NULL;
2215 }
2216 }
2217 goto drop;
2218 }
2219
2220 /*
2221 * RFC5961 Section 4.2
2222 * Send challenge ACK for any SYN in synchronized state.
2223 */
2224 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT &&
2225 tp->t_state != TCPS_SYN_RECEIVED) {
2226 KASSERT(ti_locked == TI_RLOCKED,
2227 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2228 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2229
2230 TCPSTAT_INC(tcps_badsyn);
2231 if (V_tcp_insecure_syn &&
2232 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2233 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2234 tp = tcp_drop(tp, ECONNRESET);
2235 rstreason = BANDLIM_UNLIMITED;
2236 } else {
2237 /* Send challenge ACK. */
2238 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
2239 tp->snd_nxt, TH_ACK);
2240 tp->last_ack_sent = tp->rcv_nxt;
2241 m = NULL;
2242 }
2243 goto drop;
2244 }
2245
2246 /*
2247 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2248 * and it's less than ts_recent, drop it.
2249 */
2250 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2251 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2252
2253 /* Check to see if ts_recent is over 24 days old. */
2254 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2255 /*
2256 * Invalidate ts_recent. If this segment updates
2257 * ts_recent, the age will be reset later and ts_recent
2258 * will get a valid value. If it does not, setting
2259 * ts_recent to zero will at least satisfy the
2260 * requirement that zero be placed in the timestamp
2261 * echo reply when ts_recent isn't valid. The
2262 * age isn't reset until we get a valid ts_recent
2263 * because we don't want out-of-order segments to be
2264 * dropped when ts_recent is old.
2265 */
2266 tp->ts_recent = 0;
2267 } else {
2268 TCPSTAT_INC(tcps_rcvduppack);
2269 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2270 TCPSTAT_INC(tcps_pawsdrop);
2271 if (tlen)
2272 goto dropafterack;
2273 goto drop;
2274 }
2275 }
2276
2277 /*
2278 * In the SYN-RECEIVED state, validate that the packet belongs to
2279 * this connection before trimming the data to fit the receive
2280 * window. Check the sequence number versus IRS since we know
2281 * the sequence numbers haven't wrapped. This is a partial fix
2282 * for the "LAND" DoS attack.
2283 */
2284 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2285 rstreason = BANDLIM_RST_OPENPORT;
2286 goto dropwithreset;
2287 }
2288
2289 todrop = tp->rcv_nxt - th->th_seq;
2290 if (todrop > 0) {
2291 if (thflags & TH_SYN) {
2292 thflags &= ~TH_SYN;
2293 th->th_seq++;
2294 if (th->th_urp > 1)
2295 th->th_urp--;
2296 else
2297 thflags &= ~TH_URG;
2298 todrop--;
2299 }
2300 /*
2301 * Following if statement from Stevens, vol. 2, p. 960.
2302 */
2303 if (todrop > tlen
2304 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2305 /*
2306 * Any valid FIN must be to the left of the window.
2307 * At this point the FIN must be a duplicate or out
2308 * of sequence; drop it.
2309 */
2310 thflags &= ~TH_FIN;
2311
2312 /*
2313 * Send an ACK to resynchronize and drop any data.
2314 * But keep on processing for RST or ACK.
2315 */
2316 tp->t_flags |= TF_ACKNOW;
2317 todrop = tlen;
2318 TCPSTAT_INC(tcps_rcvduppack);
2319 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2320 } else {
2321 TCPSTAT_INC(tcps_rcvpartduppack);
2322 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2323 }
2324 /*
2325 * DSACK - add SACK block for dropped range
2326 */
2327 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) {
2328 tcp_update_sack_list(tp, th->th_seq,
2329 th->th_seq + todrop);
2330 /*
2331 * ACK now, as the next in-sequence segment
2332 * will clear the DSACK block again
2333 */
2334 tp->t_flags |= TF_ACKNOW;
2335 }
2336 drop_hdrlen += todrop; /* drop from the top afterwards */
2337 th->th_seq += todrop;
2338 tlen -= todrop;
2339 if (th->th_urp > todrop)
2340 th->th_urp -= todrop;
2341 else {
2342 thflags &= ~TH_URG;
2343 th->th_urp = 0;
2344 }
2345 }
2346
2347 /*
2348 * If new data are received on a connection after the
2349 * user processes are gone, then RST the other end.
2350 */
2351 if ((so->so_state & SS_NOFDREF) &&
2352 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2353 KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && "
2354 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2355 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2356
2357 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2358 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2359 "after socket was closed, "
2360 "sending RST and removing tcpcb\n",
2361 s, __func__, tcpstates[tp->t_state], tlen);
2362 free(s, M_TCPLOG);
2363 }
2364 tp = tcp_close(tp);
2365 TCPSTAT_INC(tcps_rcvafterclose);
2366 rstreason = BANDLIM_UNLIMITED;
2367 goto dropwithreset;
2368 }
2369
2370 /*
2371 * If segment ends after window, drop trailing data
2372 * (and PUSH and FIN); if nothing left, just ACK.
2373 */
2374 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2375 if (todrop > 0) {
2376 TCPSTAT_INC(tcps_rcvpackafterwin);
2377 if (todrop >= tlen) {
2378 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2379 /*
2380 * If window is closed can only take segments at
2381 * window edge, and have to drop data and PUSH from
2382 * incoming segments. Continue processing, but
2383 * remember to ack. Otherwise, drop segment
2384 * and ack.
2385 */
2386 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2387 tp->t_flags |= TF_ACKNOW;
2388 TCPSTAT_INC(tcps_rcvwinprobe);
2389 } else
2390 goto dropafterack;
2391 } else
2392 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2393 m_adj(m, -todrop);
2394 tlen -= todrop;
2395 thflags &= ~(TH_PUSH|TH_FIN);
2396 }
2397
2398 /*
2399 * If last ACK falls within this segment's sequence numbers,
2400 * record its timestamp.
2401 * NOTE:
2402 * 1) That the test incorporates suggestions from the latest
2403 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2404 * 2) That updating only on newer timestamps interferes with
2405 * our earlier PAWS tests, so this check should be solely
2406 * predicated on the sequence space of this segment.
2407 * 3) That we modify the segment boundary check to be
2408 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2409 * instead of RFC1323's
2410 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2411 * This modified check allows us to overcome RFC1323's
2412 * limitations as described in Stevens TCP/IP Illustrated
2413 * Vol. 2 p.869. In such cases, we can still calculate the
2414 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2415 */
2416 if ((to.to_flags & TOF_TS) != 0 &&
2417 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2418 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2419 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2420 tp->ts_recent_age = tcp_ts_getticks();
2421 tp->ts_recent = to.to_tsval;
2422 }
2423
2424 /*
2425 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2426 * flag is on (half-synchronized state), then queue data for
2427 * later processing; else drop segment and return.
2428 */
2429 if ((thflags & TH_ACK) == 0) {
2430 if (tp->t_state == TCPS_SYN_RECEIVED ||
2431 (tp->t_flags & TF_NEEDSYN)) {
2432 #ifdef TCP_RFC7413
2433 if (tp->t_state == TCPS_SYN_RECEIVED &&
2434 tp->t_flags & TF_FASTOPEN) {
2435 tp->snd_wnd = tiwin;
2436 cc_conn_init(tp);
2437 }
2438 #endif
2439 goto step6;
2440 } else if (tp->t_flags & TF_ACKNOW)
2441 goto dropafterack;
2442 else
2443 goto drop;
2444 }
2445
2446 /*
2447 * Ack processing.
2448 */
2449 switch (tp->t_state) {
2450
2451 /*
2452 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2453 * ESTABLISHED state and continue processing.
2454 * The ACK was checked above.
2455 */
2456 case TCPS_SYN_RECEIVED:
2457
2458 TCPSTAT_INC(tcps_connects);
2459 soisconnected(so);
2460 /* Do window scaling? */
2461 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2462 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2463 tp->rcv_scale = tp->request_r_scale;
2464 tp->snd_wnd = tiwin;
2465 }
2466 /*
2467 * Make transitions:
2468 * SYN-RECEIVED -> ESTABLISHED
2469 * SYN-RECEIVED* -> FIN-WAIT-1
2470 */
2471 tp->t_starttime = ticks;
2472 if (tp->t_flags & TF_NEEDFIN) {
2473 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2474 tp->t_flags &= ~TF_NEEDFIN;
2475 } else {
2476 tcp_state_change(tp, TCPS_ESTABLISHED);
2477 TCP_PROBE5(accept__established, NULL, tp,
2478 m, tp, th);
2479 #ifdef TCP_RFC7413
2480 if (tp->t_tfo_pending) {
2481 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2482 tp->t_tfo_pending = NULL;
2483
2484 /*
2485 * Account for the ACK of our SYN prior to
2486 * regular ACK processing below.
2487 */
2488 tp->snd_una++;
2489 }
2490 /*
2491 * TFO connections call cc_conn_init() during SYN
2492 * processing. Calling it again here for such
2493 * connections is not harmless as it would undo the
2494 * snd_cwnd reduction that occurs when a TFO SYN|ACK
2495 * is retransmitted.
2496 */
2497 if (!(tp->t_flags & TF_FASTOPEN))
2498 #endif
2499 cc_conn_init(tp);
2500 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2501 }
2502 /*
2503 * If segment contains data or ACK, will call tcp_reass()
2504 * later; if not, do so now to pass queued data to user.
2505 */
2506 if (tlen == 0 && (thflags & TH_FIN) == 0)
2507 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
2508 (struct mbuf *)0);
2509 tp->snd_wl1 = th->th_seq - 1;
2510 /* FALLTHROUGH */
2511
2512 /*
2513 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2514 * ACKs. If the ack is in the range
2515 * tp->snd_una < th->th_ack <= tp->snd_max
2516 * then advance tp->snd_una to th->th_ack and drop
2517 * data from the retransmission queue. If this ACK reflects
2518 * more up to date window information we update our window information.
2519 */
2520 case TCPS_ESTABLISHED:
2521 case TCPS_FIN_WAIT_1:
2522 case TCPS_FIN_WAIT_2:
2523 case TCPS_CLOSE_WAIT:
2524 case TCPS_CLOSING:
2525 case TCPS_LAST_ACK:
2526 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2527 TCPSTAT_INC(tcps_rcvacktoomuch);
2528 goto dropafterack;
2529 }
2530 if ((tp->t_flags & TF_SACK_PERMIT) &&
2531 ((to.to_flags & TOF_SACK) ||
2532 !TAILQ_EMPTY(&tp->snd_holes)))
2533 sack_changed = tcp_sack_doack(tp, &to, th->th_ack);
2534 else
2535 /*
2536 * Reset the value so that previous (valid) value
2537 * from the last ack with SACK doesn't get used.
2538 */
2539 tp->sackhint.sacked_bytes = 0;
2540
2541 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2542 hhook_run_tcp_est_in(tp, th, &to);
2543
2544 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2545 u_int maxseg;
2546
2547 maxseg = tcp_maxseg(tp);
2548 if (tlen == 0 &&
2549 (tiwin == tp->snd_wnd ||
2550 (tp->t_flags & TF_SACK_PERMIT))) {
2551 /*
2552 * If this is the first time we've seen a
2553 * FIN from the remote, this is not a
2554 * duplicate and it needs to be processed
2555 * normally. This happens during a
2556 * simultaneous close.
2557 */
2558 if ((thflags & TH_FIN) &&
2559 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2560 tp->t_dupacks = 0;
2561 break;
2562 }
2563 TCPSTAT_INC(tcps_rcvdupack);
2564 /*
2565 * If we have outstanding data (other than
2566 * a window probe), this is a completely
2567 * duplicate ack (ie, window info didn't
2568 * change and FIN isn't set),
2569 * the ack is the biggest we've
2570 * seen and we've seen exactly our rexmt
2571 * threshold of them, assume a packet
2572 * has been dropped and retransmit it.
2573 * Kludge snd_nxt & the congestion
2574 * window so we send only this one
2575 * packet.
2576 *
2577 * We know we're losing at the current
2578 * window size so do congestion avoidance
2579 * (set ssthresh to half the current window
2580 * and pull our congestion window back to
2581 * the new ssthresh).
2582 *
2583 * Dup acks mean that packets have left the
2584 * network (they're now cached at the receiver)
2585 * so bump cwnd by the amount in the receiver
2586 * to keep a constant cwnd packets in the
2587 * network.
2588 *
2589 * When using TCP ECN, notify the peer that
2590 * we reduced the cwnd.
2591 */
2592 /*
2593 * Following 2 kinds of acks should not affect
2594 * dupack counting:
2595 * 1) Old acks
2596 * 2) Acks with SACK but without any new SACK
2597 * information in them. These could result from
2598 * any anomaly in the network like a switch
2599 * duplicating packets or a possible DoS attack.
2600 */
2601 if (th->th_ack != tp->snd_una ||
2602 ((tp->t_flags & TF_SACK_PERMIT) &&
2603 !sack_changed))
2604 break;
2605 else if (!tcp_timer_active(tp, TT_REXMT))
2606 tp->t_dupacks = 0;
2607 else if (++tp->t_dupacks > tcprexmtthresh ||
2608 IN_FASTRECOVERY(tp->t_flags)) {
2609 cc_ack_received(tp, th, CC_DUPACK);
2610 if ((tp->t_flags & TF_SACK_PERMIT) &&
2611 IN_FASTRECOVERY(tp->t_flags)) {
2612 int awnd;
2613
2614 /*
2615 * Compute the amount of data in flight first.
2616 * We can inject new data into the pipe iff
2617 * we have less than 1/2 the original window's
2618 * worth of data in flight.
2619 */
2620 if (V_tcp_do_rfc6675_pipe)
2621 awnd = tcp_compute_pipe(tp);
2622 else
2623 awnd = (tp->snd_nxt - tp->snd_fack) +
2624 tp->sackhint.sack_bytes_rexmit;
2625
2626 if (awnd < tp->snd_ssthresh) {
2627 tp->snd_cwnd += maxseg;
2628 if (tp->snd_cwnd > tp->snd_ssthresh)
2629 tp->snd_cwnd = tp->snd_ssthresh;
2630 }
2631 } else
2632 tp->snd_cwnd += maxseg;
2633 (void) tp->t_fb->tfb_tcp_output(tp);
2634 goto drop;
2635 } else if (tp->t_dupacks == tcprexmtthresh) {
2636 tcp_seq onxt = tp->snd_nxt;
2637
2638 /*
2639 * If we're doing sack, check to
2640 * see if we're already in sack
2641 * recovery. If we're not doing sack,
2642 * check to see if we're in newreno
2643 * recovery.
2644 */
2645 if (tp->t_flags & TF_SACK_PERMIT) {
2646 if (IN_FASTRECOVERY(tp->t_flags)) {
2647 tp->t_dupacks = 0;
2648 break;
2649 }
2650 } else {
2651 if (SEQ_LEQ(th->th_ack,
2652 tp->snd_recover)) {
2653 tp->t_dupacks = 0;
2654 break;
2655 }
2656 }
2657 /* Congestion signal before ack. */
2658 cc_cong_signal(tp, th, CC_NDUPACK);
2659 cc_ack_received(tp, th, CC_DUPACK);
2660 tcp_timer_activate(tp, TT_REXMT, 0);
2661 tp->t_rtttime = 0;
2662 if (tp->t_flags & TF_SACK_PERMIT) {
2663 TCPSTAT_INC(
2664 tcps_sack_recovery_episode);
2665 tp->sack_newdata = tp->snd_nxt;
2666 tp->snd_cwnd = maxseg;
2667 (void) tp->t_fb->tfb_tcp_output(tp);
2668 goto drop;
2669 }
2670 tp->snd_nxt = th->th_ack;
2671 tp->snd_cwnd = maxseg;
2672 (void) tp->t_fb->tfb_tcp_output(tp);
2673 KASSERT(tp->snd_limited <= 2,
2674 ("%s: tp->snd_limited too big",
2675 __func__));
2676 tp->snd_cwnd = tp->snd_ssthresh +
2677 maxseg *
2678 (tp->t_dupacks - tp->snd_limited);
2679 if (SEQ_GT(onxt, tp->snd_nxt))
2680 tp->snd_nxt = onxt;
2681 goto drop;
2682 } else if (V_tcp_do_rfc3042) {
2683 /*
2684 * Process first and second duplicate
2685 * ACKs. Each indicates a segment
2686 * leaving the network, creating room
2687 * for more. Make sure we can send a
2688 * packet on reception of each duplicate
2689 * ACK by increasing snd_cwnd by one
2690 * segment. Restore the original
2691 * snd_cwnd after packet transmission.
2692 */
2693 cc_ack_received(tp, th, CC_DUPACK);
2694 u_long oldcwnd = tp->snd_cwnd;
2695 tcp_seq oldsndmax = tp->snd_max;
2696 u_int sent;
2697 int avail;
2698
2699 KASSERT(tp->t_dupacks == 1 ||
2700 tp->t_dupacks == 2,
2701 ("%s: dupacks not 1 or 2",
2702 __func__));
2703 if (tp->t_dupacks == 1)
2704 tp->snd_limited = 0;
2705 tp->snd_cwnd =
2706 (tp->snd_nxt - tp->snd_una) +
2707 (tp->t_dupacks - tp->snd_limited) *
2708 maxseg;
2709 /*
2710 * Only call tcp_output when there
2711 * is new data available to be sent.
2712 * Otherwise we would send pure ACKs.
2713 */
2714 SOCKBUF_LOCK(&so->so_snd);
2715 avail = sbavail(&so->so_snd) -
2716 (tp->snd_nxt - tp->snd_una);
2717 SOCKBUF_UNLOCK(&so->so_snd);
2718 if (avail > 0)
2719 (void) tp->t_fb->tfb_tcp_output(tp);
2720 sent = tp->snd_max - oldsndmax;
2721 if (sent > maxseg) {
2722 KASSERT((tp->t_dupacks == 2 &&
2723 tp->snd_limited == 0) ||
2724 (sent == maxseg + 1 &&
2725 tp->t_flags & TF_SENTFIN),
2726 ("%s: sent too much",
2727 __func__));
2728 tp->snd_limited = 2;
2729 } else if (sent > 0)
2730 ++tp->snd_limited;
2731 tp->snd_cwnd = oldcwnd;
2732 goto drop;
2733 }
2734 }
2735 break;
2736 } else {
2737 /*
2738 * This ack is advancing the left edge, reset the
2739 * counter.
2740 */
2741 tp->t_dupacks = 0;
2742 /*
2743 * If this ack also has new SACK info, increment the
2744 * counter as per rfc6675.
2745 */
2746 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed)
2747 tp->t_dupacks++;
2748 }
2749
2750 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2751 ("%s: th_ack <= snd_una", __func__));
2752
2753 /*
2754 * If the congestion window was inflated to account
2755 * for the other side's cached packets, retract it.
2756 */
2757 if (IN_FASTRECOVERY(tp->t_flags)) {
2758 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2759 if (tp->t_flags & TF_SACK_PERMIT)
2760 tcp_sack_partialack(tp, th);
2761 else
2762 tcp_newreno_partial_ack(tp, th);
2763 } else
2764 cc_post_recovery(tp, th);
2765 }
2766 /*
2767 * If we reach this point, ACK is not a duplicate,
2768 * i.e., it ACKs something we sent.
2769 */
2770 if (tp->t_flags & TF_NEEDSYN) {
2771 /*
2772 * T/TCP: Connection was half-synchronized, and our
2773 * SYN has been ACK'd (so connection is now fully
2774 * synchronized). Go to non-starred state,
2775 * increment snd_una for ACK of SYN, and check if
2776 * we can do window scaling.
2777 */
2778 tp->t_flags &= ~TF_NEEDSYN;
2779 tp->snd_una++;
2780 /* Do window scaling? */
2781 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2782 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2783 tp->rcv_scale = tp->request_r_scale;
2784 /* Send window already scaled. */
2785 }
2786 }
2787
2788 process_ACK:
2789 INP_WLOCK_ASSERT(tp->t_inpcb);
2790
2791 acked = BYTES_THIS_ACK(tp, th);
2792 KASSERT(acked >= 0, ("%s: acked unexepectedly negative "
2793 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
2794 tp->snd_una, th->th_ack, tp, m));
2795 TCPSTAT_INC(tcps_rcvackpack);
2796 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2797
2798 /*
2799 * If we just performed our first retransmit, and the ACK
2800 * arrives within our recovery window, then it was a mistake
2801 * to do the retransmit in the first place. Recover our
2802 * original cwnd and ssthresh, and proceed to transmit where
2803 * we left off.
2804 */
2805 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2806 (int)(ticks - tp->t_badrxtwin) < 0)
2807 cc_cong_signal(tp, th, CC_RTO_ERR);
2808
2809 /*
2810 * If we have a timestamp reply, update smoothed
2811 * round trip time. If no timestamp is present but
2812 * transmit timer is running and timed sequence
2813 * number was acked, update smoothed round trip time.
2814 * Since we now have an rtt measurement, cancel the
2815 * timer backoff (cf., Phil Karn's retransmit alg.).
2816 * Recompute the initial retransmit timer.
2817 *
2818 * Some boxes send broken timestamp replies
2819 * during the SYN+ACK phase, ignore
2820 * timestamps of 0 or we could calculate a
2821 * huge RTT and blow up the retransmit timer.
2822 */
2823 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2824 u_int t;
2825
2826 t = tcp_ts_getticks() - to.to_tsecr;
2827 if (!tp->t_rttlow || tp->t_rttlow > t)
2828 tp->t_rttlow = t;
2829 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2830 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2831 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2832 tp->t_rttlow = ticks - tp->t_rtttime;
2833 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2834 }
2835
2836 /*
2837 * If all outstanding data is acked, stop retransmit
2838 * timer and remember to restart (more output or persist).
2839 * If there is more data to be acked, restart retransmit
2840 * timer, using current (possibly backed-off) value.
2841 */
2842 if (th->th_ack == tp->snd_max) {
2843 tcp_timer_activate(tp, TT_REXMT, 0);
2844 needoutput = 1;
2845 } else if (!tcp_timer_active(tp, TT_PERSIST))
2846 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2847
2848 /*
2849 * If no data (only SYN) was ACK'd,
2850 * skip rest of ACK processing.
2851 */
2852 if (acked == 0)
2853 goto step6;
2854
2855 /*
2856 * Let the congestion control algorithm update congestion
2857 * control related information. This typically means increasing
2858 * the congestion window.
2859 */
2860 cc_ack_received(tp, th, CC_ACK);
2861
2862 SOCKBUF_LOCK(&so->so_snd);
2863 if (acked > sbavail(&so->so_snd)) {
2864 if (tp->snd_wnd >= sbavail(&so->so_snd))
2865 tp->snd_wnd -= sbavail(&so->so_snd);
2866 else
2867 tp->snd_wnd = 0;
2868 mfree = sbcut_locked(&so->so_snd,
2869 (int)sbavail(&so->so_snd));
2870 ourfinisacked = 1;
2871 } else {
2872 mfree = sbcut_locked(&so->so_snd, acked);
2873 if (tp->snd_wnd >= (u_long) acked)
2874 tp->snd_wnd -= acked;
2875 else
2876 tp->snd_wnd = 0;
2877 ourfinisacked = 0;
2878 }
2879 /* NB: sowwakeup_locked() does an implicit unlock. */
2880 sowwakeup_locked(so);
2881 m_freem(mfree);
2882 /* Detect una wraparound. */
2883 if (!IN_RECOVERY(tp->t_flags) &&
2884 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2885 SEQ_LEQ(th->th_ack, tp->snd_recover))
2886 tp->snd_recover = th->th_ack - 1;
2887 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2888 if (IN_RECOVERY(tp->t_flags) &&
2889 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2890 EXIT_RECOVERY(tp->t_flags);
2891 }
2892 tp->snd_una = th->th_ack;
2893 if (tp->t_flags & TF_SACK_PERMIT) {
2894 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2895 tp->snd_recover = tp->snd_una;
2896 }
2897 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2898 tp->snd_nxt = tp->snd_una;
2899
2900 switch (tp->t_state) {
2901
2902 /*
2903 * In FIN_WAIT_1 STATE in addition to the processing
2904 * for the ESTABLISHED state if our FIN is now acknowledged
2905 * then enter FIN_WAIT_2.
2906 */
2907 case TCPS_FIN_WAIT_1:
2908 if (ourfinisacked) {
2909 /*
2910 * If we can't receive any more
2911 * data, then closing user can proceed.
2912 * Starting the timer is contrary to the
2913 * specification, but if we don't get a FIN
2914 * we'll hang forever.
2915 *
2916 * XXXjl:
2917 * we should release the tp also, and use a
2918 * compressed state.
2919 */
2920 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2921 soisdisconnected(so);
2922 tcp_timer_activate(tp, TT_2MSL,
2923 (tcp_fast_finwait2_recycle ?
2924 tcp_finwait2_timeout :
2925 TP_MAXIDLE(tp)));
2926 }
2927 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2928 }
2929 break;
2930
2931 /*
2932 * In CLOSING STATE in addition to the processing for
2933 * the ESTABLISHED state if the ACK acknowledges our FIN
2934 * then enter the TIME-WAIT state, otherwise ignore
2935 * the segment.
2936 */
2937 case TCPS_CLOSING:
2938 if (ourfinisacked) {
2939 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2940 tcp_twstart(tp);
2941 INP_INFO_RUNLOCK(&V_tcbinfo);
2942 m_freem(m);
2943 return;
2944 }
2945 break;
2946
2947 /*
2948 * In LAST_ACK, we may still be waiting for data to drain
2949 * and/or to be acked, as well as for the ack of our FIN.
2950 * If our FIN is now acknowledged, delete the TCB,
2951 * enter the closed state and return.
2952 */
2953 case TCPS_LAST_ACK:
2954 if (ourfinisacked) {
2955 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
2956 tp = tcp_close(tp);
2957 goto drop;
2958 }
2959 break;
2960 }
2961 }
2962
2963 step6:
2964 INP_WLOCK_ASSERT(tp->t_inpcb);
2965
2966 /*
2967 * Update window information.
2968 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2969 */
2970 if ((thflags & TH_ACK) &&
2971 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2972 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2973 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2974 /* keep track of pure window updates */
2975 if (tlen == 0 &&
2976 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2977 TCPSTAT_INC(tcps_rcvwinupd);
2978 tp->snd_wnd = tiwin;
2979 tp->snd_wl1 = th->th_seq;
2980 tp->snd_wl2 = th->th_ack;
2981 if (tp->snd_wnd > tp->max_sndwnd)
2982 tp->max_sndwnd = tp->snd_wnd;
2983 needoutput = 1;
2984 }
2985
2986 /*
2987 * Process segments with URG.
2988 */
2989 if ((thflags & TH_URG) && th->th_urp &&
2990 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2991 /*
2992 * This is a kludge, but if we receive and accept
2993 * random urgent pointers, we'll crash in
2994 * soreceive. It's hard to imagine someone
2995 * actually wanting to send this much urgent data.
2996 */
2997 SOCKBUF_LOCK(&so->so_rcv);
2998 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
2999 th->th_urp = 0; /* XXX */
3000 thflags &= ~TH_URG; /* XXX */
3001 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
3002 goto dodata; /* XXX */
3003 }
3004 /*
3005 * If this segment advances the known urgent pointer,
3006 * then mark the data stream. This should not happen
3007 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
3008 * a FIN has been received from the remote side.
3009 * In these states we ignore the URG.
3010 *
3011 * According to RFC961 (Assigned Protocols),
3012 * the urgent pointer points to the last octet
3013 * of urgent data. We continue, however,
3014 * to consider it to indicate the first octet
3015 * of data past the urgent section as the original
3016 * spec states (in one of two places).
3017 */
3018 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
3019 tp->rcv_up = th->th_seq + th->th_urp;
3020 so->so_oobmark = sbavail(&so->so_rcv) +
3021 (tp->rcv_up - tp->rcv_nxt) - 1;
3022 if (so->so_oobmark == 0)
3023 so->so_rcv.sb_state |= SBS_RCVATMARK;
3024 sohasoutofband(so);
3025 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
3026 }
3027 SOCKBUF_UNLOCK(&so->so_rcv);
3028 /*
3029 * Remove out of band data so doesn't get presented to user.
3030 * This can happen independent of advancing the URG pointer,
3031 * but if two URG's are pending at once, some out-of-band
3032 * data may creep in... ick.
3033 */
3034 if (th->th_urp <= (u_long)tlen &&
3035 !(so->so_options & SO_OOBINLINE)) {
3036 /* hdr drop is delayed */
3037 tcp_pulloutofband(so, th, m, drop_hdrlen);
3038 }
3039 } else {
3040 /*
3041 * If no out of band data is expected,
3042 * pull receive urgent pointer along
3043 * with the receive window.
3044 */
3045 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3046 tp->rcv_up = tp->rcv_nxt;
3047 }
3048 dodata: /* XXX */
3049 INP_WLOCK_ASSERT(tp->t_inpcb);
3050
3051 /*
3052 * Process the segment text, merging it into the TCP sequencing queue,
3053 * and arranging for acknowledgment of receipt if necessary.
3054 * This process logically involves adjusting tp->rcv_wnd as data
3055 * is presented to the user (this happens in tcp_usrreq.c,
3056 * case PRU_RCVD). If a FIN has already been received on this
3057 * connection then we just ignore the text.
3058 */
3059 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
3060 (tp->t_flags & TF_FASTOPEN));
3061 if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
3062 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3063 tcp_seq save_start = th->th_seq;
3064 tcp_seq save_rnxt = tp->rcv_nxt;
3065 int save_tlen = tlen;
3066 m_adj(m, drop_hdrlen); /* delayed header drop */
3067 /*
3068 * Insert segment which includes th into TCP reassembly queue
3069 * with control block tp. Set thflags to whether reassembly now
3070 * includes a segment with FIN. This handles the common case
3071 * inline (segment is the next to be received on an established
3072 * connection, and the queue is empty), avoiding linkage into
3073 * and removal from the queue and repetition of various
3074 * conversions.
3075 * Set DELACK for segments received in order, but ack
3076 * immediately when segments are out of order (so
3077 * fast retransmit can work).
3078 */
3079 if (th->th_seq == tp->rcv_nxt &&
3080 SEGQ_EMPTY(tp) &&
3081 (TCPS_HAVEESTABLISHED(tp->t_state) ||
3082 tfo_syn)) {
3083 if (DELAY_ACK(tp, tlen) || tfo_syn)
3084 tp->t_flags |= TF_DELACK;
3085 else
3086 tp->t_flags |= TF_ACKNOW;
3087 tp->rcv_nxt += tlen;
3088 thflags = th->th_flags & TH_FIN;
3089 TCPSTAT_INC(tcps_rcvpack);
3090 TCPSTAT_ADD(tcps_rcvbyte, tlen);
3091 SOCKBUF_LOCK(&so->so_rcv);
3092 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3093 m_freem(m);
3094 else
3095 sbappendstream_locked(&so->so_rcv, m, 0);
3096 /* NB: sorwakeup_locked() does an implicit unlock. */
3097 sorwakeup_locked(so);
3098 } else {
3099 /*
3100 * XXX: Due to the header drop above "th" is
3101 * theoretically invalid by now. Fortunately
3102 * m_adj() doesn't actually frees any mbufs
3103 * when trimming from the head.
3104 */
3105 tcp_seq temp = save_start;
3106 thflags = tcp_reass(tp, th, &temp, &tlen, m);
3107 tp->t_flags |= TF_ACKNOW;
3108 }
3109 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) {
3110 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
3111 /*
3112 * DSACK actually handled in the fastpath
3113 * above.
3114 */
3115 tcp_update_sack_list(tp, save_start,
3116 save_start + save_tlen);
3117 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
3118 if ((tp->rcv_numsacks >= 1) &&
3119 (tp->sackblks[0].end == save_start)) {
3120 /*
3121 * Partial overlap, recorded at todrop
3122 * above.
3123 */
3124 tcp_update_sack_list(tp,
3125 tp->sackblks[0].start,
3126 tp->sackblks[0].end);
3127 } else {
3128 tcp_update_dsack_list(tp, save_start,
3129 save_start + save_tlen);
3130 }
3131 } else if (tlen >= save_tlen) {
3132 /* Update of sackblks. */
3133 tcp_update_dsack_list(tp, save_start,
3134 save_start + save_tlen);
3135 } else if (tlen > 0) {
3136 tcp_update_dsack_list(tp, save_start,
3137 save_start + tlen);
3138 }
3139 }
3140 #if 0
3141 /*
3142 * Note the amount of data that peer has sent into
3143 * our window, in order to estimate the sender's
3144 * buffer size.
3145 * XXX: Unused.
3146 */
3147 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
3148 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
3149 else
3150 len = so->so_rcv.sb_hiwat;
3151 #endif
3152 } else {
3153 m_freem(m);
3154 thflags &= ~TH_FIN;
3155 }
3156
3157 /*
3158 * If FIN is received ACK the FIN and let the user know
3159 * that the connection is closing.
3160 */
3161 if (thflags & TH_FIN) {
3162 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3163 socantrcvmore(so);
3164 /*
3165 * If connection is half-synchronized
3166 * (ie NEEDSYN flag on) then delay ACK,
3167 * so it may be piggybacked when SYN is sent.
3168 * Otherwise, since we received a FIN then no
3169 * more input can be expected, send ACK now.
3170 */
3171 if (tp->t_flags & TF_NEEDSYN)
3172 tp->t_flags |= TF_DELACK;
3173 else
3174 tp->t_flags |= TF_ACKNOW;
3175 tp->rcv_nxt++;
3176 }
3177 switch (tp->t_state) {
3178
3179 /*
3180 * In SYN_RECEIVED and ESTABLISHED STATES
3181 * enter the CLOSE_WAIT state.
3182 */
3183 case TCPS_SYN_RECEIVED:
3184 tp->t_starttime = ticks;
3185 /* FALLTHROUGH */
3186 case TCPS_ESTABLISHED:
3187 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3188 break;
3189
3190 /*
3191 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3192 * enter the CLOSING state.
3193 */
3194 case TCPS_FIN_WAIT_1:
3195 tcp_state_change(tp, TCPS_CLOSING);
3196 break;
3197
3198 /*
3199 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3200 * starting the time-wait timer, turning off the other
3201 * standard timers.
3202 */
3203 case TCPS_FIN_WAIT_2:
3204 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
3205 KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata "
3206 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
3207 ti_locked));
3208
3209 tcp_twstart(tp);
3210 INP_INFO_RUNLOCK(&V_tcbinfo);
3211 return;
3212 }
3213 }
3214 if (ti_locked == TI_RLOCKED)
3215 INP_INFO_RUNLOCK(&V_tcbinfo);
3216 ti_locked = TI_UNLOCKED;
3217
3218 #ifdef TCPDEBUG
3219 if (so->so_options & SO_DEBUG)
3220 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3221 &tcp_savetcp, 0);
3222 #endif
3223 TCP_PROBE3(debug__input, tp, th, m);
3224
3225 /*
3226 * Return any desired output.
3227 */
3228 if (needoutput || (tp->t_flags & TF_ACKNOW))
3229 (void) tp->t_fb->tfb_tcp_output(tp);
3230
3231 check_delack:
3232 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3233 __func__, ti_locked));
3234 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3235 INP_WLOCK_ASSERT(tp->t_inpcb);
3236
3237 if (tp->t_flags & TF_DELACK) {
3238 tp->t_flags &= ~TF_DELACK;
3239 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3240 }
3241 INP_WUNLOCK(tp->t_inpcb);
3242 return;
3243
3244 dropafterack:
3245 /*
3246 * Generate an ACK dropping incoming segment if it occupies
3247 * sequence space, where the ACK reflects our state.
3248 *
3249 * We can now skip the test for the RST flag since all
3250 * paths to this code happen after packets containing
3251 * RST have been dropped.
3252 *
3253 * In the SYN-RECEIVED state, don't send an ACK unless the
3254 * segment we received passes the SYN-RECEIVED ACK test.
3255 * If it fails send a RST. This breaks the loop in the
3256 * "LAND" DoS attack, and also prevents an ACK storm
3257 * between two listening ports that have been sent forged
3258 * SYN segments, each with the source address of the other.
3259 */
3260 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3261 (SEQ_GT(tp->snd_una, th->th_ack) ||
3262 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3263 rstreason = BANDLIM_RST_OPENPORT;
3264 goto dropwithreset;
3265 }
3266 #ifdef TCPDEBUG
3267 if (so->so_options & SO_DEBUG)
3268 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3269 &tcp_savetcp, 0);
3270 #endif
3271 TCP_PROBE3(debug__input, tp, th, m);
3272 if (ti_locked == TI_RLOCKED)
3273 INP_INFO_RUNLOCK(&V_tcbinfo);
3274 ti_locked = TI_UNLOCKED;
3275
3276 tp->t_flags |= TF_ACKNOW;
3277 (void) tp->t_fb->tfb_tcp_output(tp);
3278 INP_WUNLOCK(tp->t_inpcb);
3279 m_freem(m);
3280 return;
3281
3282 dropwithreset:
3283 if (ti_locked == TI_RLOCKED)
3284 INP_INFO_RUNLOCK(&V_tcbinfo);
3285 ti_locked = TI_UNLOCKED;
3286
3287 if (tp != NULL) {
3288 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3289 INP_WUNLOCK(tp->t_inpcb);
3290 } else
3291 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3292 return;
3293
3294 drop:
3295 if (ti_locked == TI_RLOCKED) {
3296 INP_INFO_RUNLOCK(&V_tcbinfo);
3297 ti_locked = TI_UNLOCKED;
3298 }
3299 #ifdef INVARIANTS
3300 else
3301 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3302 #endif
3303
3304 /*
3305 * Drop space held by incoming segment and return.
3306 */
3307 #ifdef TCPDEBUG
3308 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3309 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3310 &tcp_savetcp, 0);
3311 #endif
3312 TCP_PROBE3(debug__input, tp, th, m);
3313 if (tp != NULL)
3314 INP_WUNLOCK(tp->t_inpcb);
3315 m_freem(m);
3316 }
3317
3318 /*
3319 * Issue RST and make ACK acceptable to originator of segment.
3320 * The mbuf must still include the original packet header.
3321 * tp may be NULL.
3322 */
3323 void
3324 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3325 int tlen, int rstreason)
3326 {
3327 #ifdef INET
3328 struct ip *ip;
3329 #endif
3330 #ifdef INET6
3331 struct ip6_hdr *ip6;
3332 #endif
3333
3334 if (tp != NULL) {
3335 INP_WLOCK_ASSERT(tp->t_inpcb);
3336 }
3337
3338 /* Don't bother if destination was broadcast/multicast. */
3339 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3340 goto drop;
3341 #ifdef INET6
3342 if (mtod(m, struct ip *)->ip_v == 6) {
3343 ip6 = mtod(m, struct ip6_hdr *);
3344 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3345 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3346 goto drop;
3347 /* IPv6 anycast check is done at tcp6_input() */
3348 }
3349 #endif
3350 #if defined(INET) && defined(INET6)
3351 else
3352 #endif
3353 #ifdef INET
3354 {
3355 ip = mtod(m, struct ip *);
3356 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3357 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3358 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3359 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3360 goto drop;
3361 }
3362 #endif
3363
3364 /* Perform bandwidth limiting. */
3365 if (badport_bandlim(rstreason) < 0)
3366 goto drop;
3367
3368 /* tcp_respond consumes the mbuf chain. */
3369 if (th->th_flags & TH_ACK) {
3370 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3371 th->th_ack, TH_RST);
3372 } else {
3373 if (th->th_flags & TH_SYN)
3374 tlen++;
3375 if (th->th_flags & TH_FIN)
3376 tlen++;
3377 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3378 (tcp_seq)0, TH_RST|TH_ACK);
3379 }
3380 return;
3381 drop:
3382 m_freem(m);
3383 }
3384
3385 /*
3386 * Parse TCP options and place in tcpopt.
3387 */
3388 void
3389 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3390 {
3391 int opt, optlen;
3392
3393 to->to_flags = 0;
3394 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3395 opt = cp[0];
3396 if (opt == TCPOPT_EOL)
3397 break;
3398 if (opt == TCPOPT_NOP)
3399 optlen = 1;
3400 else {
3401 if (cnt < 2)
3402 break;
3403 optlen = cp[1];
3404 if (optlen < 2 || optlen > cnt)
3405 break;
3406 }
3407 switch (opt) {
3408 case TCPOPT_MAXSEG:
3409 if (optlen != TCPOLEN_MAXSEG)
3410 continue;
3411 if (!(flags & TO_SYN))
3412 continue;
3413 to->to_flags |= TOF_MSS;
3414 bcopy((char *)cp + 2,
3415 (char *)&to->to_mss, sizeof(to->to_mss));
3416 to->to_mss = ntohs(to->to_mss);
3417 break;
3418 case TCPOPT_WINDOW:
3419 if (optlen != TCPOLEN_WINDOW)
3420 continue;
3421 if (!(flags & TO_SYN))
3422 continue;
3423 to->to_flags |= TOF_SCALE;
3424 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3425 break;
3426 case TCPOPT_TIMESTAMP:
3427 if (optlen != TCPOLEN_TIMESTAMP)
3428 continue;
3429 to->to_flags |= TOF_TS;
3430 bcopy((char *)cp + 2,
3431 (char *)&to->to_tsval, sizeof(to->to_tsval));
3432 to->to_tsval = ntohl(to->to_tsval);
3433 bcopy((char *)cp + 6,
3434 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3435 to->to_tsecr = ntohl(to->to_tsecr);
3436 break;
3437 case TCPOPT_SIGNATURE:
3438 /*
3439 * In order to reply to a host which has set the
3440 * TCP_SIGNATURE option in its initial SYN, we have
3441 * to record the fact that the option was observed
3442 * here for the syncache code to perform the correct
3443 * response.
3444 */
3445 if (optlen != TCPOLEN_SIGNATURE)
3446 continue;
3447 to->to_flags |= TOF_SIGNATURE;
3448 to->to_signature = cp + 2;
3449 break;
3450 case TCPOPT_SACK_PERMITTED:
3451 if (optlen != TCPOLEN_SACK_PERMITTED)
3452 continue;
3453 if (!(flags & TO_SYN))
3454 continue;
3455 if (!V_tcp_do_sack)
3456 continue;
3457 to->to_flags |= TOF_SACKPERM;
3458 break;
3459 case TCPOPT_SACK:
3460 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3461 continue;
3462 if (flags & TO_SYN)
3463 continue;
3464 to->to_flags |= TOF_SACK;
3465 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3466 to->to_sacks = cp + 2;
3467 TCPSTAT_INC(tcps_sack_rcv_blocks);
3468 break;
3469 #ifdef TCP_RFC7413
3470 case TCPOPT_FAST_OPEN:
3471 if ((optlen != TCPOLEN_FAST_OPEN_EMPTY) &&
3472 (optlen < TCPOLEN_FAST_OPEN_MIN) &&
3473 (optlen > TCPOLEN_FAST_OPEN_MAX))
3474 continue;
3475 if (!(flags & TO_SYN))
3476 continue;
3477 if (!V_tcp_fastopen_enabled)
3478 continue;
3479 to->to_flags |= TOF_FASTOPEN;
3480 to->to_tfo_len = optlen - 2;
3481 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL;
3482 break;
3483 #endif
3484 default:
3485 continue;
3486 }
3487 }
3488 }
3489
3490 /*
3491 * Pull out of band byte out of a segment so
3492 * it doesn't appear in the user's data queue.
3493 * It is still reflected in the segment length for
3494 * sequencing purposes.
3495 */
3496 void
3497 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3498 int off)
3499 {
3500 int cnt = off + th->th_urp - 1;
3501
3502 while (cnt >= 0) {
3503 if (m->m_len > cnt) {
3504 char *cp = mtod(m, caddr_t) + cnt;
3505 struct tcpcb *tp = sototcpcb(so);
3506
3507 INP_WLOCK_ASSERT(tp->t_inpcb);
3508
3509 tp->t_iobc = *cp;
3510 tp->t_oobflags |= TCPOOB_HAVEDATA;
3511 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3512 m->m_len--;
3513 if (m->m_flags & M_PKTHDR)
3514 m->m_pkthdr.len--;
3515 return;
3516 }
3517 cnt -= m->m_len;
3518 m = m->m_next;
3519 if (m == NULL)
3520 break;
3521 }
3522 panic("tcp_pulloutofband");
3523 }
3524
3525 /*
3526 * Collect new round-trip time estimate
3527 * and update averages and current timeout.
3528 */
3529 void
3530 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3531 {
3532 int delta;
3533
3534 INP_WLOCK_ASSERT(tp->t_inpcb);
3535
3536 TCPSTAT_INC(tcps_rttupdated);
3537 tp->t_rttupdated++;
3538 if (tp->t_srtt != 0) {
3539 /*
3540 * srtt is stored as fixed point with 5 bits after the
3541 * binary point (i.e., scaled by 8). The following magic
3542 * is equivalent to the smoothing algorithm in rfc793 with
3543 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3544 * point). Adjust rtt to origin 0.
3545 */
3546 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3547 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3548
3549 if ((tp->t_srtt += delta) <= 0)
3550 tp->t_srtt = 1;
3551
3552 /*
3553 * We accumulate a smoothed rtt variance (actually, a
3554 * smoothed mean difference), then set the retransmit
3555 * timer to smoothed rtt + 4 times the smoothed variance.
3556 * rttvar is stored as fixed point with 4 bits after the
3557 * binary point (scaled by 16). The following is
3558 * equivalent to rfc793 smoothing with an alpha of .75
3559 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3560 * rfc793's wired-in beta.
3561 */
3562 if (delta < 0)
3563 delta = -delta;
3564 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3565 if ((tp->t_rttvar += delta) <= 0)
3566 tp->t_rttvar = 1;
3567 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3568 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3569 } else {
3570 /*
3571 * No rtt measurement yet - use the unsmoothed rtt.
3572 * Set the variance to half the rtt (so our first
3573 * retransmit happens at 3*rtt).
3574 */
3575 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3576 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3577 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3578 }
3579 tp->t_rtttime = 0;
3580 tp->t_rxtshift = 0;
3581
3582 /*
3583 * the retransmit should happen at rtt + 4 * rttvar.
3584 * Because of the way we do the smoothing, srtt and rttvar
3585 * will each average +1/2 tick of bias. When we compute
3586 * the retransmit timer, we want 1/2 tick of rounding and
3587 * 1 extra tick because of +-1/2 tick uncertainty in the
3588 * firing of the timer. The bias will give us exactly the
3589 * 1.5 tick we need. But, because the bias is
3590 * statistical, we have to test that we don't drop below
3591 * the minimum feasible timer (which is 2 ticks).
3592 */
3593 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3594 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3595
3596 /*
3597 * We received an ack for a packet that wasn't retransmitted;
3598 * it is probably safe to discard any error indications we've
3599 * received recently. This isn't quite right, but close enough
3600 * for now (a route might have failed after we sent a segment,
3601 * and the return path might not be symmetrical).
3602 */
3603 tp->t_softerror = 0;
3604 }
3605
3606 /*
3607 * Determine a reasonable value for maxseg size.
3608 * If the route is known, check route for mtu.
3609 * If none, use an mss that can be handled on the outgoing interface
3610 * without forcing IP to fragment. If no route is found, route has no mtu,
3611 * or the destination isn't local, use a default, hopefully conservative
3612 * size (usually 512 or the default IP max size, but no more than the mtu
3613 * of the interface), as we can't discover anything about intervening
3614 * gateways or networks. We also initialize the congestion/slow start
3615 * window to be a single segment if the destination isn't local.
3616 * While looking at the routing entry, we also initialize other path-dependent
3617 * parameters from pre-set or cached values in the routing entry.
3618 *
3619 * NOTE that resulting t_maxseg doesn't include space for TCP options or
3620 * IP options, e.g. IPSEC data, since length of this data may vary, and
3621 * thus it is calculated for every segment separately in tcp_output().
3622 *
3623 * NOTE that this routine is only called when we process an incoming
3624 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3625 * settings are handled in tcp_mssopt().
3626 */
3627 void
3628 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3629 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3630 {
3631 int mss = 0;
3632 u_long maxmtu = 0;
3633 struct inpcb *inp = tp->t_inpcb;
3634 struct hc_metrics_lite metrics;
3635 #ifdef INET6
3636 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3637 size_t min_protoh = isipv6 ?
3638 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3639 sizeof (struct tcpiphdr);
3640 #else
3641 const size_t min_protoh = sizeof(struct tcpiphdr);
3642 #endif
3643
3644 INP_WLOCK_ASSERT(tp->t_inpcb);
3645
3646 if (mtuoffer != -1) {
3647 KASSERT(offer == -1, ("%s: conflict", __func__));
3648 offer = mtuoffer - min_protoh;
3649 }
3650
3651 /* Initialize. */
3652 #ifdef INET6
3653 if (isipv6) {
3654 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3655 tp->t_maxseg = V_tcp_v6mssdflt;
3656 }
3657 #endif
3658 #if defined(INET) && defined(INET6)
3659 else
3660 #endif
3661 #ifdef INET
3662 {
3663 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3664 tp->t_maxseg = V_tcp_mssdflt;
3665 }
3666 #endif
3667
3668 /*
3669 * No route to sender, stay with default mss and return.
3670 */
3671 if (maxmtu == 0) {
3672 /*
3673 * In case we return early we need to initialize metrics
3674 * to a defined state as tcp_hc_get() would do for us
3675 * if there was no cache hit.
3676 */
3677 if (metricptr != NULL)
3678 bzero(metricptr, sizeof(struct hc_metrics_lite));
3679 return;
3680 }
3681
3682 /* What have we got? */
3683 switch (offer) {
3684 case 0:
3685 /*
3686 * Offer == 0 means that there was no MSS on the SYN
3687 * segment, in this case we use tcp_mssdflt as
3688 * already assigned to t_maxseg above.
3689 */
3690 offer = tp->t_maxseg;
3691 break;
3692
3693 case -1:
3694 /*
3695 * Offer == -1 means that we didn't receive SYN yet.
3696 */
3697 /* FALLTHROUGH */
3698
3699 default:
3700 /*
3701 * Prevent DoS attack with too small MSS. Round up
3702 * to at least minmss.
3703 */
3704 offer = max(offer, V_tcp_minmss);
3705 }
3706
3707 /*
3708 * rmx information is now retrieved from tcp_hostcache.
3709 */
3710 tcp_hc_get(&inp->inp_inc, &metrics);
3711 if (metricptr != NULL)
3712 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3713
3714 /*
3715 * If there's a discovered mtu in tcp hostcache, use it.
3716 * Else, use the link mtu.
3717 */
3718 if (metrics.rmx_mtu)
3719 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3720 else {
3721 #ifdef INET6
3722 if (isipv6) {
3723 mss = maxmtu - min_protoh;
3724 if (!V_path_mtu_discovery &&
3725 !in6_localaddr(&inp->in6p_faddr))
3726 mss = min(mss, V_tcp_v6mssdflt);
3727 }
3728 #endif
3729 #if defined(INET) && defined(INET6)
3730 else
3731 #endif
3732 #ifdef INET
3733 {
3734 mss = maxmtu - min_protoh;
3735 if (!V_path_mtu_discovery &&
3736 !in_localaddr(inp->inp_faddr))
3737 mss = min(mss, V_tcp_mssdflt);
3738 }
3739 #endif
3740 /*
3741 * XXX - The above conditional (mss = maxmtu - min_protoh)
3742 * probably violates the TCP spec.
3743 * The problem is that, since we don't know the
3744 * other end's MSS, we are supposed to use a conservative
3745 * default. But, if we do that, then MTU discovery will
3746 * never actually take place, because the conservative
3747 * default is much less than the MTUs typically seen
3748 * on the Internet today. For the moment, we'll sweep
3749 * this under the carpet.
3750 *
3751 * The conservative default might not actually be a problem
3752 * if the only case this occurs is when sending an initial
3753 * SYN with options and data to a host we've never talked
3754 * to before. Then, they will reply with an MSS value which
3755 * will get recorded and the new parameters should get
3756 * recomputed. For Further Study.
3757 */
3758 }
3759 mss = min(mss, offer);
3760
3761 /*
3762 * Sanity check: make sure that maxseg will be large
3763 * enough to allow some data on segments even if the
3764 * all the option space is used (40bytes). Otherwise
3765 * funny things may happen in tcp_output.
3766 *
3767 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
3768 */
3769 mss = max(mss, 64);
3770
3771 tp->t_maxseg = mss;
3772 }
3773
3774 void
3775 tcp_mss(struct tcpcb *tp, int offer)
3776 {
3777 int mss;
3778 u_long bufsize;
3779 struct inpcb *inp;
3780 struct socket *so;
3781 struct hc_metrics_lite metrics;
3782 struct tcp_ifcap cap;
3783
3784 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3785
3786 bzero(&cap, sizeof(cap));
3787 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3788
3789 mss = tp->t_maxseg;
3790 inp = tp->t_inpcb;
3791
3792 /*
3793 * If there's a pipesize, change the socket buffer to that size,
3794 * don't change if sb_hiwat is different than default (then it
3795 * has been changed on purpose with setsockopt).
3796 * Make the socket buffers an integral number of mss units;
3797 * if the mss is larger than the socket buffer, decrease the mss.
3798 */
3799 so = inp->inp_socket;
3800 SOCKBUF_LOCK(&so->so_snd);
3801 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3802 bufsize = metrics.rmx_sendpipe;
3803 else
3804 bufsize = so->so_snd.sb_hiwat;
3805 if (bufsize < mss)
3806 mss = bufsize;
3807 else {
3808 bufsize = roundup(bufsize, mss);
3809 if (bufsize > sb_max)
3810 bufsize = sb_max;
3811 if (bufsize > so->so_snd.sb_hiwat)
3812 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3813 }
3814 SOCKBUF_UNLOCK(&so->so_snd);
3815 /*
3816 * Sanity check: make sure that maxseg will be large
3817 * enough to allow some data on segments even if the
3818 * all the option space is used (40bytes). Otherwise
3819 * funny things may happen in tcp_output.
3820 *
3821 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
3822 */
3823 tp->t_maxseg = max(mss, 64);
3824
3825 SOCKBUF_LOCK(&so->so_rcv);
3826 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3827 bufsize = metrics.rmx_recvpipe;
3828 else
3829 bufsize = so->so_rcv.sb_hiwat;
3830 if (bufsize > mss) {
3831 bufsize = roundup(bufsize, mss);
3832 if (bufsize > sb_max)
3833 bufsize = sb_max;
3834 if (bufsize > so->so_rcv.sb_hiwat)
3835 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3836 }
3837 SOCKBUF_UNLOCK(&so->so_rcv);
3838
3839 /* Check the interface for TSO capabilities. */
3840 if (cap.ifcap & CSUM_TSO) {
3841 tp->t_flags |= TF_TSO;
3842 tp->t_tsomax = cap.tsomax;
3843 tp->t_tsomaxsegcount = cap.tsomaxsegcount;
3844 tp->t_tsomaxsegsize = cap.tsomaxsegsize;
3845 }
3846 }
3847
3848 /*
3849 * Determine the MSS option to send on an outgoing SYN.
3850 */
3851 int
3852 tcp_mssopt(struct in_conninfo *inc)
3853 {
3854 int mss = 0;
3855 u_long maxmtu = 0;
3856 u_long thcmtu = 0;
3857 size_t min_protoh;
3858
3859 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3860
3861 #ifdef INET6
3862 if (inc->inc_flags & INC_ISIPV6) {
3863 mss = V_tcp_v6mssdflt;
3864 maxmtu = tcp_maxmtu6(inc, NULL);
3865 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3866 }
3867 #endif
3868 #if defined(INET) && defined(INET6)
3869 else
3870 #endif
3871 #ifdef INET
3872 {
3873 mss = V_tcp_mssdflt;
3874 maxmtu = tcp_maxmtu(inc, NULL);
3875 min_protoh = sizeof(struct tcpiphdr);
3876 }
3877 #endif
3878 #if defined(INET6) || defined(INET)
3879 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3880 #endif
3881
3882 if (maxmtu && thcmtu)
3883 mss = min(maxmtu, thcmtu) - min_protoh;
3884 else if (maxmtu || thcmtu)
3885 mss = max(maxmtu, thcmtu) - min_protoh;
3886
3887 return (mss);
3888 }
3889
3890
3891 /*
3892 * On a partial ack arrives, force the retransmission of the
3893 * next unacknowledged segment. Do not clear tp->t_dupacks.
3894 * By setting snd_nxt to ti_ack, this forces retransmission timer to
3895 * be started again.
3896 */
3897 void
3898 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3899 {
3900 tcp_seq onxt = tp->snd_nxt;
3901 u_long ocwnd = tp->snd_cwnd;
3902 u_int maxseg = tcp_maxseg(tp);
3903
3904 INP_WLOCK_ASSERT(tp->t_inpcb);
3905
3906 tcp_timer_activate(tp, TT_REXMT, 0);
3907 tp->t_rtttime = 0;
3908 tp->snd_nxt = th->th_ack;
3909 /*
3910 * Set snd_cwnd to one segment beyond acknowledged offset.
3911 * (tp->snd_una has not yet been updated when this function is called.)
3912 */
3913 tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
3914 tp->t_flags |= TF_ACKNOW;
3915 (void) tp->t_fb->tfb_tcp_output(tp);
3916 tp->snd_cwnd = ocwnd;
3917 if (SEQ_GT(onxt, tp->snd_nxt))
3918 tp->snd_nxt = onxt;
3919 /*
3920 * Partial window deflation. Relies on fact that tp->snd_una
3921 * not updated yet.
3922 */
3923 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3924 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3925 else
3926 tp->snd_cwnd = 0;
3927 tp->snd_cwnd += maxseg;
3928 }
3929
3930 int
3931 tcp_compute_pipe(struct tcpcb *tp)
3932 {
3933 return (tp->snd_max - tp->snd_una +
3934 tp->sackhint.sack_bytes_rexmit -
3935 tp->sackhint.sacked_bytes);
3936 }
Cache object: 5a866000d643792da3c646beded98039
|