/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.1/sys/netinet/tcp_input.c 271667 2014-09-16 09:48:35Z delphij $");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kdtrace.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define V_blackhole		VNET(blackhole)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define V_drop_synfin		VNET(drop_synfin)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, experimental, CTLFLAG_RW, 0,
    "Experimental TCP extensions");

VNET_DEFINE(int, tcp_do_initcwnd10) = 1;
SYSCTL_VNET_INT(_net_inet_tcp_experimental, OID_AUTO, initcwnd10, CTLFLAG_RW,
    &VNET_NAME(tcp_do_initcwnd10), 0,
    "Enable RFC 6928 (Increasing initial CWND to 10)");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 0;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow the old (insecure) criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

static void	tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	tcp_do_segment(struct mbuf *, struct tcphdr *,
		    struct socket *, struct tcpcb *, int, int, uint8_t,
		    int);
static void	tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		    struct tcpcb *, int, int);
static void	tcp_pulloutofband(struct socket *,
		    struct tcphdr *, struct mbuf *, int);
static void	tcp_xmit_timer(struct tcpcb *, int);
static void	tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static void inline	cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
			    uint16_t type);
static void inline	cc_conn_init(struct tcpcb *tp);
static void inline	cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void inline	hhook_run_tcp_est_in(struct tcpcb *tp,
			    struct tcphdr *th, struct tcpopt *to);

/*
 * TCP statistics are stored in an "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
VNET_PCPUSTAT_SYSINIT(tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(tcpstat);
#endif /* VIMAGE */
/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

    counter_u64_add(VNET(tcpstat)[statnum], 1);
}
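
/*
 * Illustrative note (editorial, not from the original sources): callers
 * normally reach this through the KMOD_TCPSTAT_INC() macro in tcp_var.h,
 * which maps a struct tcpstat field name to its array index, roughly:
 *
 *	KMOD_TCPSTAT_INC(tcps_rcvtotal);
 *	// expands to approximately:
 *	// kmod_tcpstat_inc(
 *	//     offsetof(struct tcpstat, tcps_rcvtotal) / sizeof(uint64_t))
 */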

/*
 * Wrapper for the TCP established input helper hook.
 */
static void inline
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
    struct tcp_hhook_data hhook_data;

    if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
        hhook_data.tp = tp;
        hhook_data.th = th;
        hhook_data.to = to;

        hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
            tp->osd);
    }
}

/*
 * CC wrapper hook functions
 */
static void inline
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
    INP_WLOCK_ASSERT(tp->t_inpcb);

    tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
    if (tp->snd_cwnd <= tp->snd_wnd)
        tp->ccv->flags |= CCF_CWND_LIMITED;
    else
        tp->ccv->flags &= ~CCF_CWND_LIMITED;

    if (type == CC_ACK) {
        if (tp->snd_cwnd > tp->snd_ssthresh) {
            tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
                V_tcp_abc_l_var * tp->t_maxseg);
            if (tp->t_bytes_acked >= tp->snd_cwnd) {
                tp->t_bytes_acked -= tp->snd_cwnd;
                tp->ccv->flags |= CCF_ABC_SENTAWND;
            }
        } else {
            tp->ccv->flags &= ~CCF_ABC_SENTAWND;
            tp->t_bytes_acked = 0;
        }
    }

    if (CC_ALGO(tp)->ack_received != NULL) {
        /* XXXLAS: Find a way to live without this */
        tp->ccv->curack = th->th_ack;
        CC_ALGO(tp)->ack_received(tp->ccv, type);
    }
}
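
/*
 * Worked example (illustrative, assuming t_maxseg = 1460 and the default
 * abc_l_var = 2): in congestion avoidance (snd_cwnd > snd_ssthresh) an ACK
 * covering 5840 bytes credits min(5840, 2 * 1460) = 2920 bytes toward
 * t_bytes_acked; once a full cwnd worth of bytes has been ACKed,
 * CCF_ABC_SENTAWND tells the CC algorithm it may grow the window.
 */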

static void inline
cc_conn_init(struct tcpcb *tp)
{
    struct hc_metrics_lite metrics;
    struct inpcb *inp = tp->t_inpcb;
    int rtt;

    INP_WLOCK_ASSERT(tp->t_inpcb);

    tcp_hc_get(&inp->inp_inc, &metrics);

    if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
        tp->t_srtt = rtt;
        tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
        TCPSTAT_INC(tcps_usedrtt);
        if (metrics.rmx_rttvar) {
            tp->t_rttvar = metrics.rmx_rttvar;
            TCPSTAT_INC(tcps_usedrttvar);
        } else {
            /* default variation is +- 1 rtt */
            tp->t_rttvar =
                tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
        }
        TCPT_RANGESET(tp->t_rxtcur,
            ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
            tp->t_rttmin, TCPTV_REXMTMAX);
    }
    if (metrics.rmx_ssthresh) {
        /*
         * There's some sort of gateway or interface
         * buffer limit on the path.  Use this to set
         * the slow start threshold, but set the
         * threshold to no less than 2*mss.
         */
        tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
        TCPSTAT_INC(tcps_usedssthresh);
    }

    /*
     * Set the initial slow-start flight size.
     *
     * RFC5681 Section 3.1 specifies the default conservative values.
     * RFC3390 specifies slightly more aggressive values.
     * RFC6928 increases it to ten segments.
     *
     * If a SYN or SYN/ACK was lost and retransmitted, we have to
     * reduce the initial CWND to one segment as congestion is likely
     * requiring us to be cautious.
     */
    if (tp->snd_cwnd == 1)
        tp->snd_cwnd = tp->t_maxseg;	/* SYN(-ACK) lost */
    else if (V_tcp_do_initcwnd10)
        tp->snd_cwnd = min(10 * tp->t_maxseg,
            max(2 * tp->t_maxseg, 14600));
    else if (V_tcp_do_rfc3390)
        tp->snd_cwnd = min(4 * tp->t_maxseg,
            max(2 * tp->t_maxseg, 4380));
    else {
        /* Per RFC5681 Section 3.1 */
        if (tp->t_maxseg > 2190)
            tp->snd_cwnd = 2 * tp->t_maxseg;
        else if (tp->t_maxseg > 1095)
            tp->snd_cwnd = 3 * tp->t_maxseg;
        else
            tp->snd_cwnd = 4 * tp->t_maxseg;
    }

    if (CC_ALGO(tp)->conn_init != NULL)
        CC_ALGO(tp)->conn_init(tp->ccv);
}
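
/*
 * Worked example (illustrative, assuming an Ethernet-like t_maxseg = 1460):
 * RFC 6928 yields min(10 * 1460, max(2 * 1460, 14600)) = 14600 bytes (ten
 * segments); RFC 3390 yields min(4 * 1460, max(2 * 1460, 4380)) = 4380
 * bytes (three segments); plain RFC 5681 also picks 3 * 1460 = 4380 bytes,
 * since 1095 < 1460 <= 2190.
 */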

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
    INP_WLOCK_ASSERT(tp->t_inpcb);

    switch(type) {
    case CC_NDUPACK:
        if (!IN_FASTRECOVERY(tp->t_flags)) {
            tp->snd_recover = tp->snd_max;
            if (tp->t_flags & TF_ECN_PERMIT)
                tp->t_flags |= TF_ECN_SND_CWR;
        }
        break;
    case CC_ECN:
        if (!IN_CONGRECOVERY(tp->t_flags)) {
            TCPSTAT_INC(tcps_ecn_rcwnd);
            tp->snd_recover = tp->snd_max;
            if (tp->t_flags & TF_ECN_PERMIT)
                tp->t_flags |= TF_ECN_SND_CWR;
        }
        break;
    case CC_RTO:
        tp->t_dupacks = 0;
        tp->t_bytes_acked = 0;
        EXIT_RECOVERY(tp->t_flags);
        tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
            tp->t_maxseg) * tp->t_maxseg;
        tp->snd_cwnd = tp->t_maxseg;
        break;
    case CC_RTO_ERR:
        TCPSTAT_INC(tcps_sndrexmitbad);
        /* RTO was unnecessary, so reset everything. */
        tp->snd_cwnd = tp->snd_cwnd_prev;
        tp->snd_ssthresh = tp->snd_ssthresh_prev;
        tp->snd_recover = tp->snd_recover_prev;
        if (tp->t_flags & TF_WASFRECOVERY)
            ENTER_FASTRECOVERY(tp->t_flags);
        if (tp->t_flags & TF_WASCRECOVERY)
            ENTER_CONGRECOVERY(tp->t_flags);
        tp->snd_nxt = tp->snd_max;
        tp->t_flags &= ~TF_PREVVALID;
        tp->t_badrxtwin = 0;
        break;
    }

    if (CC_ALGO(tp)->cong_signal != NULL) {
        if (th != NULL)
            tp->ccv->curack = th->th_ack;
        CC_ALGO(tp)->cong_signal(tp->ccv, type);
    }
}
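
/*
 * Worked example for the CC_RTO case (illustrative, assuming t_maxseg =
 * 1460, snd_cwnd = 14600 and snd_wnd = 65535): ssthresh becomes
 * max(2, min(65535, 14600) / 2 / 1460) * 1460 = 5 * 1460 = 7300 bytes,
 * i.e. half the flight size rounded down to a segment boundary, but never
 * less than two segments, while cwnd restarts at a single segment.
 */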

static void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
    INP_WLOCK_ASSERT(tp->t_inpcb);

    /* XXXLAS: KASSERT that we're in recovery? */

    if (CC_ALGO(tp)->post_recovery != NULL) {
        tp->ccv->curack = th->th_ack;
        CC_ALGO(tp)->post_recovery(tp->ccv);
    }
    /* XXXLAS: EXIT_RECOVERY ? */
    tp->t_bytes_acked = 0;
}

#ifdef TCP_SIGNATURE
static inline int
tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
    struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
{
    int ret;

    tcp_fields_to_net(th);
    ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
    tcp_fields_to_host(th);
    return (ret);
}
#endif

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
    if ((tp) && (tp)->t_inpcb && \
        ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
        nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * - there is no delayed ack timer in progress and
 * - our last ack wasn't a 0-sized window.  We never want to delay
 *   the ack that opens up a 0-sized window and
 *     - delayed acks are enabled or
 *     - this is a half-synchronized T/TCP connection.
 * - the segment size is not larger than the MSS and LRO wasn't used
 *   for this segment.
 */
#define DELAY_ACK(tp, tlen) \
    ((!tcp_timer_active(tp, TT_DELACK) && \
    (tp->t_flags & TF_RXWIN0SENT) == 0) && \
    (tlen <= tp->t_maxopd) && \
    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
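
/*
 * Usage note (illustrative): the header-prediction fast path below applies
 * this macro to pure in-order data segments, so a bulk receiver typically
 * coalesces ACKs via the delayed-ACK timer, while a segment larger than
 * t_maxopd (e.g. an LRO-aggregated one) is ACKed immediately.
 */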

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *     ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *     SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *     establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
    struct mbuf *m = *mp;
    struct in6_ifaddr *ia6;

    IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

    /*
     * draft-itojun-ipv6-tcp-to-anycast
     * better place to put this in?
     */
    ia6 = ip6_getdstifaddr(m);
    if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
        struct ip6_hdr *ip6;

        ifa_free(&ia6->ia_ifa);
        ip6 = mtod(m, struct ip6_hdr *);
        icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
            (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
        return (IPPROTO_DONE);
    }
    if (ia6)
        ifa_free(&ia6->ia_ifa);

    tcp_input(m, *offp);
    return (IPPROTO_DONE);
}
#endif /* INET6 */

void
tcp_input(struct mbuf *m, int off0)
{
    struct tcphdr *th = NULL;
    struct ip *ip = NULL;
    struct inpcb *inp = NULL;
    struct tcpcb *tp = NULL;
    struct socket *so = NULL;
    u_char *optp = NULL;
    int optlen = 0;
#ifdef INET
    int len;
#endif
    int tlen = 0, off;
    int drop_hdrlen;
    int thflags;
    int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef TCP_SIGNATURE
    uint8_t sig_checked = 0;
#endif
    uint8_t iptos = 0;
    struct m_tag *fwd_tag = NULL;
#ifdef INET6
    struct ip6_hdr *ip6 = NULL;
    int isipv6;
#else
    const void *ip6 = NULL;
#endif /* INET6 */
    struct tcpopt to;		/* options in this segment */
    char *s = NULL;		/* address and port logging */
    int ti_locked;
#define TI_UNLOCKED	1
#define TI_WLOCKED	2

#ifdef TCPDEBUG
    /*
     * The size of tcp_saveipgen must be the size of the max ip header,
     * now IPv6.
     */
    u_char tcp_saveipgen[IP6_HDR_LEN];
    struct tcphdr tcp_savetcp;
    short ostate = 0;
#endif

#ifdef INET6
    isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

    to.to_flags = 0;
    TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
    if (isipv6) {
        /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */

        if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
            m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
            if (m == NULL) {
                TCPSTAT_INC(tcps_rcvshort);
                return;
            }
        }

        ip6 = mtod(m, struct ip6_hdr *);
        th = (struct tcphdr *)((caddr_t)ip6 + off0);
        tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
        if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
            if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
                th->th_sum = m->m_pkthdr.csum_data;
            else
                th->th_sum = in6_cksum_pseudo(ip6, tlen,
                    IPPROTO_TCP, m->m_pkthdr.csum_data);
            th->th_sum ^= 0xffff;
        } else
            th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
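        /*
         * Explanatory note (editorial): both paths are arranged so that
         * a valid segment ends up with th_sum == 0.  The hardware-assisted
         * sum in csum_data is not yet complemented, hence the XOR with
         * 0xffff; in6_cksum() over the whole segment likewise returns 0
         * when the embedded checksum is correct.
         */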
        if (th->th_sum) {
            TCPSTAT_INC(tcps_rcvbadsum);
            goto drop;
        }

        /*
         * Be proactive about an unspecified IPv6 address in the source.
         * As we use all-zero to indicate an unbound/unconnected pcb,
         * an unspecified IPv6 address can be used to confuse us.
         *
         * Note that packets with an unspecified IPv6 destination are
         * already dropped in ip6_input.
         */
        if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
            /* XXX stat */
            goto drop;
        }
    }
#endif
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
    {
        /*
         * Get IP and TCP header together in first mbuf.
         * Note: IP leaves IP header in first mbuf.
         */
        if (off0 > sizeof (struct ip)) {
            ip_stripoptions(m);
            off0 = sizeof(struct ip);
        }
        if (m->m_len < sizeof (struct tcpiphdr)) {
            if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
                == NULL) {
                TCPSTAT_INC(tcps_rcvshort);
                return;
            }
        }
        ip = mtod(m, struct ip *);
        th = (struct tcphdr *)((caddr_t)ip + off0);
        tlen = ntohs(ip->ip_len) - off0;

        if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
            if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
                th->th_sum = m->m_pkthdr.csum_data;
            else
                th->th_sum = in_pseudo(ip->ip_src.s_addr,
                    ip->ip_dst.s_addr,
                    htonl(m->m_pkthdr.csum_data + tlen +
                    IPPROTO_TCP));
            th->th_sum ^= 0xffff;
        } else {
            struct ipovly *ipov = (struct ipovly *)ip;

            /*
             * Checksum extended TCP header and data.
             */
            len = off0 + tlen;
            bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
            ipov->ih_len = htons(tlen);
            th->th_sum = in_cksum(m, len);
            /* Reset length for SDT probes. */
            ip->ip_len = htons(tlen + off0);
        }

        if (th->th_sum) {
            TCPSTAT_INC(tcps_rcvbadsum);
            goto drop;
        }
        /* Re-initialization for later version check */
        ip->ip_v = IPVERSION;
    }
#endif /* INET */

#ifdef INET6
    if (isipv6)
        iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
#endif
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
        iptos = ip->ip_tos;
#endif

    /*
     * Check that TCP offset makes sense,
     * pull out TCP options and adjust length.		XXX
     */
    off = th->th_off << 2;
    if (off < sizeof (struct tcphdr) || off > tlen) {
        TCPSTAT_INC(tcps_rcvbadoff);
        goto drop;
    }
    tlen -= off;	/* tlen is used instead of ti->ti_len */
    if (off > sizeof (struct tcphdr)) {
#ifdef INET6
        if (isipv6) {
            IP6_EXTHDR_CHECK(m, off0, off, );
            ip6 = mtod(m, struct ip6_hdr *);
            th = (struct tcphdr *)((caddr_t)ip6 + off0);
        }
#endif
#if defined(INET) && defined(INET6)
        else
#endif
#ifdef INET
        {
            if (m->m_len < sizeof(struct ip) + off) {
                if ((m = m_pullup(m, sizeof (struct ip) + off))
                    == NULL) {
                    TCPSTAT_INC(tcps_rcvshort);
                    return;
                }
                ip = mtod(m, struct ip *);
                th = (struct tcphdr *)((caddr_t)ip + off0);
            }
        }
#endif
        optlen = off - sizeof (struct tcphdr);
        optp = (u_char *)(th + 1);
    }
    thflags = th->th_flags;

    /*
     * Convert TCP protocol specific fields to host format.
     */
    tcp_fields_to_host(th);

    /*
     * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
     */
    drop_hdrlen = off0 + off;

    /*
     * Locate pcb for segment; if we're likely to add or remove a
     * connection then first acquire pcbinfo lock.  There are two cases
     * where we might discover later we need a write lock despite the
     * flags: ACKs moving a connection out of the syncache, and ACKs for
     * a connection in TIMEWAIT.
     */
    if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) {
        INP_INFO_WLOCK(&V_tcbinfo);
        ti_locked = TI_WLOCKED;
    } else
        ti_locked = TI_UNLOCKED;

    /*
     * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
     */
    if (
#ifdef INET6
        (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
        || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
        (m->m_flags & M_IP_NEXTHOP)
#endif
        )
        fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INVARIANTS
    if (ti_locked == TI_WLOCKED) {
        INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
    } else {
        INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
    }
#endif
#ifdef INET6
    if (isipv6 && fwd_tag != NULL) {
        struct sockaddr_in6 *next_hop6;

        next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
        /*
         * Transparently forwarded.  Pretend to be the destination.
         * Already got one like this?
         */
        inp = in6_pcblookup_mbuf(&V_tcbinfo,
            &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
            INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
        if (!inp) {
            /*
             * It's new.  Try to find the ambushing socket.
             * Because we've rewritten the destination address,
             * any hardware-generated hash is ignored.
             */
            inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
                th->th_sport, &next_hop6->sin6_addr,
                next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
                th->th_dport, INPLOOKUP_WILDCARD |
                INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
        }
    } else if (isipv6) {
        inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
            th->th_sport, &ip6->ip6_dst, th->th_dport,
            INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
            m->m_pkthdr.rcvif, m);
    }
#endif /* INET6 */
#if defined(INET6) && defined(INET)
    else
#endif
#ifdef INET
    if (fwd_tag != NULL) {
        struct sockaddr_in *next_hop;

        next_hop = (struct sockaddr_in *)(fwd_tag+1);
        /*
         * Transparently forwarded.  Pretend to be the destination.
         * Already got one like this?
         */
        inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
            ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
            m->m_pkthdr.rcvif, m);
        if (!inp) {
            /*
             * It's new.  Try to find the ambushing socket.
             * Because we've rewritten the destination address,
             * any hardware-generated hash is ignored.
             */
            inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
                th->th_sport, next_hop->sin_addr,
                next_hop->sin_port ? ntohs(next_hop->sin_port) :
                th->th_dport, INPLOOKUP_WILDCARD |
                INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
        }
    } else
        inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
            th->th_sport, ip->ip_dst, th->th_dport,
            INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
            m->m_pkthdr.rcvif, m);
#endif /* INET */

    /*
     * If the INPCB does not exist then all data in the incoming
     * segment is discarded and an appropriate RST is sent back.
     * XXX MRT Send RST using which routing table?
     */
    if (inp == NULL) {
        /*
         * Log communication attempts to ports that are not
         * in use.
         */
        if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
            tcp_log_in_vain == 2) {
            if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
                log(LOG_INFO, "%s; %s: Connection attempt "
                    "to closed port\n", s, __func__);
        }
        /*
         * When blackholing do not respond with a RST but
         * completely ignore the segment and drop it.
         */
        if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
            V_blackhole == 2)
            goto dropunlock;

        rstreason = BANDLIM_RST_CLOSEDPORT;
        goto dropwithreset;
    }
    INP_WLOCK_ASSERT(inp);
    if (!(inp->inp_flags & INP_HW_FLOWID)
        && (m->m_flags & M_FLOWID)
        && ((inp->inp_socket == NULL)
        || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
        inp->inp_flags |= INP_HW_FLOWID;
        inp->inp_flags &= ~INP_SW_FLOWID;
        inp->inp_flowid = m->m_pkthdr.flowid;
    }
#ifdef IPSEC
#ifdef INET6
    if (isipv6 && ipsec6_in_reject(m, inp)) {
        IPSEC6STAT_INC(ips_in_polvio);
        goto dropunlock;
    } else
#endif /* INET6 */
    if (ipsec4_in_reject(m, inp) != 0) {
        IPSECSTAT_INC(ips_in_polvio);
        goto dropunlock;
    }
#endif /* IPSEC */

    /*
     * Check the minimum TTL for socket.
     */
    if (inp->inp_ip_minttl != 0) {
#ifdef INET6
        if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
            goto dropunlock;
        else
#endif
        if (inp->inp_ip_minttl > ip->ip_ttl)
            goto dropunlock;
    }

    /*
     * A previous connection in TIMEWAIT state is supposed to catch stray
     * or duplicate segments arriving late.  If this segment was a
     * legitimate new connection attempt, the old INPCB gets removed and
     * we can try again to find a listening socket.
     *
     * At this point, due to earlier optimism, we may hold only an inpcb
     * lock, and not the inpcbinfo write lock.  If so, we need to try to
     * acquire it, or if that fails, acquire a reference on the inpcb,
     * drop all locks, acquire a global write lock, and then re-acquire
     * the inpcb lock.  We may at that point discover that another thread
     * has tried to free the inpcb, in which case we need to loop back
     * and try to find a new inpcb to deliver to.
     *
     * XXXRW: It may be time to rethink timewait locking.
     */
relocked:
    if (inp->inp_flags & INP_TIMEWAIT) {
        if (ti_locked == TI_UNLOCKED) {
            if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
                in_pcbref(inp);
                INP_WUNLOCK(inp);
                INP_INFO_WLOCK(&V_tcbinfo);
                ti_locked = TI_WLOCKED;
                INP_WLOCK(inp);
                if (in_pcbrele_wlocked(inp)) {
                    inp = NULL;
                    goto findpcb;
                }
            } else
                ti_locked = TI_WLOCKED;
        }
        INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

        if (thflags & TH_SYN)
            tcp_dooptions(&to, optp, optlen, TO_SYN);
        /*
         * NB: tcp_twcheck unlocks the INP and frees the mbuf.
         */
        if (tcp_twcheck(inp, &to, th, m, tlen))
            goto findpcb;
        INP_INFO_WUNLOCK(&V_tcbinfo);
        return;
    }
    /*
     * The TCPCB may no longer exist if the connection is winding
     * down or it is in the CLOSED state.  Either way we drop the
     * segment and send an appropriate response.
     */
    tp = intotcpcb(inp);
    if (tp == NULL || tp->t_state == TCPS_CLOSED) {
        rstreason = BANDLIM_RST_CLOSEDPORT;
        goto dropwithreset;
    }

#ifdef TCP_OFFLOAD
    if (tp->t_flags & TF_TOE) {
        tcp_offload_input(tp, m);
        m = NULL;	/* consumed by the TOE driver */
        goto dropunlock;
    }
#endif

    /*
     * We've identified a valid inpcb, but it could be that we need an
     * inpcbinfo write lock but don't hold it.  In this case, attempt to
     * acquire using the same strategy as the TIMEWAIT case above.  If we
     * relock, we have to jump back to 'relocked' as the connection might
     * now be in TIMEWAIT.
     */
#ifdef INVARIANTS
    if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0)
        INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#endif
    if (tp->t_state != TCPS_ESTABLISHED) {
        if (ti_locked == TI_UNLOCKED) {
            if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
                in_pcbref(inp);
                INP_WUNLOCK(inp);
                INP_INFO_WLOCK(&V_tcbinfo);
                ti_locked = TI_WLOCKED;
                INP_WLOCK(inp);
                if (in_pcbrele_wlocked(inp)) {
                    inp = NULL;
                    goto findpcb;
                }
                goto relocked;
            } else
                ti_locked = TI_WLOCKED;
        }
        INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
    }

#ifdef MAC
    INP_WLOCK_ASSERT(inp);
    if (mac_inpcb_check_deliver(inp, m))
        goto dropunlock;
#endif
    so = inp->inp_socket;
    KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
    if (so->so_options & SO_DEBUG) {
        ostate = tp->t_state;
#ifdef INET6
        if (isipv6) {
            bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
        } else
#endif
            bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
        tcp_savetcp = *th;
    }
#endif /* TCPDEBUG */
    /*
     * When the socket is accepting connections (the INPCB is in LISTEN
     * state) we look into the SYN cache if this is a new connection
     * attempt or the completion of a previous one.  Because listen
     * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be
     * held in this case.
     */
    if (so->so_options & SO_ACCEPTCONN) {
        struct in_conninfo inc;

        KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
            "tp not listening", __func__));
        INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

        bzero(&inc, sizeof(inc));
#ifdef INET6
        if (isipv6) {
            inc.inc_flags |= INC_ISIPV6;
            inc.inc6_faddr = ip6->ip6_src;
            inc.inc6_laddr = ip6->ip6_dst;
        } else
#endif
        {
            inc.inc_faddr = ip->ip_src;
            inc.inc_laddr = ip->ip_dst;
        }
        inc.inc_fport = th->th_sport;
        inc.inc_lport = th->th_dport;
        inc.inc_fibnum = so->so_fibnum;

        /*
         * Check for an existing connection attempt in syncache if
         * the flag is only ACK.  A successful lookup creates a new
         * socket appended to the listen queue in SYN_RECEIVED state.
         */
        if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
            /*
             * Parse the TCP options here because
             * syncookies need access to the reflected
             * timestamp.
             */
            tcp_dooptions(&to, optp, optlen, 0);
            /*
             * NB: syncache_expand() doesn't unlock
             * inp and tcpinfo locks.
             */
            if (!syncache_expand(&inc, &to, th, &so, m)) {
                /*
                 * No syncache entry or ACK was not
                 * for our SYN/ACK.  Send a RST.
                 * NB: syncache did its own logging
                 * of the failure cause.
                 */
                rstreason = BANDLIM_RST_OPENPORT;
                goto dropwithreset;
            }
            if (so == NULL) {
                /*
                 * We completed the 3-way handshake
                 * but could not allocate a socket
                 * either due to memory shortage,
                 * listen queue length limits or
                 * global socket limits.  Send RST
                 * or wait and have the remote end
                 * retransmit the ACK for another
                 * try.
                 */
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Socket allocation failed due to "
                        "limits or memory shortage, %s\n",
                        s, __func__,
                        V_tcp_sc_rst_sock_fail ?
                        "sending RST" : "try again");
                if (V_tcp_sc_rst_sock_fail) {
                    rstreason = BANDLIM_UNLIMITED;
                    goto dropwithreset;
                } else
                    goto dropunlock;
            }
            /*
             * Socket is created in state SYN_RECEIVED.
             * Unlock the listen socket, lock the newly
             * created socket and update the tp variable.
             */
            INP_WUNLOCK(inp);	/* listen socket */
            inp = sotoinpcb(so);
            INP_WLOCK(inp);	/* new connection */
            tp = intotcpcb(inp);
            KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
                ("%s: new socket not in SYN_RECEIVED state", __func__));
#ifdef TCP_SIGNATURE
            if (sig_checked == 0) {
                tcp_dooptions(&to, optp, optlen,
                    (thflags & TH_SYN) ? TO_SYN : 0);
                if (!tcp_signature_verify_input(m, off0, tlen,
                    optlen, &to, th, tp->t_flags)) {

                    /*
                     * In SYN_SENT state if it receives an
                     * RST, it is allowed for further
                     * processing.
                     */
                    if ((thflags & TH_RST) == 0 ||
                        (tp->t_state == TCPS_SYN_SENT) == 0)
                        goto dropunlock;
                }
                sig_checked = 1;
            }
#endif

            /*
             * Process the segment and the data it
             * contains.  tcp_do_segment() consumes
             * the mbuf chain and unlocks the inpcb.
             */
            tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
                iptos, ti_locked);
            INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
            return;
        }
        /*
         * Segment flag validation for new connection attempts:
         *
         * Our (SYN|ACK) response was rejected.
         * Check with syncache and remove entry to prevent
         * retransmits.
         *
         * NB: syncache_chkrst does its own logging of failure
         * causes.
         */
        if (thflags & TH_RST) {
            syncache_chkrst(&inc, th);
            goto dropunlock;
        }
        /*
         * We can't do anything without SYN.
         */
        if ((thflags & TH_SYN) == 0) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "SYN is missing, segment ignored\n",
                    s, __func__);
            TCPSTAT_INC(tcps_badsyn);
            goto dropunlock;
        }
        /*
         * (SYN|ACK) is bogus on a listen socket.
         */
        if (thflags & TH_ACK) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "SYN|ACK invalid, segment rejected\n",
                    s, __func__);
            syncache_badack(&inc);	/* XXX: Not needed! */
            TCPSTAT_INC(tcps_badsyn);
            rstreason = BANDLIM_RST_OPENPORT;
            goto dropwithreset;
        }
        /*
         * If the drop_synfin option is enabled, drop all
         * segments with both the SYN and FIN bits set.
         * This prevents e.g. nmap from identifying the
         * TCP/IP stack.
         * XXX: Poor reasoning.  nmap has other methods
         * and is constantly refining its stack detection
         * strategies.
         * XXX: This is a violation of the TCP specification
         * and was used by RFC1644.
         */
        if ((thflags & TH_FIN) && V_drop_synfin) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "SYN|FIN segment ignored (based on "
                    "sysctl setting)\n", s, __func__);
            TCPSTAT_INC(tcps_badsyn);
            goto dropunlock;
        }
        /*
         * Segment's flags are (SYN) or (SYN|FIN).
         *
         * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
         * as they do not affect the state of the TCP FSM.
         * The data pointed to by TH_URG and th_urp is ignored.
         */
        KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
            ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
        KASSERT(thflags & (TH_SYN),
            ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
        /*
         * If deprecated address is forbidden,
         * we do not accept SYN to deprecated interface
         * address to prevent any new inbound connection from
         * getting established.
         * When we do not accept SYN, we send a TCP RST,
         * with deprecated source address (instead of dropping
         * it).  We compromise it as it is much better for peer
         * to send a RST, and RST will be the final packet
         * for the exchange.
         *
         * If we do not forbid deprecated addresses, we accept
         * the SYN packet.  RFC2462 does not suggest dropping
         * SYN in this case.
         * If we decipher RFC2462 5.5.4, it says like this:
         * 1. use of deprecated addr with existing
         *    communication is okay - "SHOULD continue to be
         *    used"
         * 2. use of it with new communication:
         *   (2a) "SHOULD NOT be used if alternate address
         *        with sufficient scope is available"
         *   (2b) nothing mentioned otherwise.
         * Here we fall into (2b) case as we have no choice in
         * our source address selection - we must obey the peer.
         *
         * The wording in RFC2462 is confusing, and there are
         * multiple description text for deprecated address
         * handling - worse, they are not exactly the same.
         * I believe 5.5.4 is the best one, so we follow 5.5.4.
         */
        if (isipv6 && !V_ip6_use_deprecated) {
            struct in6_ifaddr *ia6;

            ia6 = ip6_getdstifaddr(m);
            if (ia6 != NULL &&
                (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
                ifa_free(&ia6->ia_ifa);
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt to deprecated "
                        "IPv6 address rejected\n",
                        s, __func__);
                rstreason = BANDLIM_RST_OPENPORT;
                goto dropwithreset;
            }
            if (ia6)
                ifa_free(&ia6->ia_ifa);
        }
#endif /* INET6 */
        /*
         * Basic sanity checks on incoming SYN requests:
         *   Don't respond if the destination is a link layer
         *     broadcast according to RFC1122 4.2.3.10, p. 104.
         *   If it is from this socket it must be forged.
         *   Don't respond if the source or destination is a
         *     global or subnet broad- or multicast address.
         *   Note that it is quite possible to receive unicast
         *     link-layer packets with a broadcast IP address.  Use
         *     in_broadcast() to find them.
         */
        if (m->m_flags & (M_BCAST|M_MCAST)) {
            if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                log(LOG_DEBUG, "%s; %s: Listen socket: "
                    "Connection attempt from broad- or multicast "
                    "link layer address ignored\n", s, __func__);
            goto dropunlock;
        }
#ifdef INET6
        if (isipv6) {
            if (th->th_dport == th->th_sport &&
                IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt to/from self "
                        "ignored\n", s, __func__);
                goto dropunlock;
            }
            if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
                IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt from/to multicast "
                        "address ignored\n", s, __func__);
                goto dropunlock;
            }
        }
#endif
#if defined(INET) && defined(INET6)
        else
#endif
#ifdef INET
        {
            if (th->th_dport == th->th_sport &&
                ip->ip_dst.s_addr == ip->ip_src.s_addr) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt from/to self "
                        "ignored\n", s, __func__);
                goto dropunlock;
            }
            if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
                IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
                ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
                in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
                if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
                    log(LOG_DEBUG, "%s; %s: Listen socket: "
                        "Connection attempt from/to broad- "
                        "or multicast address ignored\n",
                        s, __func__);
                goto dropunlock;
            }
        }
#endif
        /*
         * SYN appears to be valid.  Create compressed TCP state
         * for syncache.
         */
#ifdef TCPDEBUG
        if (so->so_options & SO_DEBUG)
            tcp_trace(TA_INPUT, ostate, tp,
                (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
        tcp_dooptions(&to, optp, optlen, TO_SYN);
        syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
        /*
         * Entry added to syncache and mbuf consumed.
         * Everything already unlocked by syncache_add().
         */
        INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
        return;
    } else if (tp->t_state == TCPS_LISTEN) {
        /*
         * When a listen socket is torn down the SO_ACCEPTCONN
         * flag is removed first while connections are drained
         * from the accept queue in an unlock/lock cycle of the
         * ACCEPT_LOCK, opening a race condition allowing a SYN
         * attempt to go through unhandled.
         */
        goto dropunlock;
    }

#ifdef TCP_SIGNATURE
    if (sig_checked == 0) {
        tcp_dooptions(&to, optp, optlen,
            (thflags & TH_SYN) ? TO_SYN : 0);
        if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
            th, tp->t_flags)) {

            /*
             * In SYN_SENT state if it receives an RST, it is
             * allowed for further processing.
             */
            if ((thflags & TH_RST) == 0 ||
                (tp->t_state == TCPS_SYN_SENT) == 0)
                goto dropunlock;
        }
        sig_checked = 1;
    }
#endif

    TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

    /*
     * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
     * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
     * the inpcb, and unlocks pcbinfo.
     */
    tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
    INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
    return;

dropwithreset:
    TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

    if (ti_locked == TI_WLOCKED) {
        INP_INFO_WUNLOCK(&V_tcbinfo);
        ti_locked = TI_UNLOCKED;
    }
#ifdef INVARIANTS
    else {
        KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
            "ti_locked: %d", __func__, ti_locked));
        INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
    }
#endif

    if (inp != NULL) {
        tcp_dropwithreset(m, th, tp, tlen, rstreason);
        INP_WUNLOCK(inp);
    } else
        tcp_dropwithreset(m, th, NULL, tlen, rstreason);
    m = NULL;	/* mbuf chain got consumed. */
    goto drop;

dropunlock:
    if (m != NULL)
        TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

    if (ti_locked == TI_WLOCKED) {
        INP_INFO_WUNLOCK(&V_tcbinfo);
        ti_locked = TI_UNLOCKED;
    }
#ifdef INVARIANTS
    else {
        KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
            "ti_locked: %d", __func__, ti_locked));
        INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
    }
#endif

    if (inp != NULL)
        INP_WUNLOCK(inp);

drop:
    INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
    if (s != NULL)
        free(s, M_TCPLOG);
    if (m != NULL)
        m_freem(m);
}

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
    int thflags, acked, ourfinisacked, needoutput = 0;
    int rstreason, todrop, win;
    u_long tiwin;
    char *s;
    struct in_conninfo *inc;
    struct mbuf *mfree;
    struct tcpopt to;

#ifdef TCPDEBUG
    /*
     * The size of tcp_saveipgen must be the size of the max ip header,
     * now IPv6.
     */
    u_char tcp_saveipgen[IP6_HDR_LEN];
    struct tcphdr tcp_savetcp;
    short ostate = 0;
#endif
    thflags = th->th_flags;
    inc = &tp->t_inpcb->inp_inc;
    tp->sackhint.last_sack_ack = 0;

    /*
     * If this is either a state-changing packet or current state isn't
     * established, we require a write lock on tcbinfo.  Otherwise, we
     * allow the tcbinfo to be in either locked or unlocked state, as the
     * caller may have unnecessarily acquired a write lock due to a race.
     */
    if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
        tp->t_state != TCPS_ESTABLISHED) {
        KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
            "SYN/FIN/RST/!EST", __func__, ti_locked));
        INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
    } else {
#ifdef INVARIANTS
        if (ti_locked == TI_WLOCKED)
            INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
        else {
            KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
                "ti_locked: %d", __func__, ti_locked));
            INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
        }
#endif
    }
    INP_WLOCK_ASSERT(tp->t_inpcb);
    KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
        __func__));
    KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
        __func__));

    /*
     * Segment received on connection.
     * Reset idle time and keep-alive timer.
     * XXX: This should be done after segment
     * validation to ignore broken/spoofed segs.
     */
    tp->t_rcvtime = ticks;
    if (TCPS_HAVEESTABLISHED(tp->t_state))
        tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

    /*
     * Unscale the window into a 32-bit value.
     * For the SYN_SENT state the scale is zero.
     */
    tiwin = th->th_win << tp->snd_scale;
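
    /*
     * Illustrative example (editorial): with a raw th_win of 8192 and a
     * negotiated snd_scale of 6, the effective send window is
     * 8192 << 6 = 524288 bytes; in SYN_SENT snd_scale is still 0, so the
     * SYN's window is taken at face value, per RFC 1323.
     */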

    /*
     * TCP ECN processing.
     */
    if (tp->t_flags & TF_ECN_PERMIT) {
        if (thflags & TH_CWR)
            tp->t_flags &= ~TF_ECN_SND_ECE;
        switch (iptos & IPTOS_ECN_MASK) {
        case IPTOS_ECN_CE:
            tp->t_flags |= TF_ECN_SND_ECE;
            TCPSTAT_INC(tcps_ecn_ce);
            break;
        case IPTOS_ECN_ECT0:
            TCPSTAT_INC(tcps_ecn_ect0);
            break;
        case IPTOS_ECN_ECT1:
            TCPSTAT_INC(tcps_ecn_ect1);
            break;
        }
        /* Congestion experienced. */
        if (thflags & TH_ECE) {
            cc_cong_signal(tp, th, CC_ECN);
        }
    }

    /*
     * Parse options on any incoming segment.
     */
    tcp_dooptions(&to, (u_char *)(th + 1),
        (th->th_off << 2) - sizeof(struct tcphdr),
        (thflags & TH_SYN) ? TO_SYN : 0);

    /*
     * If echoed timestamp is later than the current time,
     * fall back to non RFC1323 RTT calculation.  Normalize
     * timestamp if syncookies were used when this connection
     * was established.
     */
    if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
        to.to_tsecr -= tp->ts_offset;
        if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
            to.to_tsecr = 0;
    }
    /*
     * If timestamps were negotiated during SYN/ACK they should
     * appear on every segment during this session and vice versa.
     */
    if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
        if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
            log(LOG_DEBUG, "%s; %s: Timestamp missing, "
                "no action\n", s, __func__);
            free(s, M_TCPLOG);
        }
    }
    if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
        if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
            log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
                "no action\n", s, __func__);
            free(s, M_TCPLOG);
        }
    }

    /*
     * Process options only when we get SYN/ACK back.  The SYN case
     * for incoming connections is handled in tcp_syncache.
     * According to RFC1323 the window field in a SYN (i.e., a <SYN>
     * or <SYN,ACK>) segment itself is never scaled.
     * XXX this is traditional behavior, may need to be cleaned up.
     */
    if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
        if ((to.to_flags & TOF_SCALE) &&
            (tp->t_flags & TF_REQ_SCALE)) {
            tp->t_flags |= TF_RCVD_SCALE;
            tp->snd_scale = to.to_wscale;
        }
        /*
         * Initial send window.  It will be updated with
         * the next incoming segment to the scaled value.
         */
        tp->snd_wnd = th->th_win;
        if (to.to_flags & TOF_TS) {
            tp->t_flags |= TF_RCVD_TSTMP;
            tp->ts_recent = to.to_tsval;
            tp->ts_recent_age = tcp_ts_getticks();
        }
        if (to.to_flags & TOF_MSS)
            tcp_mss(tp, to.to_mss);
        if ((tp->t_flags & TF_SACK_PERMIT) &&
            (to.to_flags & TOF_SACKPERM) == 0)
            tp->t_flags &= ~TF_SACK_PERMIT;
    }

    /*
     * Header prediction: check for the two common cases
     * of a uni-directional data xfer.  If the packet has
     * no control flags, is in-sequence, the window didn't
     * change and we're not retransmitting, it's a
     * candidate.  If the length is zero and the ack moved
     * forward, we're the sender side of the xfer.  Just
     * free the data acked & wake any higher level process
     * that was blocked waiting for space.  If the length
     * is non-zero and the ack didn't move, we're the
     * receiver side.  If we're getting packets in-order
     * (the reassembly queue is empty), add the data to
     * the socket buffer and note that we need a delayed ack.
     * Make sure that the hidden state-flags are also off.
     * Since we check for TCPS_ESTABLISHED first, it can only
     * be TH_NEEDSYN.
     */
    if (tp->t_state == TCPS_ESTABLISHED &&
        th->th_seq == tp->rcv_nxt &&
        (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
        tp->snd_nxt == tp->snd_max &&
        tiwin && tiwin == tp->snd_wnd &&
        ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
        LIST_EMPTY(&tp->t_segq) &&
        ((to.to_flags & TOF_TS) == 0 ||
        TSTMP_GEQ(to.to_tsval, tp->ts_recent))) {

        /*
         * If last ACK falls within this segment's sequence numbers,
         * record the timestamp.
         * NOTE that the test is modified according to the latest
         * proposal of the tcplw@cray.com list (Braden 1993/04/26).
         */
        if ((to.to_flags & TOF_TS) != 0 &&
            SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
            tp->ts_recent_age = tcp_ts_getticks();
            tp->ts_recent = to.to_tsval;
        }

        if (tlen == 0) {
            if (SEQ_GT(th->th_ack, tp->snd_una) &&
                SEQ_LEQ(th->th_ack, tp->snd_max) &&
                !IN_RECOVERY(tp->t_flags) &&
                (to.to_flags & TOF_SACK) == 0 &&
                TAILQ_EMPTY(&tp->snd_holes)) {
                /*
                 * This is a pure ack for outstanding data.
                 */
                if (ti_locked == TI_WLOCKED)
                    INP_INFO_WUNLOCK(&V_tcbinfo);
                ti_locked = TI_UNLOCKED;

                TCPSTAT_INC(tcps_predack);

                /*
                 * "bad retransmit" recovery.
                 */
                if (tp->t_rxtshift == 1 &&
                    tp->t_flags & TF_PREVVALID &&
                    (int)(ticks - tp->t_badrxtwin) < 0) {
                    cc_cong_signal(tp, th, CC_RTO_ERR);
                }

                /*
                 * Recalculate the transmit timer / rtt.
                 *
                 * Some boxes send broken timestamp replies
                 * during the SYN+ACK phase, ignore
                 * timestamps of 0 or we could calculate a
                 * huge RTT and blow up the retransmit timer.
                 */
                if ((to.to_flags & TOF_TS) != 0 &&
                    to.to_tsecr) {
                    u_int t;

                    t = tcp_ts_getticks() - to.to_tsecr;
                    if (!tp->t_rttlow || tp->t_rttlow > t)
                        tp->t_rttlow = t;
                    tcp_xmit_timer(tp,
                        TCP_TS_TO_TICKS(t) + 1);
                } else if (tp->t_rtttime &&
                    SEQ_GT(th->th_ack, tp->t_rtseq)) {
                    if (!tp->t_rttlow ||
                        tp->t_rttlow > ticks - tp->t_rtttime)
                        tp->t_rttlow = ticks - tp->t_rtttime;
                    tcp_xmit_timer(tp,
                        ticks - tp->t_rtttime);
                }
                acked = BYTES_THIS_ACK(tp, th);

                /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
                hhook_run_tcp_est_in(tp, th, &to);

                TCPSTAT_INC(tcps_rcvackpack);
                TCPSTAT_ADD(tcps_rcvackbyte, acked);
                sbdrop(&so->so_snd, acked);
                if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
                    SEQ_LEQ(th->th_ack, tp->snd_recover))
                    tp->snd_recover = th->th_ack - 1;

                /*
                 * Let the congestion control algorithm update
                 * congestion control related information.  This
                 * typically means increasing the congestion
                 * window.
                 */
                cc_ack_received(tp, th, CC_ACK);

                tp->snd_una = th->th_ack;
                /*
                 * Pull snd_wl2 up to prevent seq wrap relative
                 * to th_ack.
                 */
                tp->snd_wl2 = th->th_ack;
                tp->t_dupacks = 0;
                m_freem(m);
                ND6_HINT(tp); /* Some progress has been made. */

                /*
                 * If all outstanding data are acked, stop
                 * retransmit timer, otherwise restart timer
                 * using current (possibly backed-off) value.
                 * If process is waiting for space,
                 * wakeup/selwakeup/signal.  If data
                 * are ready to send, let tcp_output
                 * decide between more output or persist.
                 */
#ifdef TCPDEBUG
                if (so->so_options & SO_DEBUG)
                    tcp_trace(TA_INPUT, ostate, tp,
                        (void *)tcp_saveipgen,
                        &tcp_savetcp, 0);
#endif
                if (tp->snd_una == tp->snd_max)
                    tcp_timer_activate(tp, TT_REXMT, 0);
                else if (!tcp_timer_active(tp, TT_PERSIST))
                    tcp_timer_activate(tp, TT_REXMT,
                        tp->t_rxtcur);
                sowwakeup(so);
                if (so->so_snd.sb_cc)
                    (void) tcp_output(tp);
                goto check_delack;
            }
        } else if (th->th_ack == tp->snd_una &&
            tlen <= sbspace(&so->so_rcv)) {
            int newsize = 0;	/* automatic sockbuf scaling */

            /*
             * This is a pure, in-sequence data packet with
             * nothing on the reassembly queue and we have enough
             * buffer space to take it.
             */
            if (ti_locked == TI_WLOCKED)
                INP_INFO_WUNLOCK(&V_tcbinfo);
            ti_locked = TI_UNLOCKED;

            /* Clean receiver SACK report if present */
            if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
                tcp_clean_sackreport(tp);
            TCPSTAT_INC(tcps_preddat);
            tp->rcv_nxt += tlen;
            /*
             * Pull snd_wl1 up to prevent seq wrap relative to
             * th_seq.
             */
            tp->snd_wl1 = th->th_seq;
            /*
             * Pull rcv_up up to prevent seq wrap relative to
             * rcv_nxt.
             */
            tp->rcv_up = tp->rcv_nxt;
            TCPSTAT_INC(tcps_rcvpack);
            TCPSTAT_ADD(tcps_rcvbyte, tlen);
            ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
            if (so->so_options & SO_DEBUG)
                tcp_trace(TA_INPUT, ostate, tp,
                    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
1771 /*
1772 * Automatic sizing of receive socket buffer. Often the send
1773 * buffer size is not optimally adjusted to the actual network
1774 * conditions at hand (delay bandwidth product). Setting the
1775 * buffer size too small limits throughput on links with high
1776 * bandwidth and high delay (eg. trans-continental/oceanic links).
1777 *
1778 * On the receive side the socket buffer memory is only rarely
1779 * used to any significant extent. This allows us to be much
1780 * more aggressive in scaling the receive socket buffer. For
1781 * the case that the buffer space is actually used to a large
1782 * extent and we run out of kernel memory we can simply drop
1783 * the new segments; TCP on the sender will just retransmit it
1784 * later. Setting the buffer size too big may only consume too
1785 * much kernel memory if the application doesn't read() from
1786 * the socket or packet loss or reordering makes use of the
1787 * reassembly queue.
1788 *
1789 * The criteria to step up the receive buffer one notch are:
1790 * 1. the number of bytes received during the time it takes
1791 * one timestamp to be reflected back to us (the RTT);
1792 * 2. received bytes per RTT is within seven eighth of the
1793 * current socket buffer size;
1794 * 3. receive buffer size has not hit maximal automatic size;
1795 *
1796 * This algorithm does at most one step per RTT and only if
1797 * we receive a bulk stream without packet losses or reordering.
1798 * Shrinking the buffer during idle times is not necessary as
1799 * it doesn't consume any memory when idle.
1800 *
1801 * TODO: Only step up if the application is actually consuming
1802 * the buffer, to better manage the socket buffer resources.
1803 */
1804 if (V_tcp_do_autorcvbuf &&
1805 to.to_tsecr &&
1806 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1807 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
1808 to.to_tsecr - tp->rfbuf_ts < hz) {
1809 if (tp->rfbuf_cnt >
1810 (so->so_rcv.sb_hiwat / 8 * 7) &&
1811 so->so_rcv.sb_hiwat <
1812 V_tcp_autorcvbuf_max) {
1813 newsize =
1814 min(so->so_rcv.sb_hiwat +
1815 V_tcp_autorcvbuf_inc,
1816 V_tcp_autorcvbuf_max);
1817 }
1818 /* Start over with next RTT. */
1819 tp->rfbuf_ts = 0;
1820 tp->rfbuf_cnt = 0;
1821 } else
1822 tp->rfbuf_cnt += tlen; /* add up */
1823 }
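/*
 * Worked example of the stepping rule above (hypothetical sysctl
 * values, not taken from this source): with sb_hiwat = 65536,
 * net.inet.tcp.recvbuf_inc = 16384 and recvbuf_max = 2097152, the
 * step is taken only if more than 65536 / 8 * 7 = 57344 bytes
 * arrived within one reflected-timestamp RTT, in which case the
 * buffer is raised to min(65536 + 16384, 2097152) = 81920 bytes.
 */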
1824
1825 /* Add data to socket buffer. */
1826 SOCKBUF_LOCK(&so->so_rcv);
1827 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1828 m_freem(m);
1829 } else {
1830 /*
1831 * Set new socket buffer size.
1832 * Give up when limit is reached.
1833 */
1834 if (newsize)
1835 if (!sbreserve_locked(&so->so_rcv,
1836 newsize, so, NULL))
1837 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1838 m_adj(m, drop_hdrlen); /* delayed header drop */
1839 sbappendstream_locked(&so->so_rcv, m);
1840 }
1841 /* NB: sorwakeup_locked() does an implicit unlock. */
1842 sorwakeup_locked(so);
1843 if (DELAY_ACK(tp, tlen)) {
1844 tp->t_flags |= TF_DELACK;
1845 } else {
1846 tp->t_flags |= TF_ACKNOW;
1847 tcp_output(tp);
1848 }
1849 goto check_delack;
1850 }
1851 }
1852
1853 /*
1854 * Calculate amount of space in receive window,
1855 * and then do TCP input processing.
1856 * Receive window is amount of space in rcv queue,
1857 * but not less than advertised window.
1858 */
1859 win = sbspace(&so->so_rcv);
1860 if (win < 0)
1861 win = 0;
1862 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1863
1864 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1865 tp->rfbuf_ts = 0;
1866 tp->rfbuf_cnt = 0;
1867
1868 switch (tp->t_state) {
1869
1870 /*
1871 * If the state is SYN_RECEIVED:
1872 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1873 */
1874 case TCPS_SYN_RECEIVED:
1875 if ((thflags & TH_ACK) &&
1876 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1877 SEQ_GT(th->th_ack, tp->snd_max))) {
1878 rstreason = BANDLIM_RST_OPENPORT;
1879 goto dropwithreset;
1880 }
1881 break;
1882
1883 /*
1884 * If the state is SYN_SENT:
1885 * if seg contains an ACK, but not for our SYN, drop the input.
1886 * if seg contains a RST, then drop the connection.
1887 * if seg does not contain SYN, then drop it.
1888 * Otherwise this is an acceptable SYN segment
1889 * initialize tp->rcv_nxt and tp->irs
1890 * if seg contains ack then advance tp->snd_una
1891 * if seg contains an ECE and ECN support is enabled, the stream
1892 * is ECN capable.
1893 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1894 * arrange for segment to be acked (eventually)
1895 * continue processing rest of data/controls, beginning with URG
1896 */
1897 case TCPS_SYN_SENT:
1898 if ((thflags & TH_ACK) &&
1899 (SEQ_LEQ(th->th_ack, tp->iss) ||
1900 SEQ_GT(th->th_ack, tp->snd_max))) {
1901 rstreason = BANDLIM_UNLIMITED;
1902 goto dropwithreset;
1903 }
1904 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
1905 TCP_PROBE5(connect__refused, NULL, tp,
1906 mtod(m, const char *), tp, th);
1907 tp = tcp_drop(tp, ECONNREFUSED);
1908 }
1909 if (thflags & TH_RST)
1910 goto drop;
1911 if (!(thflags & TH_SYN))
1912 goto drop;
1913
1914 tp->irs = th->th_seq;
1915 tcp_rcvseqinit(tp);
1916 if (thflags & TH_ACK) {
1917 TCPSTAT_INC(tcps_connects);
1918 soisconnected(so);
1919 #ifdef MAC
1920 mac_socketpeer_set_from_mbuf(m, so);
1921 #endif
1922 /* Do window scaling on this connection? */
1923 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1924 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1925 tp->rcv_scale = tp->request_r_scale;
1926 }
1927 tp->rcv_adv += imin(tp->rcv_wnd,
1928 TCP_MAXWIN << tp->rcv_scale);
1929 tp->snd_una++; /* SYN is acked */
1930 /*
1931 * If there's data, delay ACK; if there's also a FIN
1932 * ACKNOW will be turned on later.
1933 */
1934 if (DELAY_ACK(tp, tlen) && tlen != 0)
1935 tcp_timer_activate(tp, TT_DELACK,
1936 tcp_delacktime);
1937 else
1938 tp->t_flags |= TF_ACKNOW;
1939
1940 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
1941 tp->t_flags |= TF_ECN_PERMIT;
1942 TCPSTAT_INC(tcps_ecn_shs);
1943 }
1944
1945 /*
1946 * Received <SYN,ACK> in SYN_SENT[*] state.
1947 * Transitions:
1948 * SYN_SENT --> ESTABLISHED
1949 * SYN_SENT* --> FIN_WAIT_1
1950 */
1951 tp->t_starttime = ticks;
1952 if (tp->t_flags & TF_NEEDFIN) {
1953 tcp_state_change(tp, TCPS_FIN_WAIT_1);
1954 tp->t_flags &= ~TF_NEEDFIN;
1955 thflags &= ~TH_SYN;
1956 } else {
1957 tcp_state_change(tp, TCPS_ESTABLISHED);
1958 TCP_PROBE5(connect__established, NULL, tp,
1959 mtod(m, const char *), tp, th);
1960 cc_conn_init(tp);
1961 tcp_timer_activate(tp, TT_KEEP,
1962 TP_KEEPIDLE(tp));
1963 }
1964 } else {
1965 /*
1966 * Received initial SYN in SYN-SENT[*] state =>
1967 * simultaneous open. If segment contains CC option
1968 * and there is a cached CC, apply TAO test.
1969 * If it succeeds, connection is half-synchronized.
1970 * Otherwise, do 3-way handshake:
1971 * SYN-SENT -> SYN-RECEIVED
1972 * SYN-SENT* -> SYN-RECEIVED*
1973 * If there was no CC option, clear cached CC value.
1974 */
1975 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
1976 tcp_timer_activate(tp, TT_REXMT, 0);
1977 tcp_state_change(tp, TCPS_SYN_RECEIVED);
1978 }
1979
1980 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
1981 "ti_locked %d", __func__, ti_locked));
1982 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1983 INP_WLOCK_ASSERT(tp->t_inpcb);
1984
1985 /*
1986 * Advance th->th_seq to correspond to first data byte.
1987 * If data, trim to stay within window,
1988 * dropping FIN if necessary.
1989 */
1990 th->th_seq++;
1991 if (tlen > tp->rcv_wnd) {
1992 todrop = tlen - tp->rcv_wnd;
1993 m_adj(m, -todrop);
1994 tlen = tp->rcv_wnd;
1995 thflags &= ~TH_FIN;
1996 TCPSTAT_INC(tcps_rcvpackafterwin);
1997 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
1998 }
1999 tp->snd_wl1 = th->th_seq - 1;
2000 tp->rcv_up = th->th_seq;
2001 /*
2002 * Client side of transaction: already sent SYN and data.
2003 * If the remote host used T/TCP to validate the SYN,
2004 * our data will be ACK'd; if so, enter normal data segment
2005 * processing in the middle of step 5, ack processing.
2006 * Otherwise, goto step 6.
2007 */
2008 if (thflags & TH_ACK)
2009 goto process_ACK;
2010
2011 goto step6;
2012
2013 /*
2014 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2015 * do normal processing.
2016 *
2017 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2018 */
2019 case TCPS_LAST_ACK:
2020 case TCPS_CLOSING:
2021 break; /* continue normal processing */
2022 }
2023
2024 /*
2025 * States other than LISTEN or SYN_SENT.
2026 * First check the RST flag and sequence number since reset segments
2027 * are exempt from the timestamp and connection count tests. This
2028 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2029 * below which allowed reset segments in half the sequence space
2030 * to fall through and be processed (which gives forged reset
2031 * segments with a random sequence number a 50 percent chance of
2032 * killing a connection).
2033 * Then check timestamp, if present.
2034 * Then check the connection count, if present.
2035 * Then check that at least some bytes of segment are within
2036 * receive window. If segment begins before rcv_nxt,
2037 * drop leading data (and SYN); if nothing left, just ack.
2038 *
2040 * If the RST bit is set, check the sequence number to see
2041 * if this is a valid reset segment.
2042 * RFC 793 page 37:
2043 * In all states except SYN-SENT, all reset (RST) segments
2044 * are validated by checking their SEQ-fields. A reset is
2045 * valid if its sequence number is in the window.
2046 * Note: this does not take into account delayed ACKs, so
2047 * we should test against last_ack_sent instead of rcv_nxt.
2048 * The sequence number in the reset segment is normally an
2049 * echo of our outgoing acknowledgement numbers, but some hosts
2050 * send a reset with the sequence number at the rightmost edge
2051 * of our receive window, and we have to handle this case.
2052 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
2053 * that brute force RST attacks are possible. To combat this,
2054 * we use a much stricter check while in the ESTABLISHED state,
2055 * only accepting RSTs where the sequence number is equal to
2056 * last_ack_sent. In all other states (the states in which a
2057 * RST is more likely), the more permissive check is used.
2058 * If we have multiple segments in flight, the initial reset
2059 * segment sequence numbers will be to the left of last_ack_sent,
2060 * but they will eventually catch up.
2061 * In any case, it never made sense to trim reset segments to
2062 * fit the receive window since RFC 1122 says:
2063 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
2064 *
2065 * A TCP SHOULD allow a received RST segment to include data.
2066 *
2067 * DISCUSSION
2068 * It has been suggested that a RST segment could contain
2069 * ASCII text that encoded and explained the cause of the
2070 * RST. No standard has yet been established for such
2071 * data.
2072 *
2073 * If the reset segment passes the sequence number test examine
2074 * the state:
2075 * SYN_RECEIVED STATE:
2076 * If passive open, return to LISTEN state.
2077 * If active open, inform user that connection was refused.
2078 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
2079 * Inform user that connection was reset, and close tcb.
2080 * CLOSING, LAST_ACK STATES:
2081 * Close the tcb.
2082 * TIME_WAIT STATE:
2083 * Drop the segment - see Stevens, vol. 2, p. 964 and
2084 * RFC 1337.
2085 */
2086 if (thflags & TH_RST) {
2087 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2088 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2089 switch (tp->t_state) {
2090
2091 case TCPS_SYN_RECEIVED:
2092 so->so_error = ECONNREFUSED;
2093 goto close;
2094
2095 case TCPS_ESTABLISHED:
2096 if (V_tcp_insecure_rst == 0 &&
2097 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
2098 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
2099 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2100 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
2101 TCPSTAT_INC(tcps_badrst);
2102 goto drop;
2103 }
2104 /* FALLTHROUGH */
2105 case TCPS_FIN_WAIT_1:
2106 case TCPS_FIN_WAIT_2:
2107 case TCPS_CLOSE_WAIT:
2108 so->so_error = ECONNRESET;
2109 close:
2110 KASSERT(ti_locked == TI_WLOCKED,
2111 ("tcp_do_segment: TH_RST 1 ti_locked %d",
2112 ti_locked));
2113 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2114
2115 tcp_state_change(tp, TCPS_CLOSED);
2116 TCPSTAT_INC(tcps_drops);
2117 tp = tcp_close(tp);
2118 break;
2119
2120 case TCPS_CLOSING:
2121 case TCPS_LAST_ACK:
2122 KASSERT(ti_locked == TI_WLOCKED,
2123 ("tcp_do_segment: TH_RST 2 ti_locked %d",
2124 ti_locked));
2125 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2126
2127 tp = tcp_close(tp);
2128 break;
2129 }
2130 }
2131 goto drop;
2132 }
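/*
 * Worked example of the RST checks above (hypothetical numbers):
 * suppose last_ack_sent = rcv_nxt = 1000 and rcv_wnd = 65535. In
 * most states any RST with 999 <= th_seq <= 66535 is honored. In
 * ESTABLISHED with net.inet.tcp.insecure_rst = 0, only th_seq
 * within one sequence number of rcv_nxt or last_ack_sent, i.e. in
 * [999, 1001], passes; a forged RST at th_seq = 30000 bumps
 * tcps_badrst and is dropped even though it lies inside the
 * receive window.
 */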
2133
2134 /*
2135 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2136 * and it's less than ts_recent, drop it.
2137 */
2138 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2139 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2140
2141 /* Check to see if ts_recent is over 24 days old. */
2142 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2143 /*
2144 * Invalidate ts_recent. If this segment updates
2145 * ts_recent, the age will be reset later and ts_recent
2146 * will get a valid value. If it does not, setting
2147 * ts_recent to zero will at least satisfy the
2148 * requirement that zero be placed in the timestamp
2149 * echo reply when ts_recent isn't valid. The
2150 * age isn't reset until we get a valid ts_recent
2151 * because we don't want out-of-order segments to be
2152 * dropped when ts_recent is old.
2153 */
2154 tp->ts_recent = 0;
2155 } else {
2156 TCPSTAT_INC(tcps_rcvduppack);
2157 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2158 TCPSTAT_INC(tcps_pawsdrop);
2159 if (tlen)
2160 goto dropafterack;
2161 goto drop;
2162 }
2163 }
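/*
 * PAWS example (hypothetical values): with ts_recent = 500000, a
 * segment carrying to_tsval = 499990 is presumed to be an old
 * duplicate and is dropped (ACKed first if it carried data, so a
 * peer that lost our ACK can resynchronize). The 24-day check
 * guards against a stale ts_recent after a long idle period:
 * rather than discarding live segments forever, ts_recent is
 * zeroed and relearned from the next in-window segment.
 */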
2164
2165 /*
2166 * In the SYN-RECEIVED state, validate that the packet belongs to
2167 * this connection before trimming the data to fit the receive
2168 * window. Check the sequence number versus IRS since we know
2169 * the sequence numbers haven't wrapped. This is a partial fix
2170 * for the "LAND" DoS attack.
2171 */
2172 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2173 rstreason = BANDLIM_RST_OPENPORT;
2174 goto dropwithreset;
2175 }
2176
2177 todrop = tp->rcv_nxt - th->th_seq;
2178 if (todrop > 0) {
2179 if (thflags & TH_SYN) {
2180 thflags &= ~TH_SYN;
2181 th->th_seq++;
2182 if (th->th_urp > 1)
2183 th->th_urp--;
2184 else
2185 thflags &= ~TH_URG;
2186 todrop--;
2187 }
2188 /*
2189 * Following if statement from Stevens, vol. 2, p. 960.
2190 */
2191 if (todrop > tlen
2192 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2193 /*
2194 * Any valid FIN must be to the left of the window.
2195 * At this point the FIN must be a duplicate or out
2196 * of sequence; drop it.
2197 */
2198 thflags &= ~TH_FIN;
2199
2200 /*
2201 * Send an ACK to resynchronize and drop any data.
2202 * But keep on processing for RST or ACK.
2203 */
2204 tp->t_flags |= TF_ACKNOW;
2205 todrop = tlen;
2206 TCPSTAT_INC(tcps_rcvduppack);
2207 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2208 } else {
2209 TCPSTAT_INC(tcps_rcvpartduppack);
2210 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2211 }
2212 drop_hdrlen += todrop; /* drop from the top afterwards */
2213 th->th_seq += todrop;
2214 tlen -= todrop;
2215 if (th->th_urp > todrop)
2216 th->th_urp -= todrop;
2217 else {
2218 thflags &= ~TH_URG;
2219 th->th_urp = 0;
2220 }
2221 }
2222
2223 /*
2224 * If new data are received on a connection after the
2225 * user processes are gone, then RST the other end.
2226 */
2227 if ((so->so_state & SS_NOFDREF) &&
2228 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2229 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDREF && "
2230 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2231 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2232
2233 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2234 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2235 "after socket was closed, "
2236 "sending RST and removing tcpcb\n",
2237 s, __func__, tcpstates[tp->t_state], tlen);
2238 free(s, M_TCPLOG);
2239 }
2240 tp = tcp_close(tp);
2241 TCPSTAT_INC(tcps_rcvafterclose);
2242 rstreason = BANDLIM_UNLIMITED;
2243 goto dropwithreset;
2244 }
2245
2246 /*
2247 * If segment ends after window, drop trailing data
2248 * (and PUSH and FIN); if nothing left, just ACK.
2249 */
2250 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2251 if (todrop > 0) {
2252 TCPSTAT_INC(tcps_rcvpackafterwin);
2253 if (todrop >= tlen) {
2254 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2255 /*
2256 * If window is closed can only take segments at
2257 * window edge, and have to drop data and PUSH from
2258 * incoming segments. Continue processing, but
2259 * remember to ack. Otherwise, drop segment
2260 * and ack.
2261 */
2262 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2263 tp->t_flags |= TF_ACKNOW;
2264 TCPSTAT_INC(tcps_rcvwinprobe);
2265 } else
2266 goto dropafterack;
2267 } else
2268 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2269 m_adj(m, -todrop);
2270 tlen -= todrop;
2271 thflags &= ~(TH_PUSH|TH_FIN);
2272 }
2273
2274 /*
2275 * If last ACK falls within this segment's sequence numbers,
2276 * record its timestamp.
2277 * NOTE:
2278 * 1) That the test incorporates suggestions from the latest
2279 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2280 * 2) That updating only on newer timestamps interferes with
2281 * our earlier PAWS tests, so this check should be solely
2282 * predicated on the sequence space of this segment.
2283 * 3) That we modify the segment boundary check to be
2284 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2285 * instead of RFC1323's
2286 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2287 * This modified check allows us to overcome RFC1323's
2288 * limitations as described in Stevens TCP/IP Illustrated
2289 * Vol. 2 p.869. In such cases, we can still calculate the
2290 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2291 */
2292 if ((to.to_flags & TOF_TS) != 0 &&
2293 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2294 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2295 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2296 tp->ts_recent_age = tcp_ts_getticks();
2297 tp->ts_recent = to.to_tsval;
2298 }
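/*
 * Example of the modified boundary test (hypothetical numbers):
 * with last_ack_sent = 2000, a pure ACK with th_seq = 2000 and
 * tlen = 0 satisfies SEG.SEQ <= Last.ACK.Sent <= SEG.SEQ + SEG.Len
 * and still updates ts_recent, whereas RFC1323's strict "<" test
 * would skip it; a data segment covering [1500, 2500) also
 * qualifies, while one starting at th_seq = 2001 does not.
 */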
2299
2300 /*
2301 * If a SYN is in the window, then this is an
2302 * error and we send an RST and drop the connection.
2303 */
2304 if (thflags & TH_SYN) {
2305 KASSERT(ti_locked == TI_WLOCKED,
2306 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2307 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2308
2309 tp = tcp_drop(tp, ECONNRESET);
2310 rstreason = BANDLIM_UNLIMITED;
2311 goto drop;
2312 }
2313
2314 /*
2315 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2316 * flag is on (half-synchronized state), then queue data for
2317 * later processing; else drop segment and return.
2318 */
2319 if ((thflags & TH_ACK) == 0) {
2320 if (tp->t_state == TCPS_SYN_RECEIVED ||
2321 (tp->t_flags & TF_NEEDSYN))
2322 goto step6;
2323 else if (tp->t_flags & TF_ACKNOW)
2324 goto dropafterack;
2325 else
2326 goto drop;
2327 }
2328
2329 /*
2330 * Ack processing.
2331 */
2332 switch (tp->t_state) {
2333
2334 /*
2335 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2336 * ESTABLISHED state and continue processing.
2337 * The ACK was checked above.
2338 */
2339 case TCPS_SYN_RECEIVED:
2340
2341 TCPSTAT_INC(tcps_connects);
2342 soisconnected(so);
2343 /* Do window scaling? */
2344 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2345 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2346 tp->rcv_scale = tp->request_r_scale;
2347 tp->snd_wnd = tiwin;
2348 }
2349 /*
2350 * Make transitions:
2351 * SYN-RECEIVED -> ESTABLISHED
2352 * SYN-RECEIVED* -> FIN-WAIT-1
2353 */
2354 tp->t_starttime = ticks;
2355 if (tp->t_flags & TF_NEEDFIN) {
2356 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2357 tp->t_flags &= ~TF_NEEDFIN;
2358 } else {
2359 tcp_state_change(tp, TCPS_ESTABLISHED);
2360 TCP_PROBE5(accept__established, NULL, tp,
2361 mtod(m, const char *), tp, th);
2362 cc_conn_init(tp);
2363 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2364 }
2365 /*
2366 * If segment contains data or ACK, will call tcp_reass()
2367 * later; if not, do so now to pass queued data to user.
2368 */
2369 if (tlen == 0 && (thflags & TH_FIN) == 0)
2370 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2371 (struct mbuf *)0);
2372 tp->snd_wl1 = th->th_seq - 1;
2373 /* FALLTHROUGH */
2374
2375 /*
2376 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2377 * ACKs. If the ack is in the range
2378 * tp->snd_una < th->th_ack <= tp->snd_max
2379 * then advance tp->snd_una to th->th_ack and drop
2380 * data from the retransmission queue. If this ACK reflects
2381 * more up-to-date window information, we update our window.
2382 */
2383 case TCPS_ESTABLISHED:
2384 case TCPS_FIN_WAIT_1:
2385 case TCPS_FIN_WAIT_2:
2386 case TCPS_CLOSE_WAIT:
2387 case TCPS_CLOSING:
2388 case TCPS_LAST_ACK:
2389 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2390 TCPSTAT_INC(tcps_rcvacktoomuch);
2391 goto dropafterack;
2392 }
2393 if ((tp->t_flags & TF_SACK_PERMIT) &&
2394 ((to.to_flags & TOF_SACK) ||
2395 !TAILQ_EMPTY(&tp->snd_holes)))
2396 tcp_sack_doack(tp, &to, th->th_ack);
2397
2398 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2399 hhook_run_tcp_est_in(tp, th, &to);
2400
2401 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2402 if (tlen == 0 && tiwin == tp->snd_wnd) {
2403 TCPSTAT_INC(tcps_rcvdupack);
2404 /*
2405 * If we have outstanding data (other than
2406 * a window probe), this is a completely
2407 * duplicate ack (ie, window info didn't
2408 * change), the ack is the biggest we've
2409 * seen and we've seen exactly our rexmt
2410 * threshold of them, assume a packet
2411 * has been dropped and retransmit it.
2412 * Kludge snd_nxt & the congestion
2413 * window so we send only this one
2414 * packet.
2415 *
2416 * We know we're losing at the current
2417 * window size so do congestion avoidance
2418 * (set ssthresh to half the current window
2419 * and pull our congestion window back to
2420 * the new ssthresh).
2421 *
2422 * Dup acks mean that packets have left the
2423 * network (they're now cached at the receiver)
2424 * so bump cwnd by the amount in the receiver
2425 * to keep a constant cwnd packets in the
2426 * network.
2427 *
2428 * When using TCP ECN, notify the peer that
2429 * we reduced the cwnd.
2430 */
2431 if (!tcp_timer_active(tp, TT_REXMT) ||
2432 th->th_ack != tp->snd_una)
2433 tp->t_dupacks = 0;
2434 else if (++tp->t_dupacks > tcprexmtthresh ||
2435 IN_FASTRECOVERY(tp->t_flags)) {
2436 cc_ack_received(tp, th, CC_DUPACK);
2437 if ((tp->t_flags & TF_SACK_PERMIT) &&
2438 IN_FASTRECOVERY(tp->t_flags)) {
2439 int awnd;
2440
2441 /*
2442 * Compute the amount of data in flight first.
2443 * We can inject new data into the pipe iff
2444 * we have less than 1/2 the original window's
2445 * worth of data in flight.
2446 */
2447 awnd = (tp->snd_nxt - tp->snd_fack) +
2448 tp->sackhint.sack_bytes_rexmit;
2449 if (awnd < tp->snd_ssthresh) {
2450 tp->snd_cwnd += tp->t_maxseg;
2451 if (tp->snd_cwnd > tp->snd_ssthresh)
2452 tp->snd_cwnd = tp->snd_ssthresh;
2453 }
2454 } else
2455 tp->snd_cwnd += tp->t_maxseg;
2456 if ((thflags & TH_FIN) &&
2457 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2458 /*
2459 * If it's a FIN we need to process
2460 * it to avoid a race where both
2461 * sides enter FIN-WAIT and send FIN|ACK
2462 * at the same time.
2463 */
2464 break;
2465 }
2466 (void) tcp_output(tp);
2467 goto drop;
2468 } else if (tp->t_dupacks == tcprexmtthresh) {
2469 tcp_seq onxt = tp->snd_nxt;
2470
2471 /*
2472 * If we're doing sack, check to
2473 * see if we're already in sack
2474 * recovery. If we're not doing sack,
2475 * check to see if we're in newreno
2476 * recovery.
2477 */
2478 if (tp->t_flags & TF_SACK_PERMIT) {
2479 if (IN_FASTRECOVERY(tp->t_flags)) {
2480 tp->t_dupacks = 0;
2481 break;
2482 }
2483 } else {
2484 if (SEQ_LEQ(th->th_ack,
2485 tp->snd_recover)) {
2486 tp->t_dupacks = 0;
2487 break;
2488 }
2489 }
2490 /* Congestion signal before ack. */
2491 cc_cong_signal(tp, th, CC_NDUPACK);
2492 cc_ack_received(tp, th, CC_DUPACK);
2493 tcp_timer_activate(tp, TT_REXMT, 0);
2494 tp->t_rtttime = 0;
2495 if (tp->t_flags & TF_SACK_PERMIT) {
2496 TCPSTAT_INC(
2497 tcps_sack_recovery_episode);
2498 tp->sack_newdata = tp->snd_nxt;
2499 tp->snd_cwnd = tp->t_maxseg;
2500 (void) tcp_output(tp);
2501 goto drop;
2502 }
2503 tp->snd_nxt = th->th_ack;
2504 tp->snd_cwnd = tp->t_maxseg;
2505 if ((thflags & TH_FIN) &&
2506 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2507 /*
2508 * If it's a FIN we need to process
2509 * it to avoid a race where both
2510 * sides enter FIN-WAIT and send FIN|ACK
2511 * at the same time.
2512 */
2513 break;
2514 }
2515 (void) tcp_output(tp);
2516 KASSERT(tp->snd_limited <= 2,
2517 ("%s: tp->snd_limited too big",
2518 __func__));
2519 tp->snd_cwnd = tp->snd_ssthresh +
2520 tp->t_maxseg *
2521 (tp->t_dupacks - tp->snd_limited);
2522 if (SEQ_GT(onxt, tp->snd_nxt))
2523 tp->snd_nxt = onxt;
2524 goto drop;
2525 } else if (V_tcp_do_rfc3042) {
2526 cc_ack_received(tp, th, CC_DUPACK);
2527 u_long oldcwnd = tp->snd_cwnd;
2528 tcp_seq oldsndmax = tp->snd_max;
2529 u_int sent;
2530 int avail;
2531
2532 KASSERT(tp->t_dupacks == 1 ||
2533 tp->t_dupacks == 2,
2534 ("%s: dupacks not 1 or 2",
2535 __func__));
2536 if (tp->t_dupacks == 1)
2537 tp->snd_limited = 0;
2538 tp->snd_cwnd =
2539 (tp->snd_nxt - tp->snd_una) +
2540 (tp->t_dupacks - tp->snd_limited) *
2541 tp->t_maxseg;
2542 if ((thflags & TH_FIN) &&
2543 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2544 /*
2545 * If it's a FIN we need to process
2546 * it to avoid a race where both
2547 * sides enter FIN-WAIT and send FIN|ACK
2548 * at the same time.
2549 */
2550 break;
2551 }
2552 /*
2553 * Only call tcp_output when there
2554 * is new data available to be sent.
2555 * Otherwise we would send pure ACKs.
2556 */
2557 SOCKBUF_LOCK(&so->so_snd);
2558 avail = so->so_snd.sb_cc -
2559 (tp->snd_nxt - tp->snd_una);
2560 SOCKBUF_UNLOCK(&so->so_snd);
2561 if (avail > 0)
2562 (void) tcp_output(tp);
2563 sent = tp->snd_max - oldsndmax;
2564 if (sent > tp->t_maxseg) {
2565 KASSERT((tp->t_dupacks == 2 &&
2566 tp->snd_limited == 0) ||
2567 (sent == tp->t_maxseg + 1 &&
2568 tp->t_flags & TF_SENTFIN),
2569 ("%s: sent too much",
2570 __func__));
2571 tp->snd_limited = 2;
2572 } else if (sent > 0)
2573 ++tp->snd_limited;
2574 tp->snd_cwnd = oldcwnd;
2575 goto drop;
2576 }
2577 } else
2578 tp->t_dupacks = 0;
2579 break;
2580 }
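/*
 * Dup-ack walkthrough of the block above (hypothetical trace,
 * tcprexmtthresh = 3, snd_una stuck at 5000): dupacks 1 and 2
 * take the RFC3042 limited-transmit path (when tcp_do_rfc3042 is
 * enabled) and may each send one new segment; dupack 3 signals
 * CC_NDUPACK and fast-retransmits the segment at 5000; dupacks 4
 * and up inflate cwnd by one t_maxseg each so new data keeps
 * flowing until the hole is ACKed.
 */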
2581
2582 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2583 ("%s: th_ack <= snd_una", __func__));
2584
2585 /*
2586 * If the congestion window was inflated to account
2587 * for the other side's cached packets, retract it.
2588 */
2589 if (IN_FASTRECOVERY(tp->t_flags)) {
2590 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2591 if (tp->t_flags & TF_SACK_PERMIT)
2592 tcp_sack_partialack(tp, th);
2593 else
2594 tcp_newreno_partial_ack(tp, th);
2595 } else
2596 cc_post_recovery(tp, th);
2597 }
2598 tp->t_dupacks = 0;
2599 /*
2600 * If we reach this point, ACK is not a duplicate,
2601 * i.e., it ACKs something we sent.
2602 */
2603 if (tp->t_flags & TF_NEEDSYN) {
2604 /*
2605 * T/TCP: Connection was half-synchronized, and our
2606 * SYN has been ACK'd (so connection is now fully
2607 * synchronized). Go to non-starred state,
2608 * increment snd_una for ACK of SYN, and check if
2609 * we can do window scaling.
2610 */
2611 tp->t_flags &= ~TF_NEEDSYN;
2612 tp->snd_una++;
2613 /* Do window scaling? */
2614 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2615 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2616 tp->rcv_scale = tp->request_r_scale;
2617 /* Send window already scaled. */
2618 }
2619 }
2620
2621 process_ACK:
2622 INP_WLOCK_ASSERT(tp->t_inpcb);
2623
2624 acked = BYTES_THIS_ACK(tp, th);
2625 TCPSTAT_INC(tcps_rcvackpack);
2626 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2627
2628 /*
2629 * If we just performed our first retransmit, and the ACK
2630 * arrives within our recovery window, then it was a mistake
2631 * to do the retransmit in the first place. Recover our
2632 * original cwnd and ssthresh, and proceed to transmit where
2633 * we left off.
2634 */
2635 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2636 (int)(ticks - tp->t_badrxtwin) < 0)
2637 cc_cong_signal(tp, th, CC_RTO_ERR);
2638
2639 /*
2640 * If we have a timestamp reply, update smoothed
2641 * round trip time. If no timestamp is present but
2642 * transmit timer is running and timed sequence
2643 * number was acked, update smoothed round trip time.
2644 * Since we now have an rtt measurement, cancel the
2645 * timer backoff (cf., Phil Karn's retransmit alg.).
2646 * Recompute the initial retransmit timer.
2647 *
2648 * Some boxes send broken timestamp replies
2649 * during the SYN+ACK phase, ignore
2650 * timestamps of 0 or we could calculate a
2651 * huge RTT and blow up the retransmit timer.
2652 */
2653 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2654 u_int t;
2655
2656 t = tcp_ts_getticks() - to.to_tsecr;
2657 if (!tp->t_rttlow || tp->t_rttlow > t)
2658 tp->t_rttlow = t;
2659 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2660 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2661 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2662 tp->t_rttlow = ticks - tp->t_rtttime;
2663 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2664 }
2665
2666 /*
2667 * If all outstanding data is acked, stop retransmit
2668 * timer and remember to restart (more output or persist).
2669 * If there is more data to be acked, restart retransmit
2670 * timer, using current (possibly backed-off) value.
2671 */
2672 if (th->th_ack == tp->snd_max) {
2673 tcp_timer_activate(tp, TT_REXMT, 0);
2674 needoutput = 1;
2675 } else if (!tcp_timer_active(tp, TT_PERSIST))
2676 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2677
2678 /*
2679 * If no data (only SYN) was ACK'd,
2680 * skip rest of ACK processing.
2681 */
2682 if (acked == 0)
2683 goto step6;
2684
2685 /*
2686 * Let the congestion control algorithm update congestion
2687 * control related information. This typically means increasing
2688 * the congestion window.
2689 */
2690 cc_ack_received(tp, th, CC_ACK);
2691
2692 SOCKBUF_LOCK(&so->so_snd);
2693 if (acked > so->so_snd.sb_cc) {
2694 tp->snd_wnd -= so->so_snd.sb_cc;
2695 mfree = sbcut_locked(&so->so_snd,
2696 (int)so->so_snd.sb_cc);
2697 ourfinisacked = 1;
2698 } else {
2699 mfree = sbcut_locked(&so->so_snd, acked);
2700 tp->snd_wnd -= acked;
2701 ourfinisacked = 0;
2702 }
2703 /* NB: sowwakeup_locked() does an implicit unlock. */
2704 sowwakeup_locked(so);
2705 m_freem(mfree);
2706 /* Detect una wraparound. */
2707 if (!IN_RECOVERY(tp->t_flags) &&
2708 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2709 SEQ_LEQ(th->th_ack, tp->snd_recover))
2710 tp->snd_recover = th->th_ack - 1;
2711 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2712 if (IN_RECOVERY(tp->t_flags) &&
2713 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2714 EXIT_RECOVERY(tp->t_flags);
2715 }
2716 tp->snd_una = th->th_ack;
2717 if (tp->t_flags & TF_SACK_PERMIT) {
2718 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2719 tp->snd_recover = tp->snd_una;
2720 }
2721 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2722 tp->snd_nxt = tp->snd_una;
2723
2724 switch (tp->t_state) {
2725
2726 /*
2727 * In FIN_WAIT_1 STATE in addition to the processing
2728 * for the ESTABLISHED state if our FIN is now acknowledged
2729 * then enter FIN_WAIT_2.
2730 */
2731 case TCPS_FIN_WAIT_1:
2732 if (ourfinisacked) {
2733 /*
2734 * If we can't receive any more
2735 * data, then closing user can proceed.
2736 * Starting the timer is contrary to the
2737 * specification, but if we don't get a FIN
2738 * we'll hang forever.
2739 *
2740 * XXXjl:
2741 * we should release the tp also, and use a
2742 * compressed state.
2743 */
2744 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2745 soisdisconnected(so);
2746 tcp_timer_activate(tp, TT_2MSL,
2747 (tcp_fast_finwait2_recycle ?
2748 tcp_finwait2_timeout :
2749 TP_MAXIDLE(tp)));
2750 }
2751 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2752 }
2753 break;
2754
2755 /*
2756 * In CLOSING STATE in addition to the processing for
2757 * the ESTABLISHED state if the ACK acknowledges our FIN
2758 * then enter the TIME-WAIT state, otherwise ignore
2759 * the segment.
2760 */
2761 case TCPS_CLOSING:
2762 if (ourfinisacked) {
2763 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2764 tcp_twstart(tp);
2765 INP_INFO_WUNLOCK(&V_tcbinfo);
2766 m_freem(m);
2767 return;
2768 }
2769 break;
2770
2771 /*
2772 * In LAST_ACK, we may still be waiting for data to drain
2773 * and/or to be acked, as well as for the ack of our FIN.
2774 * If our FIN is now acknowledged, delete the TCB,
2775 * enter the closed state and return.
2776 */
2777 case TCPS_LAST_ACK:
2778 if (ourfinisacked) {
2779 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2780 tp = tcp_close(tp);
2781 goto drop;
2782 }
2783 break;
2784 }
2785 }
2786
2787 step6:
2788 INP_WLOCK_ASSERT(tp->t_inpcb);
2789
2790 /*
2791 * Update window information.
2792 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2793 */
2794 if ((thflags & TH_ACK) &&
2795 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2796 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2797 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2798 /* keep track of pure window updates */
2799 if (tlen == 0 &&
2800 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2801 TCPSTAT_INC(tcps_rcvwinupd);
2802 tp->snd_wnd = tiwin;
2803 tp->snd_wl1 = th->th_seq;
2804 tp->snd_wl2 = th->th_ack;
2805 if (tp->snd_wnd > tp->max_sndwnd)
2806 tp->max_sndwnd = tp->snd_wnd;
2807 needoutput = 1;
2808 }
2809
2810 /*
2811 * Process segments with URG.
2812 */
2813 if ((thflags & TH_URG) && th->th_urp &&
2814 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2815 /*
2816 * This is a kludge, but if we receive and accept
2817 * random urgent pointers, we'll crash in
2818 * soreceive. It's hard to imagine someone
2819 * actually wanting to send this much urgent data.
2820 */
2821 SOCKBUF_LOCK(&so->so_rcv);
2822 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2823 th->th_urp = 0; /* XXX */
2824 thflags &= ~TH_URG; /* XXX */
2825 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2826 goto dodata; /* XXX */
2827 }
2828 /*
2829 * If this segment advances the known urgent pointer,
2830 * then mark the data stream. This should not happen
2831 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2832 * a FIN has been received from the remote side.
2833 * In these states we ignore the URG.
2834 *
2835 * According to RFC961 (Assigned Protocols),
2836 * the urgent pointer points to the last octet
2837 * of urgent data. We continue, however,
2838 * to consider it to indicate the first octet
2839 * of data past the urgent section as the original
2840 * spec states (in one of two places).
2841 */
2842 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2843 tp->rcv_up = th->th_seq + th->th_urp;
2844 so->so_oobmark = so->so_rcv.sb_cc +
2845 (tp->rcv_up - tp->rcv_nxt) - 1;
2846 if (so->so_oobmark == 0)
2847 so->so_rcv.sb_state |= SBS_RCVATMARK;
2848 sohasoutofband(so);
2849 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2850 }
2851 SOCKBUF_UNLOCK(&so->so_rcv);
2852 /*
2853 * Remove the out-of-band data so it doesn't get presented to the user.
2854 * This can happen independent of advancing the URG pointer,
2855 * but if two URG's are pending at once, some out-of-band
2856 * data may creep in... ick.
2857 */
2858 if (th->th_urp <= (u_long)tlen &&
2859 !(so->so_options & SO_OOBINLINE)) {
2860 /* hdr drop is delayed */
2861 tcp_pulloutofband(so, th, m, drop_hdrlen);
2862 }
2863 } else {
2864 /*
2865 * If no out of band data is expected,
2866 * pull receive urgent pointer along
2867 * with the receive window.
2868 */
2869 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2870 tp->rcv_up = tp->rcv_nxt;
2871 }
2872 dodata: /* XXX */
2873 INP_WLOCK_ASSERT(tp->t_inpcb);
2874
2875 /*
2876 * Process the segment text, merging it into the TCP sequencing queue,
2877 * and arranging for acknowledgment of receipt if necessary.
2878 * This process logically involves adjusting tp->rcv_wnd as data
2879 * is presented to the user (this happens in tcp_usrreq.c,
2880 * case PRU_RCVD). If a FIN has already been received on this
2881 * connection then we just ignore the text.
2882 */
2883 if ((tlen || (thflags & TH_FIN)) &&
2884 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2885 tcp_seq save_start = th->th_seq;
2886 m_adj(m, drop_hdrlen); /* delayed header drop */
2887 /*
2888 * Insert segment which includes th into TCP reassembly queue
2889 * with control block tp. Set thflags to whether reassembly now
2890 * includes a segment with FIN. This handles the common case
2891 * inline (segment is the next to be received on an established
2892 * connection, and the queue is empty), avoiding linkage into
2893 * and removal from the queue and repetition of various
2894 * conversions.
2895 * Set DELACK for segments received in order, but ack
2896 * immediately when segments are out of order (so
2897 * fast retransmit can work).
2898 */
2899 if (th->th_seq == tp->rcv_nxt &&
2900 LIST_EMPTY(&tp->t_segq) &&
2901 TCPS_HAVEESTABLISHED(tp->t_state)) {
2902 if (DELAY_ACK(tp, tlen))
2903 tp->t_flags |= TF_DELACK;
2904 else
2905 tp->t_flags |= TF_ACKNOW;
2906 tp->rcv_nxt += tlen;
2907 thflags = th->th_flags & TH_FIN;
2908 TCPSTAT_INC(tcps_rcvpack);
2909 TCPSTAT_ADD(tcps_rcvbyte, tlen);
2910 ND6_HINT(tp);
2911 SOCKBUF_LOCK(&so->so_rcv);
2912 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2913 m_freem(m);
2914 else
2915 sbappendstream_locked(&so->so_rcv, m);
2916 /* NB: sorwakeup_locked() does an implicit unlock. */
2917 sorwakeup_locked(so);
2918 } else {
2919 /*
2920 * XXX: Due to the header drop above, "th" is
2921 * theoretically invalid by now. Fortunately
2922 * m_adj() doesn't actually free any mbufs
2923 * when trimming from the head.
2924 */
2925 thflags = tcp_reass(tp, th, &tlen, m);
2926 tp->t_flags |= TF_ACKNOW;
2927 }
2928 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2929 tcp_update_sack_list(tp, save_start, save_start + tlen);
2930 #if 0
2931 /*
2932 * Note the amount of data that peer has sent into
2933 * our window, in order to estimate the sender's
2934 * buffer size.
2935 * XXX: Unused.
2936 */
2937 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
2938 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2939 else
2940 len = so->so_rcv.sb_hiwat;
2941 #endif
2942 } else {
2943 m_freem(m);
2944 thflags &= ~TH_FIN;
2945 }
2946
2947 /*
2948 * If FIN is received ACK the FIN and let the user know
2949 * that the connection is closing.
2950 */
2951 if (thflags & TH_FIN) {
2952 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2953 socantrcvmore(so);
2954 /*
2955 * If connection is half-synchronized
2956 * (ie NEEDSYN flag on) then delay ACK,
2957 * so it may be piggybacked when SYN is sent.
2958 * Otherwise, since we received a FIN then no
2959 * more input can be expected, send ACK now.
2960 */
2961 if (tp->t_flags & TF_NEEDSYN)
2962 tp->t_flags |= TF_DELACK;
2963 else
2964 tp->t_flags |= TF_ACKNOW;
2965 tp->rcv_nxt++;
2966 }
2967 switch (tp->t_state) {
2968
2969 /*
2970 * In SYN_RECEIVED and ESTABLISHED STATES
2971 * enter the CLOSE_WAIT state.
2972 */
2973 case TCPS_SYN_RECEIVED:
2974 tp->t_starttime = ticks;
2975 /* FALLTHROUGH */
2976 case TCPS_ESTABLISHED:
2977 tcp_state_change(tp, TCPS_CLOSE_WAIT);
2978 break;
2979
2980 /*
2981 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2982 * enter the CLOSING state.
2983 */
2984 case TCPS_FIN_WAIT_1:
2985 tcp_state_change(tp, TCPS_CLOSING);
2986 break;
2987
2988 /*
2989 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2990 * starting the time-wait timer, turning off the other
2991 * standard timers.
2992 */
2993 case TCPS_FIN_WAIT_2:
2994 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2995 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
2996 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
2997 ti_locked));
2998
2999 tcp_twstart(tp);
3000 INP_INFO_WUNLOCK(&V_tcbinfo);
3001 return;
3002 }
3003 }
3004 if (ti_locked == TI_WLOCKED)
3005 INP_INFO_WUNLOCK(&V_tcbinfo);
3006 ti_locked = TI_UNLOCKED;
3007
3008 #ifdef TCPDEBUG
3009 if (so->so_options & SO_DEBUG)
3010 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3011 &tcp_savetcp, 0);
3012 #endif
3013
3014 /*
3015 * Return any desired output.
3016 */
3017 if (needoutput || (tp->t_flags & TF_ACKNOW))
3018 (void) tcp_output(tp);
3019
3020 check_delack:
3021 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3022 __func__, ti_locked));
3023 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3024 INP_WLOCK_ASSERT(tp->t_inpcb);
3025
3026 if (tp->t_flags & TF_DELACK) {
3027 tp->t_flags &= ~TF_DELACK;
3028 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3029 }
3030 INP_WUNLOCK(tp->t_inpcb);
3031 return;
3032
3033 dropafterack:
3034 /*
3035 * Generate an ACK for the dropped incoming segment if it
3036 * occupies sequence space, where the ACK reflects our state.
3037 *
3038 * We can now skip the test for the RST flag since all
3039 * paths to this code happen after packets containing
3040 * RST have been dropped.
3041 *
3042 * In the SYN-RECEIVED state, don't send an ACK unless the
3043 * segment we received passes the SYN-RECEIVED ACK test.
3044 * If it fails send a RST. This breaks the loop in the
3045 * "LAND" DoS attack, and also prevents an ACK storm
3046 * between two listening ports that have been sent forged
3047 * SYN segments, each with the source address of the other.
3048 */
3049 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3050 (SEQ_GT(tp->snd_una, th->th_ack) ||
3051 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3052 rstreason = BANDLIM_RST_OPENPORT;
3053 goto dropwithreset;
3054 }
3055 #ifdef TCPDEBUG
3056 if (so->so_options & SO_DEBUG)
3057 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3058 &tcp_savetcp, 0);
3059 #endif
3060 if (ti_locked == TI_WLOCKED)
3061 INP_INFO_WUNLOCK(&V_tcbinfo);
3062 ti_locked = TI_UNLOCKED;
3063
3064 tp->t_flags |= TF_ACKNOW;
3065 (void) tcp_output(tp);
3066 INP_WUNLOCK(tp->t_inpcb);
3067 m_freem(m);
3068 return;
3069
3070 dropwithreset:
3071 if (ti_locked == TI_WLOCKED)
3072 INP_INFO_WUNLOCK(&V_tcbinfo);
3073 ti_locked = TI_UNLOCKED;
3074
3075 if (tp != NULL) {
3076 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3077 INP_WUNLOCK(tp->t_inpcb);
3078 } else
3079 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3080 return;
3081
3082 drop:
3083 if (ti_locked == TI_WLOCKED) {
3084 INP_INFO_WUNLOCK(&V_tcbinfo);
3085 ti_locked = TI_UNLOCKED;
3086 }
3087 #ifdef INVARIANTS
3088 else
3089 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3090 #endif
3091
3092 /*
3093 * Drop space held by incoming segment and return.
3094 */
3095 #ifdef TCPDEBUG
3096 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3097 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3098 &tcp_savetcp, 0);
3099 #endif
3100 if (tp != NULL)
3101 INP_WUNLOCK(tp->t_inpcb);
3102 m_freem(m);
3103 }
3104
3105 /*
3106 * Issue RST and make ACK acceptable to originator of segment.
3107 * The mbuf must still include the original packet header.
3108 * tp may be NULL.
3109 */
3110 static void
3111 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3112 int tlen, int rstreason)
3113 {
3114 #ifdef INET
3115 struct ip *ip;
3116 #endif
3117 #ifdef INET6
3118 struct ip6_hdr *ip6;
3119 #endif
3120
3121 if (tp != NULL) {
3122 INP_WLOCK_ASSERT(tp->t_inpcb);
3123 }
3124
3125 /* Don't bother if destination was broadcast/multicast. */
3126 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3127 goto drop;
3128 #ifdef INET6
3129 if (mtod(m, struct ip *)->ip_v == 6) {
3130 ip6 = mtod(m, struct ip6_hdr *);
3131 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3132 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3133 goto drop;
3134 /* IPv6 anycast check is done at tcp6_input() */
3135 }
3136 #endif
3137 #if defined(INET) && defined(INET6)
3138 else
3139 #endif
3140 #ifdef INET
3141 {
3142 ip = mtod(m, struct ip *);
3143 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3144 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3145 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3146 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3147 goto drop;
3148 }
3149 #endif
3150
3151 /* Perform bandwidth limiting. */
3152 if (badport_bandlim(rstreason) < 0)
3153 goto drop;
3154
3155 /* tcp_respond consumes the mbuf chain. */
3156 if (th->th_flags & TH_ACK) {
3157 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3158 th->th_ack, TH_RST);
3159 } else {
3160 if (th->th_flags & TH_SYN)
3161 tlen++;
3162 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3163 (tcp_seq)0, TH_RST|TH_ACK);
3164 }
3165 return;
3166 drop:
3167 m_freem(m);
3168 }
3169
3170 /*
3171 * Parse TCP options and place in tcpopt.
3172 */
3173 static void
3174 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3175 {
3176 int opt, optlen;
3177
3178 to->to_flags = 0;
3179 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3180 opt = cp[0];
3181 if (opt == TCPOPT_EOL)
3182 break;
3183 if (opt == TCPOPT_NOP)
3184 optlen = 1;
3185 else {
3186 if (cnt < 2)
3187 break;
3188 optlen = cp[1];
3189 if (optlen < 2 || optlen > cnt)
3190 break;
3191 }
3192 switch (opt) {
3193 case TCPOPT_MAXSEG:
3194 if (optlen != TCPOLEN_MAXSEG)
3195 continue;
3196 if (!(flags & TO_SYN))
3197 continue;
3198 to->to_flags |= TOF_MSS;
3199 bcopy((char *)cp + 2,
3200 (char *)&to->to_mss, sizeof(to->to_mss));
3201 to->to_mss = ntohs(to->to_mss);
3202 break;
3203 case TCPOPT_WINDOW:
3204 if (optlen != TCPOLEN_WINDOW)
3205 continue;
3206 if (!(flags & TO_SYN))
3207 continue;
3208 to->to_flags |= TOF_SCALE;
3209 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3210 break;
3211 case TCPOPT_TIMESTAMP:
3212 if (optlen != TCPOLEN_TIMESTAMP)
3213 continue;
3214 to->to_flags |= TOF_TS;
3215 bcopy((char *)cp + 2,
3216 (char *)&to->to_tsval, sizeof(to->to_tsval));
3217 to->to_tsval = ntohl(to->to_tsval);
3218 bcopy((char *)cp + 6,
3219 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3220 to->to_tsecr = ntohl(to->to_tsecr);
3221 break;
3222 #ifdef TCP_SIGNATURE
3223 /*
3224 * XXX In order to reply to a host which has set the
3225 * TCP_SIGNATURE option in its initial SYN, we have to
3226 * record the fact that the option was observed here
3227 * for the syncache code to perform the correct response.
3228 */
3229 case TCPOPT_SIGNATURE:
3230 if (optlen != TCPOLEN_SIGNATURE)
3231 continue;
3232 to->to_flags |= TOF_SIGNATURE;
3233 to->to_signature = cp + 2;
3234 break;
3235 #endif
3236 case TCPOPT_SACK_PERMITTED:
3237 if (optlen != TCPOLEN_SACK_PERMITTED)
3238 continue;
3239 if (!(flags & TO_SYN))
3240 continue;
3241 if (!V_tcp_do_sack)
3242 continue;
3243 to->to_flags |= TOF_SACKPERM;
3244 break;
3245 case TCPOPT_SACK:
3246 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3247 continue;
3248 if (flags & TO_SYN)
3249 continue;
3250 to->to_flags |= TOF_SACK;
3251 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3252 to->to_sacks = cp + 2;
3253 TCPSTAT_INC(tcps_sack_rcv_blocks);
3254 break;
3255 default:
3256 continue;
3257 }
3258 }
3259 }
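/*
 * A minimal usage sketch of tcp_dooptions() (illustration only,
 * not compiled in): parse the MSS and window-scale options from a
 * hand-built SYN option block. Option layout per RFC 793/1323:
 * kind, length, payload.
 */
#if 0
static void
tcp_dooptions_example(void)
{
	u_char opts[] = {
		TCPOPT_MAXSEG, TCPOLEN_MAXSEG, 0x05, 0xb4, /* MSS 1460 */
		TCPOPT_NOP,				   /* padding */
		TCPOPT_WINDOW, TCPOLEN_WINDOW, 7,	   /* shift 7 */
	};
	struct tcpopt to;

	tcp_dooptions(&to, opts, sizeof(opts), TO_SYN);
	/* Expect: TOF_MSS and TOF_SCALE set in to.to_flags,
	 * to.to_mss == 1460 and to.to_wscale == 7. */
}
#endif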
3260
3261 /*
3262 * Pull out of band byte out of a segment so
3263 * it doesn't appear in the user's data queue.
3264 * It is still reflected in the segment length for
3265 * sequencing purposes.
3266 */
3267 static void
3268 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3269 int off)
3270 {
3271 int cnt = off + th->th_urp - 1;
3272
3273 while (cnt >= 0) {
3274 if (m->m_len > cnt) {
3275 char *cp = mtod(m, caddr_t) + cnt;
3276 struct tcpcb *tp = sototcpcb(so);
3277
3278 INP_WLOCK_ASSERT(tp->t_inpcb);
3279
3280 tp->t_iobc = *cp;
3281 tp->t_oobflags |= TCPOOB_HAVEDATA;
3282 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3283 m->m_len--;
3284 if (m->m_flags & M_PKTHDR)
3285 m->m_pkthdr.len--;
3286 return;
3287 }
3288 cnt -= m->m_len;
3289 m = m->m_next;
3290 if (m == NULL)
3291 break;
3292 }
3293 panic("tcp_pulloutofband");
3294 }
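/*
 * Offset arithmetic example for the walk above (hypothetical mbuf
 * layout): with drop_hdrlen = 52 and th_urp = 3 the OOB byte sits
 * at chain offset cnt = 52 + 3 - 1 = 54. If the first mbuf holds
 * 40 bytes it is skipped (cnt becomes 14) and the byte is lifted
 * from offset 14 of the second mbuf; the remaining bytes are
 * shifted left by one and m_len/m_pkthdr.len shrink accordingly.
 */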
3295
3296 /*
3297 * Collect new round-trip time estimate
3298 * and update averages and current timeout.
3299 */
3300 static void
3301 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3302 {
3303 int delta;
3304
3305 INP_WLOCK_ASSERT(tp->t_inpcb);
3306
3307 TCPSTAT_INC(tcps_rttupdated);
3308 tp->t_rttupdated++;
3309 if (tp->t_srtt != 0) {
3310 /*
3311 * srtt is stored as fixed point with 5 bits after the
3312 * binary point (i.e., scaled by 32). The following magic
3313 * is equivalent to the smoothing algorithm in rfc793 with
3314 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3315 * point). Adjust rtt to origin 0.
3316 */
3317 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3318 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3319
3320 if ((tp->t_srtt += delta) <= 0)
3321 tp->t_srtt = 1;
3322
3323 /*
3324 * We accumulate a smoothed rtt variance (actually, a
3325 * smoothed mean difference), then set the retransmit
3326 * timer to smoothed rtt + 4 times the smoothed variance.
3327 * rttvar is stored as fixed point with 4 bits after the
3328 * binary point (scaled by 16). The following is
3329 * equivalent to rfc793 smoothing with an alpha of .75
3330 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3331 * rfc793's wired-in beta.
3332 */
3333 if (delta < 0)
3334 delta = -delta;
3335 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3336 if ((tp->t_rttvar += delta) <= 0)
3337 tp->t_rttvar = 1;
3338 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3339 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3340 } else {
3341 /*
3342 * No rtt measurement yet - use the unsmoothed rtt.
3343 * Set the variance to half the rtt (so our first
3344 * retransmit happens at 3*rtt).
3345 */
3346 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3347 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3348 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3349 }
3350 tp->t_rtttime = 0;
3351 tp->t_rxtshift = 0;
3352
3353 /*
3354 * The retransmit should happen at rtt + 4 * rttvar.
3355 * Because of the way we do the smoothing, srtt and rttvar
3356 * will each average +1/2 tick of bias. When we compute
3357 * the retransmit timer, we want 1/2 tick of rounding and
3358 * 1 extra tick because of +-1/2 tick uncertainty in the
3359 * firing of the timer. The bias will give us exactly the
3360 * 1.5 tick we need. But, because the bias is
3361 * statistical, we have to test that we don't drop below
3362 * the minimum feasible timer (which is 2 ticks).
3363 */
3364 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3365 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3366
3367 /*
3368 * We received an ack for a packet that wasn't retransmitted;
3369 * it is probably safe to discard any error indications we've
3370 * received recently. This isn't quite right, but close enough
3371 * for now (a route might have failed after we sent a segment,
3372 * and the return path might not be symmetrical).
3373 */
3374 tp->t_softerror = 0;
3375 }
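/*
 * A sketch of the same smoothing in plain (non fixed-point)
 * arithmetic, assuming only the formulas described above; for
 * illustration, not the kernel path. A first sample rtt = 8 gives
 * srtt = 8, rttvar = 4 and an RTO of 8 + 4*4 = 24 ticks; a second
 * sample of 16 moves srtt to 9, rttvar to 5 and the RTO to 29,
 * always clamped to [max(t_rttmin, rtt + 2), TCPTV_REXMTMAX].
 */
#if 0
static int
rto_sketch(int *srtt, int *rttvar, int rtt)
{
	int delta = rtt - *srtt;

	*srtt += delta / 8;		  /* srtt = 7/8 srtt + 1/8 rtt */
	if (delta < 0)
		delta = -delta;
	*rttvar += (delta - *rttvar) / 4; /* rttvar = 3/4 var + 1/4 |d| */
	return (*srtt + 4 * *rttvar);	  /* RTO before clamping */
}
#endif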
3376
3377 /*
3378 * Determine a reasonable value for maxseg size.
3379 * If the route is known, check route for mtu.
3380 * If none, use an mss that can be handled on the outgoing interface
3381 * without forcing IP to fragment. If no route is found, route has no mtu,
3382 * or the destination isn't local, use a default, hopefully conservative
3383 * size (usually 512 or the default IP max size, but no more than the mtu
3384 * of the interface), as we can't discover anything about intervening
3385 * gateways or networks. We also initialize the congestion/slow start
3386 * window to be a single segment if the destination isn't local.
3387 * While looking at the routing entry, we also initialize other path-dependent
3388 * parameters from pre-set or cached values in the routing entry.
3389 *
3390 * Also take into account the space needed for options that we
3391 * send regularly. Make maxseg shorter by that amount to assure
3392 * that we can send maxseg amount of data even when the options
3393 * are present. Store the upper limit of the length of options plus
3394 * data in maxopd.
3395 *
3396 * NOTE that this routine is only called when we process an incoming
3397 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3398 * settings are handled in tcp_mssopt().
3399 */
3400 void
3401 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3402 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3403 {
3404 int mss = 0;
3405 u_long maxmtu = 0;
3406 struct inpcb *inp = tp->t_inpcb;
3407 struct hc_metrics_lite metrics;
3408 int origoffer;
3409 #ifdef INET6
3410 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3411 size_t min_protoh = isipv6 ?
3412 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3413 sizeof (struct tcpiphdr);
3414 #else
3415 const size_t min_protoh = sizeof(struct tcpiphdr);
3416 #endif
3417
3418 INP_WLOCK_ASSERT(tp->t_inpcb);
3419
3420 if (mtuoffer != -1) {
3421 KASSERT(offer == -1, ("%s: conflict", __func__));
3422 offer = mtuoffer - min_protoh;
3423 }
3424 origoffer = offer;
3425
3426 /* Initialize. */
3427 #ifdef INET6
3428 if (isipv6) {
3429 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3430 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
3431 }
3432 #endif
3433 #if defined(INET) && defined(INET6)
3434 else
3435 #endif
3436 #ifdef INET
3437 {
3438 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3439 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
3440 }
3441 #endif
3442
3443 /*
3444 * No route to sender, stay with default mss and return.
3445 */
3446 if (maxmtu == 0) {
3447 /*
3448 * In case we return early we need to initialize metrics
3449 * to a defined state as tcp_hc_get() would do for us
3450 * if there was no cache hit.
3451 */
3452 if (metricptr != NULL)
3453 bzero(metricptr, sizeof(struct hc_metrics_lite));
3454 return;
3455 }
3456
3457 /* What have we got? */
3458 switch (offer) {
3459 case 0:
3460 /*
3461 * Offer == 0 means that there was no MSS on the SYN
3462 * segment; in this case we use tcp_mssdflt as
3463 * already assigned to t_maxopd above.
3464 */
3465 offer = tp->t_maxopd;
3466 break;
3467
3468 case -1:
3469 /*
3470 * Offer == -1 means that we didn't receive a SYN yet.
3471 */
3472 /* FALLTHROUGH */
3473
3474 default:
3475 /*
3476 * Prevent DoS attack with too small MSS. Round up
3477 * to at least minmss.
3478 */
3479 offer = max(offer, V_tcp_minmss);
3480 }
3481
3482 /*
3483 * rmx information is now retrieved from tcp_hostcache.
3484 */
3485 tcp_hc_get(&inp->inp_inc, &metrics);
3486 if (metricptr != NULL)
3487 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3488
3489 /*
3490 * If there's a discovered mtu in the tcp hostcache, use it;
3491 * otherwise, use the link mtu.
3492 */
3493 if (metrics.rmx_mtu)
3494 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3495 else {
3496 #ifdef INET6
3497 if (isipv6) {
3498 mss = maxmtu - min_protoh;
3499 if (!V_path_mtu_discovery &&
3500 !in6_localaddr(&inp->in6p_faddr))
3501 mss = min(mss, V_tcp_v6mssdflt);
3502 }
3503 #endif
3504 #if defined(INET) && defined(INET6)
3505 else
3506 #endif
3507 #ifdef INET
3508 {
3509 mss = maxmtu - min_protoh;
3510 if (!V_path_mtu_discovery &&
3511 !in_localaddr(inp->inp_faddr))
3512 mss = min(mss, V_tcp_mssdflt);
3513 }
3514 #endif
3515 /*
3516 * XXX - The above conditional (mss = maxmtu - min_protoh)
3517 * probably violates the TCP spec.
3518 * The problem is that, since we don't know the
3519 * other end's MSS, we are supposed to use a conservative
3520 * default. But, if we do that, then MTU discovery will
3521 * never actually take place, because the conservative
3522 * default is much less than the MTUs typically seen
3523 * on the Internet today. For the moment, we'll sweep
3524 * this under the carpet.
3525 *
3526 * The conservative default might not actually be a problem
3527 * if the only case this occurs is when sending an initial
3528 * SYN with options and data to a host we've never talked
3529 * to before. Then, they will reply with an MSS value which
3530 * will get recorded and the new parameters should get
3531 * recomputed. For Further Study.
3532 */
3533 }
3534 mss = min(mss, offer);
3535
3536 /*
3537 * Sanity check: make sure that maxopd will be large
3538 * enough to allow some data on segments even if all
3539 * the option space is used (40 bytes). Otherwise
3540 * funny things may happen in tcp_output.
3541 */
3542 mss = max(mss, 64);
3543
3544 /*
3545 * maxopd stores the maximum length of data AND options
3546 * in a segment; maxseg is the amount of data in a normal
3547 * segment. We need to store this value (maxopd) apart
3548 * from maxseg, because now every segment carries options
3549 * and thus we normally have somewhat less data in segments.
3550 */
3551 tp->t_maxopd = mss;
3552
3553 /*
3554 * origoffer==-1 indicates that no segments were received yet.
3555 * In this case we just guess.
3556 */
3557 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3558 (origoffer == -1 ||
3559 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3560 mss -= TCPOLEN_TSTAMP_APPA;
3561
3562 tp->t_maxseg = mss;
3563 }
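/*
 * Concrete example of the computation above (hypothetical path):
 * for IPv4 over a 1500-byte MTU with no hostcache entry,
 * mss = 1500 - min_protoh = 1500 - 40 = 1460, so t_maxopd = 1460.
 * If both sides negotiated timestamps, the regularly sent option
 * space (TCPOLEN_TSTAMP_APPA = 12 bytes) is subtracted and
 * t_maxseg becomes 1448.
 */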
3564
3565 void
3566 tcp_mss(struct tcpcb *tp, int offer)
3567 {
3568 int mss;
3569 u_long bufsize;
3570 struct inpcb *inp;
3571 struct socket *so;
3572 struct hc_metrics_lite metrics;
3573 struct tcp_ifcap cap;
3574
3575 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3576
3577 bzero(&cap, sizeof(cap));
3578 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3579
3580 mss = tp->t_maxseg;
3581 inp = tp->t_inpcb;
3582
3583 /*
3584 * If there's a pipesize, change the socket buffer to that size;
3585 * don't change it if sb_hiwat differs from the default (then it
3586 * has been changed on purpose with setsockopt).
3587 * Make the socket buffers an integral number of mss units;
3588 * if the mss is larger than the socket buffer, decrease the mss.
3589 */
3590 so = inp->inp_socket;
3591 SOCKBUF_LOCK(&so->so_snd);
3592 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3593 bufsize = metrics.rmx_sendpipe;
3594 else
3595 bufsize = so->so_snd.sb_hiwat;
3596 if (bufsize < mss)
3597 mss = bufsize;
3598 else {
3599 bufsize = roundup(bufsize, mss);
3600 if (bufsize > sb_max)
3601 bufsize = sb_max;
3602 if (bufsize > so->so_snd.sb_hiwat)
3603 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3604 }
3605 SOCKBUF_UNLOCK(&so->so_snd);
3606 tp->t_maxseg = mss;
3607
3608 SOCKBUF_LOCK(&so->so_rcv);
3609 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3610 bufsize = metrics.rmx_recvpipe;
3611 else
3612 bufsize = so->so_rcv.sb_hiwat;
3613 if (bufsize > mss) {
3614 bufsize = roundup(bufsize, mss);
3615 if (bufsize > sb_max)
3616 bufsize = sb_max;
3617 if (bufsize > so->so_rcv.sb_hiwat)
3618 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3619 }
3620 SOCKBUF_UNLOCK(&so->so_rcv);
3621
3622 /* Check the interface for TSO capabilities. */
3623 if (cap.ifcap & CSUM_TSO) {
3624 tp->t_flags |= TF_TSO;
3625 tp->t_tsomax = cap.tsomax;
3626 }
3627 }
3628
3629 /*
3630 * Determine the MSS option to send on an outgoing SYN.
3631 */
3632 int
3633 tcp_mssopt(struct in_conninfo *inc)
3634 {
3635 int mss = 0;
3636 u_long maxmtu = 0;
3637 u_long thcmtu = 0;
3638 size_t min_protoh;
3639
3640 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3641
3642 #ifdef INET6
3643 if (inc->inc_flags & INC_ISIPV6) {
3644 mss = V_tcp_v6mssdflt;
3645 maxmtu = tcp_maxmtu6(inc, NULL);
3646 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3647 }
3648 #endif
3649 #if defined(INET) && defined(INET6)
3650 else
3651 #endif
3652 #ifdef INET
3653 {
3654 mss = V_tcp_mssdflt;
3655 maxmtu = tcp_maxmtu(inc, NULL);
3656 min_protoh = sizeof(struct tcpiphdr);
3657 }
3658 #endif
3659 #if defined(INET6) || defined(INET)
3660 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3661 #endif
3662
3663 if (maxmtu && thcmtu)
3664 mss = min(maxmtu, thcmtu) - min_protoh;
3665 else if (maxmtu || thcmtu)
3666 mss = max(maxmtu, thcmtu) - min_protoh;
3667
3668 return (mss);
3669 }
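/*
 * Example (hypothetical values): if the link allows maxmtu = 1500
 * but the hostcache remembers a discovered path MTU thcmtu = 1400,
 * the min() branch advertises 1400 - 40 = 1360 for IPv4; with no
 * hostcache entry (thcmtu = 0) the max() branch falls back to the
 * link MTU and advertises 1460.
 */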
3670
3671
3672 /*
3673 * When a partial ack arrives, force the retransmission of the
3674 * next unacknowledged segment. Do not clear tp->t_dupacks.
3675 * By setting snd_nxt to th_ack, this forces the retransmission
3676 * timer to be started again.
3677 */
3678 static void
3679 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3680 {
3681 tcp_seq onxt = tp->snd_nxt;
3682 u_long ocwnd = tp->snd_cwnd;
3683
3684 INP_WLOCK_ASSERT(tp->t_inpcb);
3685
3686 tcp_timer_activate(tp, TT_REXMT, 0);
3687 tp->t_rtttime = 0;
3688 tp->snd_nxt = th->th_ack;
3689 /*
3690 * Set snd_cwnd to one segment beyond acknowledged offset.
3691 * (tp->snd_una has not yet been updated when this function is called.)
3692 */
3693 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
3694 tp->t_flags |= TF_ACKNOW;
3695 (void) tcp_output(tp);
3696 tp->snd_cwnd = ocwnd;
3697 if (SEQ_GT(onxt, tp->snd_nxt))
3698 tp->snd_nxt = onxt;
3699 /*
3700 * Partial window deflation. Relies on fact that tp->snd_una
3701 * not updated yet.
3702 */
3703 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3704 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3705 else
3706 tp->snd_cwnd = 0;
3707 tp->snd_cwnd += tp->t_maxseg;
3708 }
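/*
 * Partial-ack example (hypothetical numbers, t_maxseg = 1000):
 * with snd_una = 10000 and a partial ACK at th_ack = 13000, cwnd
 * is briefly set to 1000 + 3000 so tcp_output() retransmits just
 * the segment at the hole, then the old cwnd is restored, deflated
 * by the 3000 newly ACKed bytes and grown by one t_maxseg, as
 * NewReno's partial window deflation prescribes.
 */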