1 /*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (c) 2007-2008,2010
5 * Swinburne University of Technology, Melbourne, Australia.
6 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
7 * Copyright (c) 2010 The FreeBSD Foundation
8 * Copyright (c) 2010-2011 Juniper Networks, Inc.
9 * All rights reserved.
10 *
11 * Portions of this software were developed at the Centre for Advanced Internet
12 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
13 * James Healy and David Hayes, made possible in part by a grant from the Cisco
14 * University Research Program Fund at Community Foundation Silicon Valley.
15 *
16 * Portions of this software were developed at the Centre for Advanced
17 * Internet Architectures, Swinburne University of Technology, Melbourne,
18 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
19 *
20 * Portions of this software were developed by Robert N. M. Watson under
21 * contract to Juniper Networks, Inc.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 4. Neither the name of the University nor the names of its contributors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 *
47 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/9.2/sys/netinet/tcp_input.c 271669 2014-09-16 09:50:19Z delphij $");
52
53 #include "opt_ipfw.h" /* for ipfw_fwd */
54 #include "opt_inet.h"
55 #include "opt_inet6.h"
56 #include "opt_ipsec.h"
57 #include "opt_tcpdebug.h"
58
59 #include <sys/param.h>
60 #include <sys/kernel.h>
61 #include <sys/hhook.h>
62 #include <sys/malloc.h>
63 #include <sys/mbuf.h>
64 #include <sys/proc.h> /* for proc0 declaration */
65 #include <sys/protosw.h>
66 #include <sys/signalvar.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/sysctl.h>
70 #include <sys/syslog.h>
71 #include <sys/systm.h>
72
73 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
74
75 #include <vm/uma.h>
76
77 #include <net/if.h>
78 #include <net/route.h>
79 #include <net/vnet.h>
80
81 #define TCPSTATES /* for logging */
82
83 #include <netinet/cc.h>
84 #include <netinet/in.h>
85 #include <netinet/in_pcb.h>
86 #include <netinet/in_systm.h>
87 #include <netinet/in_var.h>
88 #include <netinet/ip.h>
89 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
90 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
91 #include <netinet/ip_var.h>
92 #include <netinet/ip_options.h>
93 #include <netinet/ip6.h>
94 #include <netinet/icmp6.h>
95 #include <netinet6/in6_pcb.h>
96 #include <netinet6/ip6_var.h>
97 #include <netinet6/nd6.h>
98 #include <netinet/tcp_fsm.h>
99 #include <netinet/tcp_seq.h>
100 #include <netinet/tcp_timer.h>
101 #include <netinet/tcp_var.h>
102 #include <netinet6/tcp6_var.h>
103 #include <netinet/tcpip.h>
104 #include <netinet/tcp_syncache.h>
105 #ifdef TCPDEBUG
106 #include <netinet/tcp_debug.h>
107 #endif /* TCPDEBUG */
108 #ifdef TCP_OFFLOAD
109 #include <netinet/tcp_offload.h>
110 #endif
111
112 #ifdef IPSEC
113 #include <netipsec/ipsec.h>
114 #include <netipsec/ipsec6.h>
115 #endif /*IPSEC*/
116
117 #include <machine/in_cksum.h>
118
119 #include <security/mac/mac_framework.h>
120
121 const int tcprexmtthresh = 3;
122
123 VNET_DEFINE(struct tcpstat, tcpstat);
124 SYSCTL_VNET_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
125 &VNET_NAME(tcpstat), tcpstat,
126 "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
127
128 int tcp_log_in_vain = 0;
129 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
130 &tcp_log_in_vain, 0,
131 "Log all incoming TCP segments to closed ports");
132
133 VNET_DEFINE(int, blackhole) = 0;
134 #define V_blackhole VNET(blackhole)
135 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
136 &VNET_NAME(blackhole), 0,
137 "Do not send RST on segments to closed ports");
138
139 VNET_DEFINE(int, tcp_delack_enabled) = 1;
140 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
141 &VNET_NAME(tcp_delack_enabled), 0,
142 "Delay ACK to try and piggyback it onto a data packet");
143
144 VNET_DEFINE(int, drop_synfin) = 0;
145 #define V_drop_synfin VNET(drop_synfin)
146 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
147 &VNET_NAME(drop_synfin), 0,
148 "Drop TCP packets with SYN+FIN set");
149
150 VNET_DEFINE(int, tcp_do_rfc3042) = 1;
151 #define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042)
152 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
153 &VNET_NAME(tcp_do_rfc3042), 0,
154 "Enable RFC 3042 (Limited Transmit)");
155
156 VNET_DEFINE(int, tcp_do_rfc3390) = 1;
157 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
158 &VNET_NAME(tcp_do_rfc3390), 0,
159 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
160
161 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, experimental, CTLFLAG_RW, 0,
162 "Experimental TCP extensions");
163
164 VNET_DEFINE(int, tcp_do_initcwnd10) = 0;
165 SYSCTL_VNET_INT(_net_inet_tcp_experimental, OID_AUTO, initcwnd10, CTLFLAG_RW,
166 &VNET_NAME(tcp_do_initcwnd10), 0,
167 "Enable draft-ietf-tcpm-initcwnd-05 (Increasing initial CWND to 10)");
168
169 VNET_DEFINE(int, tcp_do_rfc3465) = 1;
170 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
171 &VNET_NAME(tcp_do_rfc3465), 0,
172 "Enable RFC 3465 (Appropriate Byte Counting)");
173
174 VNET_DEFINE(int, tcp_abc_l_var) = 2;
175 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
176 &VNET_NAME(tcp_abc_l_var), 2,
177 "Cap the max cwnd increment during slow-start to this number of segments");
178
179 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
180
181 VNET_DEFINE(int, tcp_do_ecn) = 0;
182 SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
183 &VNET_NAME(tcp_do_ecn), 0,
184 "TCP ECN support");
185
186 VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
187 SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
188 &VNET_NAME(tcp_ecn_maxretries), 0,
189 "Max retries before giving up on ECN");
190
191 VNET_DEFINE(int, tcp_insecure_rst) = 0;
192 #define V_tcp_insecure_rst VNET(tcp_insecure_rst)
193 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
194 &VNET_NAME(tcp_insecure_rst), 0,
195 "Follow the old (insecure) criteria for accepting RST packets");
196
197 VNET_DEFINE(int, tcp_recvspace) = 1024*64;
198 #define V_tcp_recvspace VNET(tcp_recvspace)
199 SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
200 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
201
202 VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
203 #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
204 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
205 &VNET_NAME(tcp_do_autorcvbuf), 0,
206 "Enable automatic receive buffer sizing");
207
208 VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
209 #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
210 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
211 &VNET_NAME(tcp_autorcvbuf_inc), 0,
212 "Incrementor step size of automatic receive buffer");
213
214 VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
215 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
216 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
217 &VNET_NAME(tcp_autorcvbuf_max), 0,
218 "Max size of automatic receive buffer");
219
220 VNET_DEFINE(struct inpcbhead, tcb);
221 #define tcb6 tcb /* for KAME src sync over BSD*'s */
222 VNET_DEFINE(struct inpcbinfo, tcbinfo);
223
224 static void tcp_dooptions(struct tcpopt *, u_char *, int, int);
225 static void tcp_do_segment(struct mbuf *, struct tcphdr *,
226 struct socket *, struct tcpcb *, int, int, uint8_t,
227 int);
228 static void tcp_dropwithreset(struct mbuf *, struct tcphdr *,
229 struct tcpcb *, int, int);
230 static void tcp_pulloutofband(struct socket *,
231 struct tcphdr *, struct mbuf *, int);
232 static void tcp_xmit_timer(struct tcpcb *, int);
233 static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
234 static void inline tcp_fields_to_host(struct tcphdr *);
235 #ifdef TCP_SIGNATURE
236 static void inline tcp_fields_to_net(struct tcphdr *);
237 static int inline tcp_signature_verify_input(struct mbuf *, int, int,
238 int, struct tcpopt *, struct tcphdr *, u_int);
239 #endif
240 static void inline cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
241 uint16_t type);
242 static void inline cc_conn_init(struct tcpcb *tp);
243 static void inline cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
244 static void inline hhook_run_tcp_est_in(struct tcpcb *tp,
245 struct tcphdr *th, struct tcpopt *to);
246
247 /*
248 * Kernel module interface for updating tcpstat. The argument is an index
249 * into tcpstat treated as an array of u_long. While this encodes the
250 * general layout of tcpstat into the caller, it doesn't encode its location,
251 * so that future changes to add, for example, per-CPU stats support won't
252 * cause binary compatibility problems for kernel modules.
253 */
254 void
255 kmod_tcpstat_inc(int statnum)
256 {
257
258 (*((u_long *)&V_tcpstat + statnum))++;
259 }
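/*
 * Callers typically reach this via the KMOD_TCPSTAT_INC() macro, which
 * passes the field's offset within struct tcpstat in units of u_long, so
 * modules need not hard-code where the statistics structure lives.
 */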
260
261 /*
262 * Wrapper for the TCP established input helper hook.
263 */
264 static void inline
265 hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
266 {
267 struct tcp_hhook_data hhook_data;
268
269 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
270 hhook_data.tp = tp;
271 hhook_data.th = th;
272 hhook_data.to = to;
273
274 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
275 tp->osd);
276 }
277 }
278
279 /*
280 * CC wrapper hook functions
281 */
282 static void inline
283 cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
284 {
285 INP_WLOCK_ASSERT(tp->t_inpcb);
286
287 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
288 if (tp->snd_cwnd <= tp->snd_wnd)
289 tp->ccv->flags |= CCF_CWND_LIMITED;
290 else
291 tp->ccv->flags &= ~CCF_CWND_LIMITED;
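/*
 * CCF_CWND_LIMITED tells the congestion control module whether the
 * congestion window, rather than the peer's advertised receive window,
 * is currently the factor limiting transmission.
 */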
292
293 if (type == CC_ACK) {
294 if (tp->snd_cwnd > tp->snd_ssthresh) {
295 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
296 V_tcp_abc_l_var * tp->t_maxseg);
297 if (tp->t_bytes_acked >= tp->snd_cwnd) {
298 tp->t_bytes_acked -= tp->snd_cwnd;
299 tp->ccv->flags |= CCF_ABC_SENTAWND;
300 }
301 } else {
302 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
303 tp->t_bytes_acked = 0;
304 }
305 }
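/*
 * Note: for a CC_ACK while in congestion avoidance (cwnd > ssthresh), the
 * block above counts acknowledged bytes, crediting at most
 * V_tcp_abc_l_var * t_maxseg per ACK, and raises CCF_ABC_SENTAWND once a
 * full congestion window's worth of data has been acked; the congestion
 * control module uses that flag to decide when it may grow cwnd
 * (byte counting in the spirit of RFC 3465).
 */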
306
307 if (CC_ALGO(tp)->ack_received != NULL) {
308 /* XXXLAS: Find a way to live without this */
309 tp->ccv->curack = th->th_ack;
310 CC_ALGO(tp)->ack_received(tp->ccv, type);
311 }
312 }
313
314 static void inline
315 cc_conn_init(struct tcpcb *tp)
316 {
317 struct hc_metrics_lite metrics;
318 struct inpcb *inp = tp->t_inpcb;
319 int rtt;
320
321 INP_WLOCK_ASSERT(tp->t_inpcb);
322
323 tcp_hc_get(&inp->inp_inc, &metrics);
324
325 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
326 tp->t_srtt = rtt;
327 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
328 TCPSTAT_INC(tcps_usedrtt);
329 if (metrics.rmx_rttvar) {
330 tp->t_rttvar = metrics.rmx_rttvar;
331 TCPSTAT_INC(tcps_usedrttvar);
332 } else {
333 /* default variation is +- 1 rtt */
334 tp->t_rttvar =
335 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
336 }
337 TCPT_RANGESET(tp->t_rxtcur,
338 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
339 tp->t_rttmin, TCPTV_REXMTMAX);
340 }
341 if (metrics.rmx_ssthresh) {
342 /*
343 * There's some sort of gateway or interface
344 * buffer limit on the path. Use this to set
345 * the slow start threshold, but set the
346 * threshold to no less than 2*mss.
347 */
348 tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
349 TCPSTAT_INC(tcps_usedssthresh);
350 }
351
352 /*
353 * Set the initial slow-start flight size.
354 *
355 * RFC5681 Section 3.1 specifies the default conservative values.
356 * RFC3390 specifies slightly more aggressive values.
357 * Draft-ietf-tcpm-initcwnd-05 increases it to ten segments.
358 *
359 * If a SYN or SYN/ACK was lost and retransmitted, we have to
360 * reduce the initial CWND to one segment, as congestion is likely,
361 * requiring us to be cautious.
362 */
363 if (tp->snd_cwnd == 1)
364 tp->snd_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
365 else if (V_tcp_do_initcwnd10)
366 tp->snd_cwnd = min(10 * tp->t_maxseg,
367 max(2 * tp->t_maxseg, 14600));
368 else if (V_tcp_do_rfc3390)
369 tp->snd_cwnd = min(4 * tp->t_maxseg,
370 max(2 * tp->t_maxseg, 4380));
371 else {
372 /* Per RFC5681 Section 3.1 */
373 if (tp->t_maxseg > 2190)
374 tp->snd_cwnd = 2 * tp->t_maxseg;
375 else if (tp->t_maxseg > 1095)
376 tp->snd_cwnd = 3 * tp->t_maxseg;
377 else
378 tp->snd_cwnd = 4 * tp->t_maxseg;
379 }
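/*
 * Worked example (illustrative numbers only): with t_maxseg = 1460, the
 * initcwnd10 branch yields min(14600, max(2920, 14600)) = 14600 bytes
 * (10 segments), the RFC 3390 branch yields
 * min(5840, max(2920, 4380)) = 4380 bytes (3 segments), and the RFC 5681
 * fallback also yields 3 * 1460 = 4380 bytes, since 1095 < 1460 <= 2190.
 */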
380
381 if (CC_ALGO(tp)->conn_init != NULL)
382 CC_ALGO(tp)->conn_init(tp->ccv);
383 }
384
385 void inline
386 cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
387 {
388 INP_WLOCK_ASSERT(tp->t_inpcb);
389
390 switch(type) {
391 case CC_NDUPACK:
392 if (!IN_FASTRECOVERY(tp->t_flags)) {
393 tp->snd_recover = tp->snd_max;
394 if (tp->t_flags & TF_ECN_PERMIT)
395 tp->t_flags |= TF_ECN_SND_CWR;
396 }
397 break;
398 case CC_ECN:
399 if (!IN_CONGRECOVERY(tp->t_flags)) {
400 TCPSTAT_INC(tcps_ecn_rcwnd);
401 tp->snd_recover = tp->snd_max;
402 if (tp->t_flags & TF_ECN_PERMIT)
403 tp->t_flags |= TF_ECN_SND_CWR;
404 }
405 break;
406 case CC_RTO:
407 tp->t_dupacks = 0;
408 tp->t_bytes_acked = 0;
409 EXIT_RECOVERY(tp->t_flags);
410 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
411 tp->t_maxseg) * tp->t_maxseg;
412 tp->snd_cwnd = tp->t_maxseg;
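/*
 * In other words, on RTO the slow-start threshold becomes roughly half
 * of the data in flight (bounded by the send window), rounded down to a
 * multiple of the MSS and clamped to at least two segments, while cwnd
 * collapses to a single segment, in line with RFC 5681, Section 3.1.
 */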
413 break;
414 case CC_RTO_ERR:
415 TCPSTAT_INC(tcps_sndrexmitbad);
416 /* RTO was unnecessary, so reset everything. */
417 tp->snd_cwnd = tp->snd_cwnd_prev;
418 tp->snd_ssthresh = tp->snd_ssthresh_prev;
419 tp->snd_recover = tp->snd_recover_prev;
420 if (tp->t_flags & TF_WASFRECOVERY)
421 ENTER_FASTRECOVERY(tp->t_flags);
422 if (tp->t_flags & TF_WASCRECOVERY)
423 ENTER_CONGRECOVERY(tp->t_flags);
424 tp->snd_nxt = tp->snd_max;
425 tp->t_flags &= ~TF_PREVVALID;
426 tp->t_badrxtwin = 0;
427 break;
428 }
429
430 if (CC_ALGO(tp)->cong_signal != NULL) {
431 if (th != NULL)
432 tp->ccv->curack = th->th_ack;
433 CC_ALGO(tp)->cong_signal(tp->ccv, type);
434 }
435 }
436
437 static void inline
438 cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
439 {
440 INP_WLOCK_ASSERT(tp->t_inpcb);
441
442 /* XXXLAS: KASSERT that we're in recovery? */
443
444 if (CC_ALGO(tp)->post_recovery != NULL) {
445 tp->ccv->curack = th->th_ack;
446 CC_ALGO(tp)->post_recovery(tp->ccv);
447 }
448 /* XXXLAS: EXIT_RECOVERY ? */
449 tp->t_bytes_acked = 0;
450 }
451
452 static inline void
453 tcp_fields_to_host(struct tcphdr *th)
454 {
455
456 th->th_seq = ntohl(th->th_seq);
457 th->th_ack = ntohl(th->th_ack);
458 th->th_win = ntohs(th->th_win);
459 th->th_urp = ntohs(th->th_urp);
460 }
461
462 #ifdef TCP_SIGNATURE
463 static inline void
464 tcp_fields_to_net(struct tcphdr *th)
465 {
466
467 th->th_seq = htonl(th->th_seq);
468 th->th_ack = htonl(th->th_ack);
469 th->th_win = htons(th->th_win);
470 th->th_urp = htons(th->th_urp);
471 }
472
473 static inline int
474 tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
475 struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
476 {
477 int ret;
478
479 tcp_fields_to_net(th);
480 ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
481 tcp_fields_to_host(th);
482 return (ret);
483 }
484 #endif
485
486 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
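/*
 * The hint gives IPv6 Neighbor Discovery an upper-layer confirmation of
 * reachability (RFC 4861, Section 7.3.1), letting it skip a NUD probe;
 * it compiles away to nothing when INET6 is not defined.
 */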
487 #ifdef INET6
488 #define ND6_HINT(tp) \
489 do { \
490 if ((tp) && (tp)->t_inpcb && \
491 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
492 nd6_nud_hint(NULL, NULL, 0); \
493 } while (0)
494 #else
495 #define ND6_HINT(tp)
496 #endif
497
498 /*
499 * Indicate whether this ack should be delayed. We can delay the ack if
500 * - there is no delayed ack timer in progress and
501 * - our last ack wasn't a 0-sized window. We never want to delay
502 * the ack that opens up a 0-sized window and
503 * - delayed acks are enabled or
504 * - this is a half-synchronized T/TCP connection.
505 */
506 #define DELAY_ACK(tp) \
507 ((!tcp_timer_active(tp, TT_DELACK) && \
508 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
509 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
510
511 /*
512 * TCP input handling is split into multiple parts:
513 * tcp6_input is a thin wrapper around tcp_input for the extended
514 * ip6_protox[] call format in ip6_input
515 * tcp_input handles primary segment validation, inpcb lookup and
516 * SYN processing on listen sockets
517 * tcp_do_segment processes the ACK and text of the segment for
518 * establishing, established and closing connections
519 */
520 #ifdef INET6
521 int
522 tcp6_input(struct mbuf **mp, int *offp, int proto)
523 {
524 struct mbuf *m = *mp;
525 struct in6_ifaddr *ia6;
526
527 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
528
529 /*
530 * draft-itojun-ipv6-tcp-to-anycast
531 * better place to put this in?
532 */
533 ia6 = ip6_getdstifaddr(m);
534 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
535 struct ip6_hdr *ip6;
536
537 ifa_free(&ia6->ia_ifa);
538 ip6 = mtod(m, struct ip6_hdr *);
539 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
540 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
541 return IPPROTO_DONE;
542 }
543 if (ia6)
544 ifa_free(&ia6->ia_ifa);
545
546 tcp_input(m, *offp);
547 return IPPROTO_DONE;
548 }
549 #endif /* INET6 */
550
551 void
552 tcp_input(struct mbuf *m, int off0)
553 {
554 struct tcphdr *th = NULL;
555 struct ip *ip = NULL;
556 #ifdef INET
557 struct ipovly *ipov;
558 #endif
559 struct inpcb *inp = NULL;
560 struct tcpcb *tp = NULL;
561 struct socket *so = NULL;
562 u_char *optp = NULL;
563 int optlen = 0;
564 #ifdef INET
565 int len;
566 #endif
567 int tlen = 0, off;
568 int drop_hdrlen;
569 int thflags;
570 int rstreason = 0; /* For badport_bandlim accounting purposes */
571 #ifdef TCP_SIGNATURE
572 uint8_t sig_checked = 0;
573 #endif
574 uint8_t iptos = 0;
575 struct m_tag *fwd_tag = NULL;
576 #ifdef INET6
577 struct ip6_hdr *ip6 = NULL;
578 int isipv6;
579 #else
580 const void *ip6 = NULL;
581 #endif /* INET6 */
582 struct tcpopt to; /* options in this segment */
583 char *s = NULL; /* address and port logging */
584 int ti_locked;
585 #define TI_UNLOCKED 1
586 #define TI_WLOCKED 2
587
588 #ifdef TCPDEBUG
589 /*
590 * The size of tcp_saveipgen must be the size of the max ip header,
591 * now IPv6.
592 */
593 u_char tcp_saveipgen[IP6_HDR_LEN];
594 struct tcphdr tcp_savetcp;
595 short ostate = 0;
596 #endif
597
598 #ifdef INET6
599 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
600 #endif
601
602 to.to_flags = 0;
603 TCPSTAT_INC(tcps_rcvtotal);
604
605 #ifdef INET6
606 if (isipv6) {
607 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
608
609 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
610 m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
611 if (m == NULL) {
612 TCPSTAT_INC(tcps_rcvshort);
613 return;
614 }
615 }
616
617 ip6 = mtod(m, struct ip6_hdr *);
618 th = (struct tcphdr *)((caddr_t)ip6 + off0);
619 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
620 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
621 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
622 th->th_sum = m->m_pkthdr.csum_data;
623 else
624 th->th_sum = in6_cksum_pseudo(ip6, tlen,
625 IPPROTO_TCP, m->m_pkthdr.csum_data);
626 th->th_sum ^= 0xffff;
627 } else
628 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
629 if (th->th_sum) {
630 TCPSTAT_INC(tcps_rcvbadsum);
631 goto drop;
632 }
633
634 /*
635 * Be proactive about unspecified IPv6 address in source.
636 * As we use all-zero to indicate an unbound/unconnected pcb,
637 * an unspecified IPv6 address can be used to confuse us.
638 *
639 * Note that packets with an unspecified IPv6 destination are
640 * already dropped in ip6_input.
641 */
642 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
643 /* XXX stat */
644 goto drop;
645 }
646 }
647 #endif
648 #if defined(INET) && defined(INET6)
649 else
650 #endif
651 #ifdef INET
652 {
653 /*
654 * Get IP and TCP header together in first mbuf.
655 * Note: IP leaves IP header in first mbuf.
656 */
657 if (off0 > sizeof (struct ip)) {
658 ip_stripoptions(m, (struct mbuf *)0);
659 off0 = sizeof(struct ip);
660 }
661 if (m->m_len < sizeof (struct tcpiphdr)) {
662 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
663 == NULL) {
664 TCPSTAT_INC(tcps_rcvshort);
665 return;
666 }
667 }
668 ip = mtod(m, struct ip *);
669 ipov = (struct ipovly *)ip;
670 th = (struct tcphdr *)((caddr_t)ip + off0);
671 tlen = ip->ip_len;
672
673 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
674 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
675 th->th_sum = m->m_pkthdr.csum_data;
676 else
677 th->th_sum = in_pseudo(ip->ip_src.s_addr,
678 ip->ip_dst.s_addr,
679 htonl(m->m_pkthdr.csum_data +
680 ip->ip_len +
681 IPPROTO_TCP));
682 th->th_sum ^= 0xffff;
683 #ifdef TCPDEBUG
684 ipov->ih_len = (u_short)tlen;
685 ipov->ih_len = htons(ipov->ih_len);
686 #endif
687 } else {
688 /*
689 * Checksum extended TCP header and data.
690 */
691 len = sizeof (struct ip) + tlen;
692 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
693 ipov->ih_len = (u_short)tlen;
694 ipov->ih_len = htons(ipov->ih_len);
695 th->th_sum = in_cksum(m, len);
696 }
697 if (th->th_sum) {
698 TCPSTAT_INC(tcps_rcvbadsum);
699 goto drop;
700 }
701 /* Re-initialization for later version check */
702 ip->ip_v = IPVERSION;
703 }
704 #endif /* INET */
705
706 #ifdef INET6
707 if (isipv6)
708 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
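/*
 * In host byte order the IPv6 traffic class occupies bits 20-27 of
 * ip6_flow, so the shift-and-mask above recovers the same DSCP/ECN byte
 * that ip_tos carries for IPv4.
 */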
709 #endif
710 #if defined(INET) && defined(INET6)
711 else
712 #endif
713 #ifdef INET
714 iptos = ip->ip_tos;
715 #endif
716
717 /*
718 * Check that TCP offset makes sense,
719 * pull out TCP options and adjust length. XXX
720 */
721 off = th->th_off << 2;
722 if (off < sizeof (struct tcphdr) || off > tlen) {
723 TCPSTAT_INC(tcps_rcvbadoff);
724 goto drop;
725 }
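/*
 * th_off counts 32-bit words, so the shift above yields the TCP header
 * length in bytes; anything shorter than the 20-byte base header or
 * longer than the segment itself is malformed and dropped.
 */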
726 tlen -= off; /* tlen is used instead of ti->ti_len */
727 if (off > sizeof (struct tcphdr)) {
728 #ifdef INET6
729 if (isipv6) {
730 IP6_EXTHDR_CHECK(m, off0, off, );
731 ip6 = mtod(m, struct ip6_hdr *);
732 th = (struct tcphdr *)((caddr_t)ip6 + off0);
733 }
734 #endif
735 #if defined(INET) && defined(INET6)
736 else
737 #endif
738 #ifdef INET
739 {
740 if (m->m_len < sizeof(struct ip) + off) {
741 if ((m = m_pullup(m, sizeof (struct ip) + off))
742 == NULL) {
743 TCPSTAT_INC(tcps_rcvshort);
744 return;
745 }
746 ip = mtod(m, struct ip *);
747 ipov = (struct ipovly *)ip;
748 th = (struct tcphdr *)((caddr_t)ip + off0);
749 }
750 }
751 #endif
752 optlen = off - sizeof (struct tcphdr);
753 optp = (u_char *)(th + 1);
754 }
755 thflags = th->th_flags;
756
757 /*
758 * Convert TCP protocol specific fields to host format.
759 */
760 tcp_fields_to_host(th);
761
762 /*
763 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
764 */
765 drop_hdrlen = off0 + off;
766
767 /*
768 * Locate pcb for segment; if we're likely to add or remove a
769 * connection then first acquire pcbinfo lock. There are two cases
770 * where we might discover later we need a write lock despite the
771 * flags: ACKs moving a connection out of the syncache, and ACKs for
772 * a connection in TIMEWAIT.
773 */
774 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) {
775 INP_INFO_WLOCK(&V_tcbinfo);
776 ti_locked = TI_WLOCKED;
777 } else
778 ti_locked = TI_UNLOCKED;
779
780 findpcb:
781 #ifdef INVARIANTS
782 if (ti_locked == TI_WLOCKED) {
783 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
784 } else {
785 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
786 }
787 #endif
788
789 /*
790 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
791 */
792 if (
793 #ifdef INET6
794 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
795 #ifdef INET
796 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
797 #endif
798 #endif
799 #if defined(INET) && !defined(INET6)
800 (m->m_flags & M_IP_NEXTHOP)
801 #endif
802 )
803 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
804
805 #ifdef INET6
806 if (isipv6 && fwd_tag != NULL) {
807 struct sockaddr_in6 *next_hop6;
808
809 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
810 /*
811 * Transparently forwarded. Pretend to be the destination.
812 * Already got one like this?
813 */
814 inp = in6_pcblookup_mbuf(&V_tcbinfo,
815 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
816 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
817 if (!inp) {
818 /*
819 * It's new. Try to find the ambushing socket.
820 * Because we've rewritten the destination address,
821 * any hardware-generated hash is ignored.
822 */
823 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
824 th->th_sport, &next_hop6->sin6_addr,
825 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
826 th->th_dport, INPLOOKUP_WILDCARD |
827 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
828 }
829 /* Remove the tag from the packet. We don't need it anymore. */
830 m_tag_delete(m, fwd_tag);
831 m->m_flags &= ~M_IP6_NEXTHOP;
832 fwd_tag = NULL;
833 } else if (isipv6) {
834 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
835 th->th_sport, &ip6->ip6_dst, th->th_dport,
836 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
837 m->m_pkthdr.rcvif, m);
838 }
839 #endif /* INET6 */
840 #if defined(INET6) && defined(INET)
841 else
842 #endif
843 #ifdef INET
844 if (fwd_tag != NULL) {
845 struct sockaddr_in *next_hop;
846
847 next_hop = (struct sockaddr_in *)(fwd_tag+1);
848 /*
849 * Transparently forwarded. Pretend to be the destination.
850 * already got one like this?
851 */
852 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
853 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
854 m->m_pkthdr.rcvif, m);
855 if (!inp) {
856 /*
857 * It's new. Try to find the ambushing socket.
858 * Because we've rewritten the destination address,
859 * any hardware-generated hash is ignored.
860 */
861 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
862 th->th_sport, next_hop->sin_addr,
863 next_hop->sin_port ? ntohs(next_hop->sin_port) :
864 th->th_dport, INPLOOKUP_WILDCARD |
865 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
866 }
867 /* Remove the tag from the packet. We don't need it anymore. */
868 m_tag_delete(m, fwd_tag);
869 m->m_flags &= ~M_IP_NEXTHOP;
870 fwd_tag = NULL;
871 } else
872 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
873 th->th_sport, ip->ip_dst, th->th_dport,
874 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
875 m->m_pkthdr.rcvif, m);
876 #endif /* INET */
877
878 /*
879 * If the INPCB does not exist then all data in the incoming
880 * segment is discarded and an appropriate RST is sent back.
881 * XXX MRT Send RST using which routing table?
882 */
883 if (inp == NULL) {
884 /*
885 * Log communication attempts to ports that are not
886 * in use.
887 */
888 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
889 tcp_log_in_vain == 2) {
890 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
891 log(LOG_INFO, "%s; %s: Connection attempt "
892 "to closed port\n", s, __func__);
893 }
894 /*
895 * When blackholing do not respond with a RST but
896 * completely ignore the segment and drop it.
897 */
898 if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
899 V_blackhole == 2)
900 goto dropunlock;
901
902 rstreason = BANDLIM_RST_CLOSEDPORT;
903 goto dropwithreset;
904 }
905 INP_WLOCK_ASSERT(inp);
906 if (!(inp->inp_flags & INP_HW_FLOWID)
907 && (m->m_flags & M_FLOWID)
908 && ((inp->inp_socket == NULL)
909 || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
910 inp->inp_flags |= INP_HW_FLOWID;
911 inp->inp_flags &= ~INP_SW_FLOWID;
912 inp->inp_flowid = m->m_pkthdr.flowid;
913 }
914 #ifdef IPSEC
915 #ifdef INET6
916 if (isipv6 && ipsec6_in_reject(m, inp)) {
917 IPSEC6STAT_INC(in_polvio);
918 goto dropunlock;
919 } else
920 #endif /* INET6 */
921 if (ipsec4_in_reject(m, inp) != 0) {
922 IPSECSTAT_INC(in_polvio);
923 goto dropunlock;
924 }
925 #endif /* IPSEC */
926
927 /*
928 * Check the minimum TTL for socket.
929 */
930 if (inp->inp_ip_minttl != 0) {
931 #ifdef INET6
932 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
933 goto dropunlock;
934 else
935 #endif
936 if (inp->inp_ip_minttl > ip->ip_ttl)
937 goto dropunlock;
938 }
939
940 /*
941 * A previous connection in TIMEWAIT state is supposed to catch stray
942 * or duplicate segments arriving late. If this segment was a
943 * legitimate new connection attempt the old INPCB gets removed and
944 * we can try again to find a listening socket.
945 *
946 * At this point, due to earlier optimism, we may hold only an inpcb
947 * lock, and not the inpcbinfo write lock. If so, we need to try to
948 * acquire it, or if that fails, acquire a reference on the inpcb,
949 * drop all locks, acquire a global write lock, and then re-acquire
950 * the inpcb lock. We may at that point discover that another thread
951 * has tried to free the inpcb, in which case we need to loop back
952 * and try to find a new inpcb to deliver to.
953 *
954 * XXXRW: It may be time to rethink timewait locking.
955 */
956 relocked:
957 if (inp->inp_flags & INP_TIMEWAIT) {
958 if (ti_locked == TI_UNLOCKED) {
959 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
960 in_pcbref(inp);
961 INP_WUNLOCK(inp);
962 INP_INFO_WLOCK(&V_tcbinfo);
963 ti_locked = TI_WLOCKED;
964 INP_WLOCK(inp);
965 if (in_pcbrele_wlocked(inp)) {
966 inp = NULL;
967 goto findpcb;
968 }
969 } else
970 ti_locked = TI_WLOCKED;
971 }
972 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
973
974 if (thflags & TH_SYN)
975 tcp_dooptions(&to, optp, optlen, TO_SYN);
976 /*
977 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
978 */
979 if (tcp_twcheck(inp, &to, th, m, tlen))
980 goto findpcb;
981 INP_INFO_WUNLOCK(&V_tcbinfo);
982 return;
983 }
984 /*
985 * The TCPCB may no longer exist if the connection is winding
986 * down or it is in the CLOSED state. Either way we drop the
987 * segment and send an appropriate response.
988 */
989 tp = intotcpcb(inp);
990 if (tp == NULL || tp->t_state == TCPS_CLOSED) {
991 rstreason = BANDLIM_RST_CLOSEDPORT;
992 goto dropwithreset;
993 }
994
995 #ifdef TCP_OFFLOAD
996 if (tp->t_flags & TF_TOE) {
997 tcp_offload_input(tp, m);
998 m = NULL; /* consumed by the TOE driver */
999 goto dropunlock;
1000 }
1001 #endif
1002
1003 /*
1004 * We've identified a valid inpcb, but it could be that we need an
1005 * inpcbinfo write lock but don't hold it. In this case, attempt to
1006 * acquire using the same strategy as the TIMEWAIT case above. If we
1007 * relock, we have to jump back to 'relocked' as the connection might
1008 * now be in TIMEWAIT.
1009 */
1010 #ifdef INVARIANTS
1011 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0)
1012 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1013 #endif
1014 if (tp->t_state != TCPS_ESTABLISHED) {
1015 if (ti_locked == TI_UNLOCKED) {
1016 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
1017 in_pcbref(inp);
1018 INP_WUNLOCK(inp);
1019 INP_INFO_WLOCK(&V_tcbinfo);
1020 ti_locked = TI_WLOCKED;
1021 INP_WLOCK(inp);
1022 if (in_pcbrele_wlocked(inp)) {
1023 inp = NULL;
1024 goto findpcb;
1025 }
1026 goto relocked;
1027 } else
1028 ti_locked = TI_WLOCKED;
1029 }
1030 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1031 }
1032
1033 #ifdef MAC
1034 INP_WLOCK_ASSERT(inp);
1035 if (mac_inpcb_check_deliver(inp, m))
1036 goto dropunlock;
1037 #endif
1038 so = inp->inp_socket;
1039 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1040 #ifdef TCPDEBUG
1041 if (so->so_options & SO_DEBUG) {
1042 ostate = tp->t_state;
1043 #ifdef INET6
1044 if (isipv6) {
1045 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
1046 } else
1047 #endif
1048 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1049 tcp_savetcp = *th;
1050 }
1051 #endif /* TCPDEBUG */
1052 /*
1053 * When the socket is accepting connections (the INPCB is in LISTEN
1054 * state) we look into the SYN cache if this is a new connection
1055 * attempt or the completion of a previous one. Because listen
1056 * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be
1057 * held in this case.
1058 */
1059 if (so->so_options & SO_ACCEPTCONN) {
1060 struct in_conninfo inc;
1061
1062 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
1063 "tp not listening", __func__));
1064 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1065
1066 bzero(&inc, sizeof(inc));
1067 #ifdef INET6
1068 if (isipv6) {
1069 inc.inc_flags |= INC_ISIPV6;
1070 inc.inc6_faddr = ip6->ip6_src;
1071 inc.inc6_laddr = ip6->ip6_dst;
1072 } else
1073 #endif
1074 {
1075 inc.inc_faddr = ip->ip_src;
1076 inc.inc_laddr = ip->ip_dst;
1077 }
1078 inc.inc_fport = th->th_sport;
1079 inc.inc_lport = th->th_dport;
1080 inc.inc_fibnum = so->so_fibnum;
1081
1082 /*
1083 * Check for an existing connection attempt in syncache if
1084 * the flag is only ACK. A successful lookup creates a new
1085 * socket appended to the listen queue in SYN_RECEIVED state.
1086 */
1087 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1088 /*
1089 * Parse the TCP options here because
1090 * syncookies need access to the reflected
1091 * timestamp.
1092 */
1093 tcp_dooptions(&to, optp, optlen, 0);
1094 /*
1095 * NB: syncache_expand() doesn't unlock
1096 * inp and tcpinfo locks.
1097 */
1098 if (!syncache_expand(&inc, &to, th, &so, m)) {
1099 /*
1100 * No syncache entry or ACK was not
1101 * for our SYN/ACK. Send a RST.
1102 * NB: syncache did its own logging
1103 * of the failure cause.
1104 */
1105 rstreason = BANDLIM_RST_OPENPORT;
1106 goto dropwithreset;
1107 }
1108 if (so == NULL) {
1109 /*
1110 * We completed the 3-way handshake
1111 * but could not allocate a socket
1112 * either due to memory shortage,
1113 * listen queue length limits or
1114 * global socket limits. Send RST
1115 * or wait and have the remote end
1116 * retransmit the ACK for another
1117 * try.
1118 */
1119 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1120 log(LOG_DEBUG, "%s; %s: Listen socket: "
1121 "Socket allocation failed due to "
1122 "limits or memory shortage, %s\n",
1123 s, __func__,
1124 V_tcp_sc_rst_sock_fail ?
1125 "sending RST" : "try again");
1126 if (V_tcp_sc_rst_sock_fail) {
1127 rstreason = BANDLIM_UNLIMITED;
1128 goto dropwithreset;
1129 } else
1130 goto dropunlock;
1131 }
1132 /*
1133 * Socket is created in state SYN_RECEIVED.
1134 * Unlock the listen socket, lock the newly
1135 * created socket and update the tp variable.
1136 */
1137 INP_WUNLOCK(inp); /* listen socket */
1138 inp = sotoinpcb(so);
1139 INP_WLOCK(inp); /* new connection */
1140 tp = intotcpcb(inp);
1141 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1142 ("%s: new connection not in SYN_RECEIVED", __func__));
1143 #ifdef TCP_SIGNATURE
1144 if (sig_checked == 0) {
1145 tcp_dooptions(&to, optp, optlen,
1146 (thflags & TH_SYN) ? TO_SYN : 0);
1147 if (!tcp_signature_verify_input(m, off0, tlen,
1148 optlen, &to, th, tp->t_flags)) {
1149
1150 /*
1151 * An RST received in SYN_SENT state is
1152 * still allowed through for further
1153 * processing despite the failed check.
1154 */
1155 if ((thflags & TH_RST) == 0 ||
1156 (tp->t_state == TCPS_SYN_SENT) == 0)
1157 goto dropunlock;
1158 }
1159 sig_checked = 1;
1160 }
1161 #endif
1162
1163 /*
1164 * Process the segment and the data it
1165 * contains. tcp_do_segment() consumes
1166 * the mbuf chain and unlocks the inpcb.
1167 */
1168 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1169 iptos, ti_locked);
1170 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1171 return;
1172 }
1173 /*
1174 * Segment flag validation for new connection attempts:
1175 *
1176 * Our (SYN|ACK) response was rejected.
1177 * Check with syncache and remove entry to prevent
1178 * retransmits.
1179 *
1180 * NB: syncache_chkrst does its own logging of failure
1181 * causes.
1182 */
1183 if (thflags & TH_RST) {
1184 syncache_chkrst(&inc, th);
1185 goto dropunlock;
1186 }
1187 /*
1188 * We can't do anything without SYN.
1189 */
1190 if ((thflags & TH_SYN) == 0) {
1191 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1192 log(LOG_DEBUG, "%s; %s: Listen socket: "
1193 "SYN is missing, segment ignored\n",
1194 s, __func__);
1195 TCPSTAT_INC(tcps_badsyn);
1196 goto dropunlock;
1197 }
1198 /*
1199 * (SYN|ACK) is bogus on a listen socket.
1200 */
1201 if (thflags & TH_ACK) {
1202 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1203 log(LOG_DEBUG, "%s; %s: Listen socket: "
1204 "SYN|ACK invalid, segment rejected\n",
1205 s, __func__);
1206 syncache_badack(&inc); /* XXX: Not needed! */
1207 TCPSTAT_INC(tcps_badsyn);
1208 rstreason = BANDLIM_RST_OPENPORT;
1209 goto dropwithreset;
1210 }
1211 /*
1212 * If the drop_synfin option is enabled, drop all
1213 * segments with both the SYN and FIN bits set.
1214 * This prevents e.g. nmap from identifying the
1215 * TCP/IP stack.
1216 * XXX: Poor reasoning. nmap has other methods
1217 * and is constantly refining its stack detection
1218 * strategies.
1219 * XXX: This is a violation of the TCP specification
1220 * and was used by RFC1644.
1221 */
1222 if ((thflags & TH_FIN) && V_drop_synfin) {
1223 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1224 log(LOG_DEBUG, "%s; %s: Listen socket: "
1225 "SYN|FIN segment ignored (based on "
1226 "sysctl setting)\n", s, __func__);
1227 TCPSTAT_INC(tcps_badsyn);
1228 goto dropunlock;
1229 }
1230 /*
1231 * Segment's flags are (SYN) or (SYN|FIN).
1232 *
1233 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1234 * as they do not affect the state of the TCP FSM.
1235 * The data pointed to by TH_URG and th_urp is ignored.
1236 */
1237 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1238 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1239 KASSERT(thflags & (TH_SYN),
1240 ("%s: Listen socket: TH_SYN not set", __func__));
1241 #ifdef INET6
1242 /*
1243 * If deprecated address is forbidden,
1244 * we do not accept SYN to deprecated interface
1245 * address to prevent any new inbound connection from
1246 * getting established.
1247 * When we do not accept SYN, we send a TCP RST,
1248 * with the deprecated source address (instead of dropping
1249 * the segment). This is a compromise, as it is much better
1250 * for the peer if we send a RST, and the RST will be the
1251 * final packet of the exchange.
1252 *
1253 * If we do not forbid deprecated addresses, we accept
1254 * the SYN packet. RFC2462 does not suggest dropping
1255 * SYN in this case.
1256 * If we decipher RFC2462 5.5.4, it reads as follows:
1257 * 1. use of deprecated addr with existing
1258 * communication is okay - "SHOULD continue to be
1259 * used"
1260 * 2. use of it with new communication:
1261 * (2a) "SHOULD NOT be used if alternate address
1262 * with sufficient scope is available"
1263 * (2b) nothing mentioned otherwise.
1264 * Here we fall into (2b) case as we have no choice in
1265 * our source address selection - we must obey the peer.
1266 *
1267 * The wording in RFC2462 is confusing, and there are
1268 * multiple descriptions of deprecated address
1269 * handling - worse, they are not exactly the same.
1270 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1271 */
1272 if (isipv6 && !V_ip6_use_deprecated) {
1273 struct in6_ifaddr *ia6;
1274
1275 ia6 = ip6_getdstifaddr(m);
1276 if (ia6 != NULL &&
1277 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1278 ifa_free(&ia6->ia_ifa);
1279 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1280 log(LOG_DEBUG, "%s; %s: Listen socket: "
1281 "Connection attempt to deprecated "
1282 "IPv6 address rejected\n",
1283 s, __func__);
1284 rstreason = BANDLIM_RST_OPENPORT;
1285 goto dropwithreset;
1286 }
1287 if (ia6)
1288 ifa_free(&ia6->ia_ifa);
1289 }
1290 #endif /* INET6 */
1291 /*
1292 * Basic sanity checks on incoming SYN requests:
1293 * Don't respond if the destination is a link layer
1294 * broadcast according to RFC1122 4.2.3.10, p. 104.
1295 * If it is from this socket it must be forged.
1296 * Don't respond if the source or destination is a
1297 * global or subnet broad- or multicast address.
1298 * Note that it is quite possible to receive unicast
1299 * link-layer packets with a broadcast IP address. Use
1300 * in_broadcast() to find them.
1301 */
1302 if (m->m_flags & (M_BCAST|M_MCAST)) {
1303 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1304 log(LOG_DEBUG, "%s; %s: Listen socket: "
1305 "Connection attempt from broad- or multicast "
1306 "link layer address ignored\n", s, __func__);
1307 goto dropunlock;
1308 }
1309 #ifdef INET6
1310 if (isipv6) {
1311 if (th->th_dport == th->th_sport &&
1312 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1313 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1314 log(LOG_DEBUG, "%s; %s: Listen socket: "
1315 "Connection attempt to/from self "
1316 "ignored\n", s, __func__);
1317 goto dropunlock;
1318 }
1319 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1320 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1321 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1322 log(LOG_DEBUG, "%s; %s: Listen socket: "
1323 "Connection attempt from/to multicast "
1324 "address ignored\n", s, __func__);
1325 goto dropunlock;
1326 }
1327 }
1328 #endif
1329 #if defined(INET) && defined(INET6)
1330 else
1331 #endif
1332 #ifdef INET
1333 {
1334 if (th->th_dport == th->th_sport &&
1335 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1336 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1337 log(LOG_DEBUG, "%s; %s: Listen socket: "
1338 "Connection attempt from/to self "
1339 "ignored\n", s, __func__);
1340 goto dropunlock;
1341 }
1342 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1343 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1344 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1345 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1346 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1347 log(LOG_DEBUG, "%s; %s: Listen socket: "
1348 "Connection attempt from/to broad- "
1349 "or multicast address ignored\n",
1350 s, __func__);
1351 goto dropunlock;
1352 }
1353 }
1354 #endif
1355 /*
1356 * SYN appears to be valid. Create compressed TCP state
1357 * for syncache.
1358 */
1359 #ifdef TCPDEBUG
1360 if (so->so_options & SO_DEBUG)
1361 tcp_trace(TA_INPUT, ostate, tp,
1362 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1363 #endif
1364 tcp_dooptions(&to, optp, optlen, TO_SYN);
1365 syncache_add(&inc, &to, th, inp, &so, m);
1366 /*
1367 * Entry added to syncache and mbuf consumed.
1368 * Everything already unlocked by syncache_add().
1369 */
1370 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1371 return;
1372 } else if (tp->t_state == TCPS_LISTEN) {
1373 /*
1374 * When a listen socket is torn down, the SO_ACCEPTCONN
1375 * flag is removed first while connections are drained
1376 * from the accept queue in an unlock/lock cycle of the
1377 * ACCEPT_LOCK, opening a race condition that allows a SYN
1378 * attempt to go through unhandled.
1379 */
1380 goto dropunlock;
1381 }
1382
1383 #ifdef TCP_SIGNATURE
1384 if (sig_checked == 0) {
1385 tcp_dooptions(&to, optp, optlen,
1386 (thflags & TH_SYN) ? TO_SYN : 0);
1387 if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
1388 th, tp->t_flags)) {
1389
1390 /*
1391 * An RST received in SYN_SENT state is still allowed
1392 * through for further processing despite the failed check.
1393 */
1394 if ((thflags & TH_RST) == 0 ||
1395 (tp->t_state == TCPS_SYN_SENT) == 0)
1396 goto dropunlock;
1397 }
1398 sig_checked = 1;
1399 }
1400 #endif
1401
1402 /*
1403 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1404 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1405 * the inpcb, and unlocks pcbinfo.
1406 */
1407 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
1408 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1409 return;
1410
1411 dropwithreset:
1412 if (ti_locked == TI_WLOCKED) {
1413 INP_INFO_WUNLOCK(&V_tcbinfo);
1414 ti_locked = TI_UNLOCKED;
1415 }
1416 #ifdef INVARIANTS
1417 else {
1418 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
1419 "ti_locked: %d", __func__, ti_locked));
1420 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1421 }
1422 #endif
1423
1424 if (inp != NULL) {
1425 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1426 INP_WUNLOCK(inp);
1427 } else
1428 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1429 m = NULL; /* mbuf chain got consumed. */
1430 goto drop;
1431
1432 dropunlock:
1433 if (ti_locked == TI_WLOCKED) {
1434 INP_INFO_WUNLOCK(&V_tcbinfo);
1435 ti_locked = TI_UNLOCKED;
1436 }
1437 #ifdef INVARIANTS
1438 else {
1439 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
1440 "ti_locked: %d", __func__, ti_locked));
1441 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1442 }
1443 #endif
1444
1445 if (inp != NULL)
1446 INP_WUNLOCK(inp);
1447
1448 drop:
1449 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1450 if (s != NULL)
1451 free(s, M_TCPLOG);
1452 if (m != NULL)
1453 m_freem(m);
1454 }
1455
1456 static void
1457 tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
1458 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
1459 int ti_locked)
1460 {
1461 int thflags, acked, ourfinisacked, needoutput = 0;
1462 int rstreason, todrop, win;
1463 u_long tiwin;
1464 struct tcpopt to;
1465
1466 #ifdef TCPDEBUG
1467 /*
1468 * The size of tcp_saveipgen must be the size of the max ip header,
1469 * now IPv6.
1470 */
1471 u_char tcp_saveipgen[IP6_HDR_LEN];
1472 struct tcphdr tcp_savetcp;
1473 short ostate = 0;
1474 #endif
1475 thflags = th->th_flags;
1476 tp->sackhint.last_sack_ack = 0;
1477
1478 /*
1479 * If this is either a state-changing packet or current state isn't
1480 * established, we require a write lock on tcbinfo. Otherwise, we
1481 * allow either a read lock or a write lock, as we may have acquired
1482 * a write lock due to a race.
1483 *
1484 * Require a global write lock for SYN/FIN/RST segments or
1485 * non-established connections; otherwise accept either a read or
1486 * write lock, as we may have conservatively acquired a write lock in
1487 * certain cases in tcp_input() (is this still true?). Currently we
1488 * will never enter with no lock, so we try to drop it quickly in the
1489 * common pure ack/pure data cases.
1490 */
1491 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
1492 tp->t_state != TCPS_ESTABLISHED) {
1493 KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
1494 "SYN/FIN/RST/!EST", __func__, ti_locked));
1495 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1496 } else {
1497 #ifdef INVARIANTS
1498 if (ti_locked == TI_WLOCKED)
1499 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1500 else {
1501 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
1502 "ti_locked: %d", __func__, ti_locked));
1503 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1504 }
1505 #endif
1506 }
1507 INP_WLOCK_ASSERT(tp->t_inpcb);
1508 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1509 __func__));
1510 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1511 __func__));
1512
1513 /*
1514 * Segment received on connection.
1515 * Reset idle time and keep-alive timer.
1516 * XXX: This should be done after segment
1517 * validation to ignore broken/spoofed segs.
1518 */
1519 tp->t_rcvtime = ticks;
1520 if (TCPS_HAVEESTABLISHED(tp->t_state))
1521 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
1522
1523 /*
1524 * Unscale the window into a 32-bit value.
1525 * For the SYN_SENT state the scale is zero.
1526 */
1527 tiwin = th->th_win << tp->snd_scale;
1528
1529 /*
1530 * TCP ECN processing.
1531 */
1532 if (tp->t_flags & TF_ECN_PERMIT) {
1533 if (thflags & TH_CWR)
1534 tp->t_flags &= ~TF_ECN_SND_ECE;
1535 switch (iptos & IPTOS_ECN_MASK) {
1536 case IPTOS_ECN_CE:
1537 tp->t_flags |= TF_ECN_SND_ECE;
1538 TCPSTAT_INC(tcps_ecn_ce);
1539 break;
1540 case IPTOS_ECN_ECT0:
1541 TCPSTAT_INC(tcps_ecn_ect0);
1542 break;
1543 case IPTOS_ECN_ECT1:
1544 TCPSTAT_INC(tcps_ecn_ect1);
1545 break;
1546 }
1547 /* Congestion experienced. */
1548 if (thflags & TH_ECE) {
1549 cc_cong_signal(tp, th, CC_ECN);
1550 }
1551 }
1552
1553 /*
1554 * Parse options on any incoming segment.
1555 */
1556 tcp_dooptions(&to, (u_char *)(th + 1),
1557 (th->th_off << 2) - sizeof(struct tcphdr),
1558 (thflags & TH_SYN) ? TO_SYN : 0);
1559
1560 /*
1561 * If echoed timestamp is later than the current time,
1562 * fall back to non RFC1323 RTT calculation. Normalize
1563 * timestamp if syncookies were used when this connection
1564 * was established.
1565 */
1566 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1567 to.to_tsecr -= tp->ts_offset;
1568 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
1569 to.to_tsecr = 0;
1570 }
1571
1572 /*
1573 * Process options only when we get SYN/ACK back. The SYN case
1574 * for incoming connections is handled in tcp_syncache.
1575 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
1576 * or <SYN,ACK>) segment itself is never scaled.
1577 * XXX this is traditional behavior, may need to be cleaned up.
1578 */
1579 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1580 if ((to.to_flags & TOF_SCALE) &&
1581 (tp->t_flags & TF_REQ_SCALE)) {
1582 tp->t_flags |= TF_RCVD_SCALE;
1583 tp->snd_scale = to.to_wscale;
1584 }
1585 /*
1586 * Initial send window. It will be updated with
1587 * the next incoming segment to the scaled value.
1588 */
1589 tp->snd_wnd = th->th_win;
1590 if (to.to_flags & TOF_TS) {
1591 tp->t_flags |= TF_RCVD_TSTMP;
1592 tp->ts_recent = to.to_tsval;
1593 tp->ts_recent_age = tcp_ts_getticks();
1594 }
1595 if (to.to_flags & TOF_MSS)
1596 tcp_mss(tp, to.to_mss);
1597 if ((tp->t_flags & TF_SACK_PERMIT) &&
1598 (to.to_flags & TOF_SACKPERM) == 0)
1599 tp->t_flags &= ~TF_SACK_PERMIT;
1600 }
1601
1602 /*
1603 * Header prediction: check for the two common cases
1604 * of a uni-directional data xfer. If the packet has
1605 * no control flags, is in-sequence, the window didn't
1606 * change and we're not retransmitting, it's a
1607 * candidate. If the length is zero and the ack moved
1608 * forward, we're the sender side of the xfer. Just
1609 * free the data acked & wake any higher level process
1610 * that was blocked waiting for space. If the length
1611 * is non-zero and the ack didn't move, we're the
1612 * receiver side. If we're getting packets in-order
1613 * (the reassembly queue is empty), add the data to
1614 * the socket buffer and note that we need a delayed ack.
1615 * Make sure that the hidden state-flags are also off.
1616 * Since we check for TCPS_ESTABLISHED first, it can only
1617 * be TF_NEEDSYN.
1618 */
1619 if (tp->t_state == TCPS_ESTABLISHED &&
1620 th->th_seq == tp->rcv_nxt &&
1621 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1622 tp->snd_nxt == tp->snd_max &&
1623 tiwin && tiwin == tp->snd_wnd &&
1624 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1625 LIST_EMPTY(&tp->t_segq) &&
1626 ((to.to_flags & TOF_TS) == 0 ||
1627 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1628
1629 /*
1630 * If last ACK falls within this segment's sequence numbers,
1631 * record the timestamp.
1632 * NOTE that the test is modified according to the latest
1633 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1634 */
1635 if ((to.to_flags & TOF_TS) != 0 &&
1636 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1637 tp->ts_recent_age = tcp_ts_getticks();
1638 tp->ts_recent = to.to_tsval;
1639 }
1640
1641 if (tlen == 0) {
1642 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1643 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1644 !IN_RECOVERY(tp->t_flags) &&
1645 (to.to_flags & TOF_SACK) == 0 &&
1646 TAILQ_EMPTY(&tp->snd_holes)) {
1647 /*
1648 * This is a pure ack for outstanding data.
1649 */
1650 if (ti_locked == TI_WLOCKED)
1651 INP_INFO_WUNLOCK(&V_tcbinfo);
1652 ti_locked = TI_UNLOCKED;
1653
1654 TCPSTAT_INC(tcps_predack);
1655
1656 /*
1657 * "bad retransmit" recovery.
1658 */
1659 if (tp->t_rxtshift == 1 &&
1660 tp->t_flags & TF_PREVVALID &&
1661 (int)(ticks - tp->t_badrxtwin) < 0) {
1662 cc_cong_signal(tp, th, CC_RTO_ERR);
1663 }
1664
1665 /*
1666 * Recalculate the transmit timer / rtt.
1667 *
1668 * Some boxes send broken timestamp replies
1669 * during the SYN+ACK phase, ignore
1670 * timestamps of 0 or we could calculate a
1671 * huge RTT and blow up the retransmit timer.
1672 */
1673 if ((to.to_flags & TOF_TS) != 0 &&
1674 to.to_tsecr) {
1675 u_int t;
1676
1677 t = tcp_ts_getticks() - to.to_tsecr;
1678 if (!tp->t_rttlow || tp->t_rttlow > t)
1679 tp->t_rttlow = t;
1680 tcp_xmit_timer(tp,
1681 TCP_TS_TO_TICKS(t) + 1);
1682 } else if (tp->t_rtttime &&
1683 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1684 if (!tp->t_rttlow ||
1685 tp->t_rttlow > ticks - tp->t_rtttime)
1686 tp->t_rttlow = ticks - tp->t_rtttime;
1687 tcp_xmit_timer(tp,
1688 ticks - tp->t_rtttime);
1689 }
1690 acked = BYTES_THIS_ACK(tp, th);
1691
1692 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1693 hhook_run_tcp_est_in(tp, th, &to);
1694
1695 TCPSTAT_INC(tcps_rcvackpack);
1696 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1697 sbdrop(&so->so_snd, acked);
1698 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1699 SEQ_LEQ(th->th_ack, tp->snd_recover))
1700 tp->snd_recover = th->th_ack - 1;
1701
1702 /*
1703 * Let the congestion control algorithm update
1704 * congestion control related information. This
1705 * typically means increasing the congestion
1706 * window.
1707 */
1708 cc_ack_received(tp, th, CC_ACK);
1709
1710 tp->snd_una = th->th_ack;
1711 /*
1712 * Pull snd_wl2 up to prevent seq wrap relative
1713 * to th_ack.
1714 */
1715 tp->snd_wl2 = th->th_ack;
1716 tp->t_dupacks = 0;
1717 m_freem(m);
1718 ND6_HINT(tp); /* Some progress has been made. */
1719
1720 /*
1721 * If all outstanding data are acked, stop
1722 * retransmit timer, otherwise restart timer
1723 * using current (possibly backed-off) value.
1724 * If process is waiting for space,
1725 * wakeup/selwakeup/signal. If data
1726 * are ready to send, let tcp_output
1727 * decide between more output or persist.
1728 */
1729 #ifdef TCPDEBUG
1730 if (so->so_options & SO_DEBUG)
1731 tcp_trace(TA_INPUT, ostate, tp,
1732 (void *)tcp_saveipgen,
1733 &tcp_savetcp, 0);
1734 #endif
1735 if (tp->snd_una == tp->snd_max)
1736 tcp_timer_activate(tp, TT_REXMT, 0);
1737 else if (!tcp_timer_active(tp, TT_PERSIST))
1738 tcp_timer_activate(tp, TT_REXMT,
1739 tp->t_rxtcur);
1740 sowwakeup(so);
1741 if (so->so_snd.sb_cc)
1742 (void) tcp_output(tp);
1743 goto check_delack;
1744 }
1745 } else if (th->th_ack == tp->snd_una &&
1746 tlen <= sbspace(&so->so_rcv)) {
1747 int newsize = 0; /* automatic sockbuf scaling */
1748
1749 /*
1750 * This is a pure, in-sequence data packet with
1751 * nothing on the reassembly queue and we have enough
1752 * buffer space to take it.
1753 */
1754 if (ti_locked == TI_WLOCKED)
1755 INP_INFO_WUNLOCK(&V_tcbinfo);
1756 ti_locked = TI_UNLOCKED;
1757
1758 /* Clean receiver SACK report if present */
1759 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1760 tcp_clean_sackreport(tp);
1761 TCPSTAT_INC(tcps_preddat);
1762 tp->rcv_nxt += tlen;
1763 /*
1764 * Pull snd_wl1 up to prevent seq wrap relative to
1765 * th_seq.
1766 */
1767 tp->snd_wl1 = th->th_seq;
1768 /*
1769 * Pull rcv_up up to prevent seq wrap relative to
1770 * rcv_nxt.
1771 */
1772 tp->rcv_up = tp->rcv_nxt;
1773 TCPSTAT_INC(tcps_rcvpack);
1774 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1775 ND6_HINT(tp); /* Some progress has been made */
1776 #ifdef TCPDEBUG
1777 if (so->so_options & SO_DEBUG)
1778 tcp_trace(TA_INPUT, ostate, tp,
1779 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1780 #endif
1781 /*
1782 * Automatic sizing of receive socket buffer. Often the send
1783 * buffer size is not optimally adjusted to the actual network
1784 * conditions at hand (delay bandwidth product). Setting the
1785 * buffer size too small limits throughput on links with high
1786 * bandwidth and high delay (e.g. trans-continental/oceanic links).
1787 *
1788 * On the receive side the socket buffer memory is only rarely
1789 * used to any significant extent. This allows us to be much
1790 * more aggressive in scaling the receive socket buffer. For
1791 * the case that the buffer space is actually used to a large
1792 * extent and we run out of kernel memory we can simply drop
1793 * the new segments; TCP on the sender will just retransmit them
1794 * later. Setting the buffer size too big may only consume too
1795 * much kernel memory if the application doesn't read() from
1796 * the socket or packet loss or reordering makes use of the
1797 * reassembly queue.
1798 *
1799 * The criteria to step up the receive buffer one notch are:
1800 * 1. the number of bytes received during the time it takes
1801 * one timestamp to be reflected back to us (the RTT) is measured;
1802 * 2. the bytes received per RTT are within seven eighths of the
1803 * current socket buffer size;
1804 * 3. the receive buffer size has not hit its maximal automatic size.
1805 *
1806 * This algorithm does one step per RTT at most and only if
1807 * we receive a bulk stream w/o packet losses or reorderings.
1808 * Shrinking the buffer during idle times is not necessary as
1809 * it doesn't consume any memory when idle.
1810 *
1811 * TODO: Only step up if the application is actually draining
1812 * the buffer, to better manage the socket buffer resources.
1813 */
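/*
 * Worked example (assuming the stock autotuning defaults of a 16 kB
 * increment and a 2 MB ceiling): with sb_hiwat at 64 kB, counting more
 * than 56 kB (7/8 of 64 kB) of data within one reflected-timestamp
 * interval grows the limit to 80 kB on the next step.
 */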
1814 if (V_tcp_do_autorcvbuf &&
1815 to.to_tsecr &&
1816 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1817 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
1818 to.to_tsecr - tp->rfbuf_ts < hz) {
1819 if (tp->rfbuf_cnt >
1820 (so->so_rcv.sb_hiwat / 8 * 7) &&
1821 so->so_rcv.sb_hiwat <
1822 V_tcp_autorcvbuf_max) {
1823 newsize =
1824 min(so->so_rcv.sb_hiwat +
1825 V_tcp_autorcvbuf_inc,
1826 V_tcp_autorcvbuf_max);
1827 }
1828 /* Start over with next RTT. */
1829 tp->rfbuf_ts = 0;
1830 tp->rfbuf_cnt = 0;
1831 } else
1832 tp->rfbuf_cnt += tlen; /* add up */
1833 }
1834
1835 /* Add data to socket buffer. */
1836 SOCKBUF_LOCK(&so->so_rcv);
1837 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1838 m_freem(m);
1839 } else {
1840 /*
1841 * Set new socket buffer size.
1842 * Give up when limit is reached.
1843 */
1844 if (newsize)
1845 if (!sbreserve_locked(&so->so_rcv,
1846 newsize, so, NULL))
1847 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1848 m_adj(m, drop_hdrlen); /* delayed header drop */
1849 sbappendstream_locked(&so->so_rcv, m);
1850 }
1851 /* NB: sorwakeup_locked() does an implicit unlock. */
1852 sorwakeup_locked(so);
1853 if (DELAY_ACK(tp)) {
1854 tp->t_flags |= TF_DELACK;
1855 } else {
1856 tp->t_flags |= TF_ACKNOW;
1857 tcp_output(tp);
1858 }
1859 goto check_delack;
1860 }
1861 }
1862
1863 /*
1864 * Calculate amount of space in receive window,
1865 * and then do TCP input processing.
1866 * Receive window is amount of space in rcv queue,
1867 * but not less than advertised window.
1868 */
1869 win = sbspace(&so->so_rcv);
1870 if (win < 0)
1871 win = 0;
1872 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1873
1874 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1875 tp->rfbuf_ts = 0;
1876 tp->rfbuf_cnt = 0;
1877
1878 switch (tp->t_state) {
1879
1880 /*
1881 * If the state is SYN_RECEIVED:
1882 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1883 */
1884 case TCPS_SYN_RECEIVED:
1885 if ((thflags & TH_ACK) &&
1886 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1887 SEQ_GT(th->th_ack, tp->snd_max))) {
1888 rstreason = BANDLIM_RST_OPENPORT;
1889 goto dropwithreset;
1890 }
1891 break;
1892
1893 /*
1894 * If the state is SYN_SENT:
1895 * if seg contains an ACK, but not for our SYN, drop the input.
1896 * if seg contains a RST, then drop the connection.
1897 * if seg does not contain SYN, then drop it.
1898 * Otherwise this is an acceptable SYN segment
1899 * initialize tp->rcv_nxt and tp->irs
1900 * if seg contains ack then advance tp->snd_una
1901 * if seg contains an ECE and ECN support is enabled, the stream
1902 * is ECN capable.
1903 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1904 * arrange for segment to be acked (eventually)
1905 * continue processing rest of data/controls, beginning with URG
1906 */
1907 case TCPS_SYN_SENT:
1908 if ((thflags & TH_ACK) &&
1909 (SEQ_LEQ(th->th_ack, tp->iss) ||
1910 SEQ_GT(th->th_ack, tp->snd_max))) {
1911 rstreason = BANDLIM_UNLIMITED;
1912 goto dropwithreset;
1913 }
1914 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
1915 tp = tcp_drop(tp, ECONNREFUSED);
1916 if (thflags & TH_RST)
1917 goto drop;
1918 if (!(thflags & TH_SYN))
1919 goto drop;
1920
1921 tp->irs = th->th_seq;
1922 tcp_rcvseqinit(tp);
1923 if (thflags & TH_ACK) {
1924 TCPSTAT_INC(tcps_connects);
1925 soisconnected(so);
1926 #ifdef MAC
1927 mac_socketpeer_set_from_mbuf(m, so);
1928 #endif
1929 /* Do window scaling on this connection? */
1930 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1931 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1932 tp->rcv_scale = tp->request_r_scale;
1933 }
1934 tp->rcv_adv += imin(tp->rcv_wnd,
1935 TCP_MAXWIN << tp->rcv_scale);
1936 tp->snd_una++; /* SYN is acked */
1937 /*
1938 * If there's data, delay ACK; if there's also a FIN,
1939 * ACKNOW will be turned on later.
1940 */
1941 if (DELAY_ACK(tp) && tlen != 0)
1942 tcp_timer_activate(tp, TT_DELACK,
1943 tcp_delacktime);
1944 else
1945 tp->t_flags |= TF_ACKNOW;
1946
1947 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
1948 tp->t_flags |= TF_ECN_PERMIT;
1949 TCPSTAT_INC(tcps_ecn_shs);
1950 }
1951
1952 /*
1953 * Received <SYN,ACK> in SYN_SENT[*] state.
1954 * Transitions:
1955 * SYN_SENT --> ESTABLISHED
1956 * SYN_SENT* --> FIN_WAIT_1
1957 */
1958 tp->t_starttime = ticks;
1959 if (tp->t_flags & TF_NEEDFIN) {
1960 tp->t_state = TCPS_FIN_WAIT_1;
1961 tp->t_flags &= ~TF_NEEDFIN;
1962 thflags &= ~TH_SYN;
1963 } else {
1964 tp->t_state = TCPS_ESTABLISHED;
1965 cc_conn_init(tp);
1966 tcp_timer_activate(tp, TT_KEEP,
1967 TP_KEEPIDLE(tp));
1968 }
1969 } else {
1970 /*
1971 * Received initial SYN in SYN-SENT[*] state =>
1972 * simultaneous open. If segment contains CC option
1973 * and there is a cached CC, apply TAO test.
1974 * If it succeeds, connection is half-synchronized.
1975 * Otherwise, do 3-way handshake:
1976 * SYN-SENT -> SYN-RECEIVED
1977 * SYN-SENT* -> SYN-RECEIVED*
1978 * If there was no CC option, clear cached CC value.
1979 */
1980 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
1981 tcp_timer_activate(tp, TT_REXMT, 0);
1982 tp->t_state = TCPS_SYN_RECEIVED;
1983 }
1984
1985 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
1986 "ti_locked %d", __func__, ti_locked));
1987 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1988 INP_WLOCK_ASSERT(tp->t_inpcb);
1989
1990 /*
1991 * Advance th->th_seq to correspond to first data byte.
1992 * If data, trim to stay within window,
1993 * dropping FIN if necessary.
1994 */
1995 th->th_seq++;
1996 if (tlen > tp->rcv_wnd) {
1997 todrop = tlen - tp->rcv_wnd;
1998 m_adj(m, -todrop);
1999 tlen = tp->rcv_wnd;
2000 thflags &= ~TH_FIN;
2001 TCPSTAT_INC(tcps_rcvpackafterwin);
2002 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2003 }
2004 tp->snd_wl1 = th->th_seq - 1;
2005 tp->rcv_up = th->th_seq;
2006 /*
2007 * Client side of transaction: already sent SYN and data.
2008 * If the remote host used T/TCP to validate the SYN,
2009 * our data will be ACK'd; if so, enter normal data segment
2010 * processing in the middle of step 5, ack processing.
2011 * Otherwise, goto step 6.
2012 */
2013 if (thflags & TH_ACK)
2014 goto process_ACK;
2015
2016 goto step6;
2017
2018 /*
2019 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2020 * do normal processing.
2021 *
2022 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2023 */
2024 case TCPS_LAST_ACK:
2025 case TCPS_CLOSING:
2026 break; /* continue normal processing */
2027 }
2028
2029 /*
2030 * States other than LISTEN or SYN_SENT.
2031 * First check the RST flag and sequence number since reset segments
2032 * are exempt from the timestamp and connection count tests. This
2033 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2034 * below which allowed reset segments in half the sequence space
2035 * to fall through and be processed (which gives forged reset
2036 * segments with a random sequence number a 50 percent chance of
2037 * killing a connection).
2038 * Then check timestamp, if present.
2039 * Then check the connection count, if present.
2040 * Then check that at least some bytes of segment are within
2041 * receive window. If segment begins before rcv_nxt,
2042 * drop leading data (and SYN); if nothing left, just ack.
2043 *
2044 *
2045 * If the RST bit is set, check the sequence number to see
2046 * if this is a valid reset segment.
2047 * RFC 793 page 37:
2048 * In all states except SYN-SENT, all reset (RST) segments
2049 * are validated by checking their SEQ-fields. A reset is
2050 * valid if its sequence number is in the window.
2051 * Note: this does not take into account delayed ACKs, so
2052 * we should test against last_ack_sent instead of rcv_nxt.
2053 * The sequence number in the reset segment is normally an
2054 * echo of our outgoing acknowledgement numbers, but some hosts
2055 * send a reset with the sequence number at the rightmost edge
2056 * of our receive window, and we have to handle this case.
2057 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
2058 * that brute force RST attacks are possible. To combat this,
2059 * we use a much stricter check while in the ESTABLISHED state,
2060 * only accepting RSTs where the sequence number is equal to
2061 * last_ack_sent. In all other states (the states in which a
2062 * RST is more likely), the more permissive check is used.
2063 * If we have multiple segments in flight, the initial reset
2064 * segment sequence numbers will be to the left of last_ack_sent,
2065 * but they will eventually catch up.
2066 * In any case, it never made sense to trim reset segments to
2067 * fit the receive window since RFC 1122 says:
2068 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
2069 *
2070 * A TCP SHOULD allow a received RST segment to include data.
2071 *
2072 * DISCUSSION
2073 * It has been suggested that a RST segment could contain
2074 * ASCII text that encoded and explained the cause of the
2075 * RST. No standard has yet been established for such
2076 * data.
2077 *
2078 * If the reset segment passes the sequence number test examine
2079 * the state:
2080 * SYN_RECEIVED STATE:
2081 * If passive open, return to LISTEN state.
2082 * If active open, inform user that connection was refused.
2083 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
2084 * Inform user that connection was reset, and close tcb.
2085 * CLOSING, LAST_ACK STATES:
2086 * Close the tcb.
2087 * TIME_WAIT STATE:
2088 * Drop the segment - see Stevens, vol. 2, p. 964 and
2089 * RFC 1337.
2090 */
2091 if (thflags & TH_RST) {
2092 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2093 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2094 switch (tp->t_state) {
2095
2096 case TCPS_SYN_RECEIVED:
2097 so->so_error = ECONNREFUSED;
2098 goto close;
2099
2100 case TCPS_ESTABLISHED:
2101 if (V_tcp_insecure_rst == 0 &&
2102 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
2103 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
2104 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2105 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
2106 TCPSTAT_INC(tcps_badrst);
2107 goto drop;
2108 }
2109 /* FALLTHROUGH */
2110 case TCPS_FIN_WAIT_1:
2111 case TCPS_FIN_WAIT_2:
2112 case TCPS_CLOSE_WAIT:
2113 so->so_error = ECONNRESET;
2114 close:
2115 KASSERT(ti_locked == TI_WLOCKED,
2116 ("tcp_do_segment: TH_RST 1 ti_locked %d",
2117 ti_locked));
2118 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2119
2120 tp->t_state = TCPS_CLOSED;
2121 TCPSTAT_INC(tcps_drops);
2122 tp = tcp_close(tp);
2123 break;
2124
2125 case TCPS_CLOSING:
2126 case TCPS_LAST_ACK:
2127 KASSERT(ti_locked == TI_WLOCKED,
2128 ("tcp_do_segment: TH_RST 2 ti_locked %d",
2129 ti_locked));
2130 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2131
2132 tp = tcp_close(tp);
2133 break;
2134 }
2135 }
2136 goto drop;
2137 }
2138
2139 /*
2140 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2141 * and it's less than ts_recent, drop it.
2142 */
2143 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2144 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2145
2146 /* Check to see if ts_recent is over 24 days old. */
2147 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2148 /*
2149 * Invalidate ts_recent. If this segment updates
2150 * ts_recent, the age will be reset later and ts_recent
2151 * will get a valid value. If it does not, setting
2152 * ts_recent to zero will at least satisfy the
2153 * requirement that zero be placed in the timestamp
2154 * echo reply when ts_recent isn't valid. The
2155 * age isn't reset until we get a valid ts_recent
2156 * because we don't want out-of-order segments to be
2157 * dropped when ts_recent is old.
2158 */
2159 tp->ts_recent = 0;
2160 } else {
2161 TCPSTAT_INC(tcps_rcvduppack);
2162 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2163 TCPSTAT_INC(tcps_pawsdrop);
2164 if (tlen)
2165 goto dropafterack;
2166 goto drop;
2167 }
2168 }
2169
2170 /*
2171 * In the SYN-RECEIVED state, validate that the packet belongs to
2172 * this connection before trimming the data to fit the receive
2173 * window. Check the sequence number versus IRS since we know
2174 * the sequence numbers haven't wrapped. This is a partial fix
2175 * for the "LAND" DoS attack.
2176 */
2177 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2178 rstreason = BANDLIM_RST_OPENPORT;
2179 goto dropwithreset;
2180 }
2181
2182 todrop = tp->rcv_nxt - th->th_seq;
2183 if (todrop > 0) {
2184 if (thflags & TH_SYN) {
2185 thflags &= ~TH_SYN;
2186 th->th_seq++;
2187 if (th->th_urp > 1)
2188 th->th_urp--;
2189 else
2190 thflags &= ~TH_URG;
2191 todrop--;
2192 }
2193 /*
2194 * Following if statement from Stevens, vol. 2, p. 960.
2195 */
2196 if (todrop > tlen
2197 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2198 /*
2199 * Any valid FIN must be to the left of the window.
2200 * At this point the FIN must be a duplicate or out
2201 * of sequence; drop it.
2202 */
2203 thflags &= ~TH_FIN;
2204
2205 /*
2206 * Send an ACK to resynchronize and drop any data.
2207 * But keep on processing for RST or ACK.
2208 */
2209 tp->t_flags |= TF_ACKNOW;
2210 todrop = tlen;
2211 TCPSTAT_INC(tcps_rcvduppack);
2212 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2213 } else {
2214 TCPSTAT_INC(tcps_rcvpartduppack);
2215 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2216 }
2217 drop_hdrlen += todrop; /* drop from the top afterwards */
2218 th->th_seq += todrop;
2219 tlen -= todrop;
2220 if (th->th_urp > todrop)
2221 th->th_urp -= todrop;
2222 else {
2223 thflags &= ~TH_URG;
2224 th->th_urp = 0;
2225 }
2226 }
2227
2228 /*
2229 * If new data are received on a connection after the
2230 * user processes are gone, then RST the other end.
2231 */
2232 if ((so->so_state & SS_NOFDREF) &&
2233 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2234 char *s;
2235
2236 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && "
2237 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2238 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2239
2240 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
2241 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket "
2242 "was closed, sending RST and removing tcpcb\n",
2243 s, __func__, tcpstates[tp->t_state], tlen);
2244 free(s, M_TCPLOG);
2245 }
2246 tp = tcp_close(tp);
2247 TCPSTAT_INC(tcps_rcvafterclose);
2248 rstreason = BANDLIM_UNLIMITED;
2249 goto dropwithreset;
2250 }
2251
2252 /*
2253 * If segment ends after window, drop trailing data
2254 * (and PUSH and FIN); if nothing left, just ACK.
2255 */
2256 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2257 if (todrop > 0) {
2258 TCPSTAT_INC(tcps_rcvpackafterwin);
2259 if (todrop >= tlen) {
2260 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2261 /*
2262 * If window is closed can only take segments at
2263 * window edge, and have to drop data and PUSH from
2264 * incoming segments. Continue processing, but
2265 * remember to ack. Otherwise, drop segment
2266 * and ack.
2267 */
2268 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2269 tp->t_flags |= TF_ACKNOW;
2270 TCPSTAT_INC(tcps_rcvwinprobe);
2271 } else
2272 goto dropafterack;
2273 } else
2274 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
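/* Note: a negative count to m_adj() trims from the tail of the chain. */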
2275 m_adj(m, -todrop);
2276 tlen -= todrop;
2277 thflags &= ~(TH_PUSH|TH_FIN);
2278 }
2279
2280 /*
2281 * If last ACK falls within this segment's sequence numbers,
2282 * record its timestamp.
2283 * NOTE:
2284 * 1) That the test incorporates suggestions from the latest
2285 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2286 * 2) That updating only on newer timestamps interferes with
2287 * our earlier PAWS tests, so this check should be solely
2288 * predicated on the sequence space of this segment.
2289 * 3) That we modify the segment boundary check to be
2290 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2291 * instead of RFC1323's
2292 * Last.ACK.Sent < SEG.SEQ + SEG.Len.
2293 * This modified check allows us to overcome RFC1323's
2294 * limitations as described in Stevens TCP/IP Illustrated
2295 * Vol. 2 p.869. In such cases, we can still calculate the
2296 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2297 */
2298 if ((to.to_flags & TOF_TS) != 0 &&
2299 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2300 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2301 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2302 tp->ts_recent_age = tcp_ts_getticks();
2303 tp->ts_recent = to.to_tsval;
2304 }
2305
2306 /*
2307 * If a SYN is in the window, then this is an
2308 * error and we send an RST and drop the connection.
2309 */
2310 if (thflags & TH_SYN) {
2311 KASSERT(ti_locked == TI_WLOCKED,
2312 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2313 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2314
2315 tp = tcp_drop(tp, ECONNRESET);
2316 rstreason = BANDLIM_UNLIMITED;
2317 goto drop;
2318 }
2319
2320 /*
2321 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2322 * flag is on (half-synchronized state), then queue data for
2323 * later processing; else drop segment and return.
2324 */
2325 if ((thflags & TH_ACK) == 0) {
2326 if (tp->t_state == TCPS_SYN_RECEIVED ||
2327 (tp->t_flags & TF_NEEDSYN))
2328 goto step6;
2329 else if (tp->t_flags & TF_ACKNOW)
2330 goto dropafterack;
2331 else
2332 goto drop;
2333 }
2334
2335 /*
2336 * Ack processing.
2337 */
2338 switch (tp->t_state) {
2339
2340 /*
2341 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2342 * ESTABLISHED state and continue processing.
2343 * The ACK was checked above.
2344 */
2345 case TCPS_SYN_RECEIVED:
2346
2347 TCPSTAT_INC(tcps_connects);
2348 soisconnected(so);
2349 /* Do window scaling? */
2350 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2351 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2352 tp->rcv_scale = tp->request_r_scale;
2353 tp->snd_wnd = tiwin;
2354 }
2355 /*
2356 * Make transitions:
2357 * SYN-RECEIVED -> ESTABLISHED
2358 * SYN-RECEIVED* -> FIN-WAIT-1
2359 */
2360 tp->t_starttime = ticks;
2361 if (tp->t_flags & TF_NEEDFIN) {
2362 tp->t_state = TCPS_FIN_WAIT_1;
2363 tp->t_flags &= ~TF_NEEDFIN;
2364 } else {
2365 tp->t_state = TCPS_ESTABLISHED;
2366 cc_conn_init(tp);
2367 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2368 }
2369 /*
2370 * If segment contains data or ACK, will call tcp_reass()
2371 * later; if not, do so now to pass queued data to user.
2372 */
2373 if (tlen == 0 && (thflags & TH_FIN) == 0)
2374 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2375 (struct mbuf *)0);
2376 tp->snd_wl1 = th->th_seq - 1;
2377 /* FALLTHROUGH */
2378
2379 /*
2380 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2381 * ACKs. If the ack is in the range
2382 * tp->snd_una < th->th_ack <= tp->snd_max
2383 * then advance tp->snd_una to th->th_ack and drop
2384 * data from the retransmission queue. If this ACK reflects
2385 * more up-to-date window information, we update our window.
2386 */
2387 case TCPS_ESTABLISHED:
2388 case TCPS_FIN_WAIT_1:
2389 case TCPS_FIN_WAIT_2:
2390 case TCPS_CLOSE_WAIT:
2391 case TCPS_CLOSING:
2392 case TCPS_LAST_ACK:
2393 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2394 TCPSTAT_INC(tcps_rcvacktoomuch);
2395 goto dropafterack;
2396 }
2397 if ((tp->t_flags & TF_SACK_PERMIT) &&
2398 ((to.to_flags & TOF_SACK) ||
2399 !TAILQ_EMPTY(&tp->snd_holes)))
2400 tcp_sack_doack(tp, &to, th->th_ack);
2401
2402 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2403 hhook_run_tcp_est_in(tp, th, &to);
2404
2405 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2406 if (tlen == 0 && tiwin == tp->snd_wnd) {
2407 TCPSTAT_INC(tcps_rcvdupack);
2408 /*
2409 * If we have outstanding data (other than
2410 * a window probe), this is a completely
2411 * duplicate ack (ie, window info didn't
2412 * change), the ack is the biggest we've
2413 * seen and we've seen exactly our rexmt
2414 * threshold of them, assume a packet
2415 * has been dropped and retransmit it.
2416 * Kludge snd_nxt & the congestion
2417 * window so we send only this one
2418 * packet.
2419 *
2420 * We know we're losing at the current
2421 * window size so do congestion avoidance
2422 * (set ssthresh to half the current window
2423 * and pull our congestion window back to
2424 * the new ssthresh).
2425 *
2426 * Dup acks mean that packets have left the
2427 * network (they're now cached at the receiver)
2428 * so bump cwnd by the amount in the receiver
2429 * to keep a constant cwnd packets in the
2430 * network.
2431 *
2432 * When using TCP ECN, notify the peer that
2433 * we reduced the cwnd.
2434 */
2435 if (!tcp_timer_active(tp, TT_REXMT) ||
2436 th->th_ack != tp->snd_una)
2437 tp->t_dupacks = 0;
2438 else if (++tp->t_dupacks > tcprexmtthresh ||
2439 IN_FASTRECOVERY(tp->t_flags)) {
2440 cc_ack_received(tp, th, CC_DUPACK);
2441 if ((tp->t_flags & TF_SACK_PERMIT) &&
2442 IN_FASTRECOVERY(tp->t_flags)) {
2443 int awnd;
2444
2445 /*
2446 * Compute the amount of data in flight first.
2447 * We can inject new data into the pipe iff
2448 * we have less than 1/2 the original window's
2449 * worth of data in flight.
2450 */
2451 awnd = (tp->snd_nxt - tp->snd_fack) +
2452 tp->sackhint.sack_bytes_rexmit;
2453 if (awnd < tp->snd_ssthresh) {
2454 tp->snd_cwnd += tp->t_maxseg;
2455 if (tp->snd_cwnd > tp->snd_ssthresh)
2456 tp->snd_cwnd = tp->snd_ssthresh;
2457 }
2458 } else
2459 tp->snd_cwnd += tp->t_maxseg;
2460 if ((thflags & TH_FIN) &&
2461 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2462 /*
2463 * If it's a FIN, we need to process
2464 * it to avoid a race where both
2465 * sides enter FIN-WAIT and send FIN|ACK
2466 * at the same time.
2467 */
2468 break;
2469 }
2470 (void) tcp_output(tp);
2471 goto drop;
2472 } else if (tp->t_dupacks == tcprexmtthresh) {
2473 tcp_seq onxt = tp->snd_nxt;
2474
2475 /*
2476 * If we're doing sack, check to
2477 * see if we're already in sack
2478 * recovery. If we're not doing sack,
2479 * check to see if we're in newreno
2480 * recovery.
2481 */
2482 if (tp->t_flags & TF_SACK_PERMIT) {
2483 if (IN_FASTRECOVERY(tp->t_flags)) {
2484 tp->t_dupacks = 0;
2485 break;
2486 }
2487 } else {
2488 if (SEQ_LEQ(th->th_ack,
2489 tp->snd_recover)) {
2490 tp->t_dupacks = 0;
2491 break;
2492 }
2493 }
2494 /* Congestion signal before ack. */
2495 cc_cong_signal(tp, th, CC_NDUPACK);
2496 cc_ack_received(tp, th, CC_DUPACK);
2497 tcp_timer_activate(tp, TT_REXMT, 0);
2498 tp->t_rtttime = 0;
2499 if (tp->t_flags & TF_SACK_PERMIT) {
2500 TCPSTAT_INC(
2501 tcps_sack_recovery_episode);
2502 tp->sack_newdata = tp->snd_nxt;
2503 tp->snd_cwnd = tp->t_maxseg;
2504 (void) tcp_output(tp);
2505 goto drop;
2506 }
2507 tp->snd_nxt = th->th_ack;
2508 tp->snd_cwnd = tp->t_maxseg;
2509 if ((thflags & TH_FIN) &&
2510 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2511 /*
2512 * If it's a FIN, we need to process
2513 * it to avoid a race where both
2514 * sides enter FIN-WAIT and send FIN|ACK
2515 * at the same time.
2516 */
2517 break;
2518 }
2519 (void) tcp_output(tp);
2520 KASSERT(tp->snd_limited <= 2,
2521 ("%s: tp->snd_limited too big",
2522 __func__));
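/*
 * Inflate cwnd to ssthresh plus one segment for each dup ACK
 * beyond those already covered by limited transmit (snd_limited).
 */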
2523 tp->snd_cwnd = tp->snd_ssthresh +
2524 tp->t_maxseg *
2525 (tp->t_dupacks - tp->snd_limited);
2526 if (SEQ_GT(onxt, tp->snd_nxt))
2527 tp->snd_nxt = onxt;
2528 goto drop;
2529 } else if (V_tcp_do_rfc3042) {
2530 cc_ack_received(tp, th, CC_DUPACK);
2531 u_long oldcwnd = tp->snd_cwnd;
2532 tcp_seq oldsndmax = tp->snd_max;
2533 u_int sent;
2534 int avail;
2535
2536 KASSERT(tp->t_dupacks == 1 ||
2537 tp->t_dupacks == 2,
2538 ("%s: dupacks not 1 or 2",
2539 __func__));
2540 if (tp->t_dupacks == 1)
2541 tp->snd_limited = 0;
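/*
 * Limited transmit (RFC 3042): open the congestion window just
 * enough to let one new segment go out per duplicate ACK (at
 * most two) beyond the data already in flight.
 */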
2542 tp->snd_cwnd =
2543 (tp->snd_nxt - tp->snd_una) +
2544 (tp->t_dupacks - tp->snd_limited) *
2545 tp->t_maxseg;
2546 if ((thflags & TH_FIN) &&
2547 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2548 /*
2549 * If it's a FIN, we need to process
2550 * it to avoid a race where both
2551 * sides enter FIN-WAIT and send FIN|ACK
2552 * at the same time.
2553 */
2554 break;
2555 }
2556 /*
2557 * Only call tcp_output when there
2558 * is new data available to be sent.
2559 * Otherwise we would send pure ACKs.
2560 */
2561 SOCKBUF_LOCK(&so->so_snd);
2562 avail = so->so_snd.sb_cc -
2563 (tp->snd_nxt - tp->snd_una);
2564 SOCKBUF_UNLOCK(&so->so_snd);
2565 if (avail > 0)
2566 (void) tcp_output(tp);
2567 sent = tp->snd_max - oldsndmax;
2568 if (sent > tp->t_maxseg) {
2569 KASSERT((tp->t_dupacks == 2 &&
2570 tp->snd_limited == 0) ||
2571 (sent == tp->t_maxseg + 1 &&
2572 tp->t_flags & TF_SENTFIN),
2573 ("%s: sent too much",
2574 __func__));
2575 tp->snd_limited = 2;
2576 } else if (sent > 0)
2577 ++tp->snd_limited;
2578 tp->snd_cwnd = oldcwnd;
2579 goto drop;
2580 }
2581 } else
2582 tp->t_dupacks = 0;
2583 break;
2584 }
2585
2586 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2587 ("%s: th_ack <= snd_una", __func__));
2588
2589 /*
2590 * If the congestion window was inflated to account
2591 * for the other side's cached packets, retract it.
2592 */
2593 if (IN_FASTRECOVERY(tp->t_flags)) {
2594 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2595 if (tp->t_flags & TF_SACK_PERMIT)
2596 tcp_sack_partialack(tp, th);
2597 else
2598 tcp_newreno_partial_ack(tp, th);
2599 } else
2600 cc_post_recovery(tp, th);
2601 }
2602 tp->t_dupacks = 0;
2603 /*
2604 * If we reach this point, ACK is not a duplicate,
2605 * i.e., it ACKs something we sent.
2606 */
2607 if (tp->t_flags & TF_NEEDSYN) {
2608 /*
2609 * T/TCP: Connection was half-synchronized, and our
2610 * SYN has been ACK'd (so connection is now fully
2611 * synchronized). Go to non-starred state,
2612 * increment snd_una for ACK of SYN, and check if
2613 * we can do window scaling.
2614 */
2615 tp->t_flags &= ~TF_NEEDSYN;
2616 tp->snd_una++;
2617 /* Do window scaling? */
2618 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2619 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2620 tp->rcv_scale = tp->request_r_scale;
2621 /* Send window already scaled. */
2622 }
2623 }
2624
2625 process_ACK:
2626 INP_WLOCK_ASSERT(tp->t_inpcb);
2627
2628 acked = BYTES_THIS_ACK(tp, th);
2629 TCPSTAT_INC(tcps_rcvackpack);
2630 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2631
2632 /*
2633 * If we just performed our first retransmit, and the ACK
2634 * arrives within our recovery window, then it was a mistake
2635 * to do the retransmit in the first place. Recover our
2636 * original cwnd and ssthresh, and proceed to transmit where
2637 * we left off.
2638 */
2639 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2640 (int)(ticks - tp->t_badrxtwin) < 0)
2641 cc_cong_signal(tp, th, CC_RTO_ERR);
2642
2643 /*
2644 * If we have a timestamp reply, update smoothed
2645 * round trip time. If no timestamp is present but
2646 * transmit timer is running and timed sequence
2647 * number was acked, update smoothed round trip time.
2648 * Since we now have an rtt measurement, cancel the
2649 * timer backoff (cf., Phil Karn's retransmit alg.).
2650 * Recompute the initial retransmit timer.
2651 *
2652 * Some boxes send broken timestamp replies
2653 * during the SYN+ACK phase, ignore
2654 * timestamps of 0 or we could calculate a
2655 * huge RTT and blow up the retransmit timer.
2656 */
2657 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2658 u_int t;
2659
2660 t = tcp_ts_getticks() - to.to_tsecr;
2661 if (!tp->t_rttlow || tp->t_rttlow > t)
2662 tp->t_rttlow = t;
2663 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2664 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2665 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2666 tp->t_rttlow = ticks - tp->t_rtttime;
2667 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2668 }
2669
2670 /*
2671 * If all outstanding data is acked, stop retransmit
2672 * timer and remember to restart (more output or persist).
2673 * If there is more data to be acked, restart retransmit
2674 * timer, using current (possibly backed-off) value.
2675 */
2676 if (th->th_ack == tp->snd_max) {
2677 tcp_timer_activate(tp, TT_REXMT, 0);
2678 needoutput = 1;
2679 } else if (!tcp_timer_active(tp, TT_PERSIST))
2680 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2681
2682 /*
2683 * If no data (only SYN) was ACK'd,
2684 * skip rest of ACK processing.
2685 */
2686 if (acked == 0)
2687 goto step6;
2688
2689 /*
2690 * Let the congestion control algorithm update congestion
2691 * control related information. This typically means increasing
2692 * the congestion window.
2693 */
2694 cc_ack_received(tp, th, CC_ACK);
2695
2696 SOCKBUF_LOCK(&so->so_snd);
2697 if (acked > so->so_snd.sb_cc) {
2698 tp->snd_wnd -= so->so_snd.sb_cc;
2699 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2700 ourfinisacked = 1;
2701 } else {
2702 sbdrop_locked(&so->so_snd, acked);
2703 tp->snd_wnd -= acked;
2704 ourfinisacked = 0;
2705 }
2706 /* NB: sowwakeup_locked() does an implicit unlock. */
2707 sowwakeup_locked(so);
2708 /* Detect una wraparound. */
2709 if (!IN_RECOVERY(tp->t_flags) &&
2710 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2711 SEQ_LEQ(th->th_ack, tp->snd_recover))
2712 tp->snd_recover = th->th_ack - 1;
2713 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2714 if (IN_RECOVERY(tp->t_flags) &&
2715 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2716 EXIT_RECOVERY(tp->t_flags);
2717 }
2718 tp->snd_una = th->th_ack;
2719 if (tp->t_flags & TF_SACK_PERMIT) {
2720 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2721 tp->snd_recover = tp->snd_una;
2722 }
2723 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2724 tp->snd_nxt = tp->snd_una;
2725
2726 switch (tp->t_state) {
2727
2728 /*
2729 * In FIN_WAIT_1 STATE in addition to the processing
2730 * for the ESTABLISHED state if our FIN is now acknowledged
2731 * then enter FIN_WAIT_2.
2732 */
2733 case TCPS_FIN_WAIT_1:
2734 if (ourfinisacked) {
2735 /*
2736 * If we can't receive any more
2737 * data, then closing user can proceed.
2738 * Starting the timer is contrary to the
2739 * specification, but if we don't get a FIN
2740 * we'll hang forever.
2741 *
2742 * XXXjl:
2743 * we should release the tp also, and use a
2744 * compressed state.
2745 */
2746 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2747 soisdisconnected(so);
2748 tcp_timer_activate(tp, TT_2MSL,
2749 (tcp_fast_finwait2_recycle ?
2750 tcp_finwait2_timeout :
2751 TP_MAXIDLE(tp)));
2752 }
2753 tp->t_state = TCPS_FIN_WAIT_2;
2754 }
2755 break;
2756
2757 /*
2758 * In CLOSING STATE in addition to the processing for
2759 * the ESTABLISHED state if the ACK acknowledges our FIN
2760 * then enter the TIME-WAIT state, otherwise ignore
2761 * the segment.
2762 */
2763 case TCPS_CLOSING:
2764 if (ourfinisacked) {
2765 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2766 tcp_twstart(tp);
2767 INP_INFO_WUNLOCK(&V_tcbinfo);
2768 m_freem(m);
2769 return;
2770 }
2771 break;
2772
2773 /*
2774 * In LAST_ACK, we may still be waiting for data to drain
2775 * and/or to be acked, as well as for the ack of our FIN.
2776 * If our FIN is now acknowledged, delete the TCB,
2777 * enter the closed state and return.
2778 */
2779 case TCPS_LAST_ACK:
2780 if (ourfinisacked) {
2781 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2782 tp = tcp_close(tp);
2783 goto drop;
2784 }
2785 break;
2786 }
2787 }
2788
2789 step6:
2790 INP_WLOCK_ASSERT(tp->t_inpcb);
2791
2792 /*
2793 * Update window information.
2794 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2795 */
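/*
 * The update is accepted only if it comes from a newer segment
 * (snd_wl1 < seq), from the same segment with a newer ACK
 * (snd_wl2 < ack), or from the same segment and ACK with a larger
 * advertised window; this mirrors the send-window update rule of
 * RFC 793.
 */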
2796 if ((thflags & TH_ACK) &&
2797 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2798 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2799 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2800 /* keep track of pure window updates */
2801 if (tlen == 0 &&
2802 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2803 TCPSTAT_INC(tcps_rcvwinupd);
2804 tp->snd_wnd = tiwin;
2805 tp->snd_wl1 = th->th_seq;
2806 tp->snd_wl2 = th->th_ack;
2807 if (tp->snd_wnd > tp->max_sndwnd)
2808 tp->max_sndwnd = tp->snd_wnd;
2809 needoutput = 1;
2810 }
2811
2812 /*
2813 * Process segments with URG.
2814 */
2815 if ((thflags & TH_URG) && th->th_urp &&
2816 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2817 /*
2818 * This is a kludge, but if we receive and accept
2819 * random urgent pointers, we'll crash in
2820 * soreceive. It's hard to imagine someone
2821 * actually wanting to send this much urgent data.
2822 */
2823 SOCKBUF_LOCK(&so->so_rcv);
2824 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2825 th->th_urp = 0; /* XXX */
2826 thflags &= ~TH_URG; /* XXX */
2827 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2828 goto dodata; /* XXX */
2829 }
2830 /*
2831 * If this segment advances the known urgent pointer,
2832 * then mark the data stream. This should not happen
2833 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2834 * a FIN has been received from the remote side.
2835 * In these states we ignore the URG.
2836 *
2837 * According to RFC961 (Assigned Protocols),
2838 * the urgent pointer points to the last octet
2839 * of urgent data. We continue, however,
2840 * to consider it to indicate the first octet
2841 * of data past the urgent section as the original
2842 * spec states (in one of two places).
2843 */
2844 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2845 tp->rcv_up = th->th_seq + th->th_urp;
2846 so->so_oobmark = so->so_rcv.sb_cc +
2847 (tp->rcv_up - tp->rcv_nxt) - 1;
2848 if (so->so_oobmark == 0)
2849 so->so_rcv.sb_state |= SBS_RCVATMARK;
2850 sohasoutofband(so);
2851 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2852 }
2853 SOCKBUF_UNLOCK(&so->so_rcv);
2854 /*
2855 * Remove out of band data so it doesn't get presented to the user.
2856 * This can happen independent of advancing the URG pointer,
2857 * but if two URG's are pending at once, some out-of-band
2858 * data may creep in... ick.
2859 */
2860 if (th->th_urp <= (u_long)tlen &&
2861 !(so->so_options & SO_OOBINLINE)) {
2862 /* hdr drop is delayed */
2863 tcp_pulloutofband(so, th, m, drop_hdrlen);
2864 }
2865 } else {
2866 /*
2867 * If no out of band data is expected,
2868 * pull receive urgent pointer along
2869 * with the receive window.
2870 */
2871 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2872 tp->rcv_up = tp->rcv_nxt;
2873 }
2874 dodata: /* XXX */
2875 INP_WLOCK_ASSERT(tp->t_inpcb);
2876
2877 /*
2878 * Process the segment text, merging it into the TCP sequencing queue,
2879 * and arranging for acknowledgment of receipt if necessary.
2880 * This process logically involves adjusting tp->rcv_wnd as data
2881 * is presented to the user (this happens in tcp_usrreq.c,
2882 * case PRU_RCVD). If a FIN has already been received on this
2883 * connection then we just ignore the text.
2884 */
2885 if ((tlen || (thflags & TH_FIN)) &&
2886 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2887 tcp_seq save_start = th->th_seq;
2888 m_adj(m, drop_hdrlen); /* delayed header drop */
2889 /*
2890 * Insert segment which includes th into TCP reassembly queue
2891 * with control block tp. Set thflags to whether reassembly now
2892 * includes a segment with FIN. This handles the common case
2893 * inline (segment is the next to be received on an established
2894 * connection, and the queue is empty), avoiding linkage into
2895 * and removal from the queue and repetition of various
2896 * conversions.
2897 * Set DELACK for segments received in order, but ack
2898 * immediately when segments are out of order (so
2899 * fast retransmit can work).
2900 */
2901 if (th->th_seq == tp->rcv_nxt &&
2902 LIST_EMPTY(&tp->t_segq) &&
2903 TCPS_HAVEESTABLISHED(tp->t_state)) {
2904 if (DELAY_ACK(tp))
2905 tp->t_flags |= TF_DELACK;
2906 else
2907 tp->t_flags |= TF_ACKNOW;
2908 tp->rcv_nxt += tlen;
2909 thflags = th->th_flags & TH_FIN;
2910 TCPSTAT_INC(tcps_rcvpack);
2911 TCPSTAT_ADD(tcps_rcvbyte, tlen);
2912 ND6_HINT(tp);
2913 SOCKBUF_LOCK(&so->so_rcv);
2914 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2915 m_freem(m);
2916 else
2917 sbappendstream_locked(&so->so_rcv, m);
2918 /* NB: sorwakeup_locked() does an implicit unlock. */
2919 sorwakeup_locked(so);
2920 } else {
2921 /*
2922 * XXX: Due to the header drop above "th" is
2923 * theoretically invalid by now. Fortunately
2924 * m_adj() doesn't actually free any mbufs
2925 * when trimming from the head.
2926 */
2927 thflags = tcp_reass(tp, th, &tlen, m);
2928 tp->t_flags |= TF_ACKNOW;
2929 }
2930 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2931 tcp_update_sack_list(tp, save_start, save_start + tlen);
2932 #if 0
2933 /*
2934 * Note the amount of data that peer has sent into
2935 * our window, in order to estimate the sender's
2936 * buffer size.
2937 * XXX: Unused.
2938 */
2939 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
2940 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2941 else
2942 len = so->so_rcv.sb_hiwat;
2943 #endif
2944 } else {
2945 m_freem(m);
2946 thflags &= ~TH_FIN;
2947 }
2948
2949 /*
2950 * If FIN is received ACK the FIN and let the user know
2951 * that the connection is closing.
2952 */
2953 if (thflags & TH_FIN) {
2954 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2955 socantrcvmore(so);
2956 /*
2957 * If connection is half-synchronized
2958 * (ie NEEDSYN flag on) then delay ACK,
2959 * so it may be piggybacked when SYN is sent.
2960 * Otherwise, since we received a FIN then no
2961 * more input can be expected, send ACK now.
2962 */
2963 if (tp->t_flags & TF_NEEDSYN)
2964 tp->t_flags |= TF_DELACK;
2965 else
2966 tp->t_flags |= TF_ACKNOW;
2967 tp->rcv_nxt++;
2968 }
2969 switch (tp->t_state) {
2970
2971 /*
2972 * In SYN_RECEIVED and ESTABLISHED STATES
2973 * enter the CLOSE_WAIT state.
2974 */
2975 case TCPS_SYN_RECEIVED:
2976 tp->t_starttime = ticks;
2977 /* FALLTHROUGH */
2978 case TCPS_ESTABLISHED:
2979 tp->t_state = TCPS_CLOSE_WAIT;
2980 break;
2981
2982 /*
2983 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2984 * enter the CLOSING state.
2985 */
2986 case TCPS_FIN_WAIT_1:
2987 tp->t_state = TCPS_CLOSING;
2988 break;
2989
2990 /*
2991 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2992 * starting the time-wait timer, turning off the other
2993 * standard timers.
2994 */
2995 case TCPS_FIN_WAIT_2:
2996 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2997 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
2998 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
2999 ti_locked));
3000
3001 tcp_twstart(tp);
3002 INP_INFO_WUNLOCK(&V_tcbinfo);
3003 return;
3004 }
3005 }
3006 if (ti_locked == TI_WLOCKED)
3007 INP_INFO_WUNLOCK(&V_tcbinfo);
3008 ti_locked = TI_UNLOCKED;
3009
3010 #ifdef TCPDEBUG
3011 if (so->so_options & SO_DEBUG)
3012 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3013 &tcp_savetcp, 0);
3014 #endif
3015
3016 /*
3017 * Return any desired output.
3018 */
3019 if (needoutput || (tp->t_flags & TF_ACKNOW))
3020 (void) tcp_output(tp);
3021
3022 check_delack:
3023 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3024 __func__, ti_locked));
3025 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3026 INP_WLOCK_ASSERT(tp->t_inpcb);
3027
3028 if (tp->t_flags & TF_DELACK) {
3029 tp->t_flags &= ~TF_DELACK;
3030 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3031 }
3032 INP_WUNLOCK(tp->t_inpcb);
3033 return;
3034
3035 dropafterack:
3036 /*
3037 * Generate an ACK, dropping the incoming segment if it occupies
3038 * sequence space, where the ACK reflects our state.
3039 *
3040 * We can now skip the test for the RST flag since all
3041 * paths to this code happen after packets containing
3042 * RST have been dropped.
3043 *
3044 * In the SYN-RECEIVED state, don't send an ACK unless the
3045 * segment we received passes the SYN-RECEIVED ACK test.
3046 * If it fails send a RST. This breaks the loop in the
3047 * "LAND" DoS attack, and also prevents an ACK storm
3048 * between two listening ports that have been sent forged
3049 * SYN segments, each with the source address of the other.
3050 */
3051 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3052 (SEQ_GT(tp->snd_una, th->th_ack) ||
3053 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3054 rstreason = BANDLIM_RST_OPENPORT;
3055 goto dropwithreset;
3056 }
3057 #ifdef TCPDEBUG
3058 if (so->so_options & SO_DEBUG)
3059 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3060 &tcp_savetcp, 0);
3061 #endif
3062 if (ti_locked == TI_WLOCKED)
3063 INP_INFO_WUNLOCK(&V_tcbinfo);
3064 ti_locked = TI_UNLOCKED;
3065
3066 tp->t_flags |= TF_ACKNOW;
3067 (void) tcp_output(tp);
3068 INP_WUNLOCK(tp->t_inpcb);
3069 m_freem(m);
3070 return;
3071
3072 dropwithreset:
3073 if (ti_locked == TI_WLOCKED)
3074 INP_INFO_WUNLOCK(&V_tcbinfo);
3075 ti_locked = TI_UNLOCKED;
3076
3077 if (tp != NULL) {
3078 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3079 INP_WUNLOCK(tp->t_inpcb);
3080 } else
3081 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3082 return;
3083
3084 drop:
3085 if (ti_locked == TI_WLOCKED) {
3086 INP_INFO_WUNLOCK(&V_tcbinfo);
3087 ti_locked = TI_UNLOCKED;
3088 }
3089 #ifdef INVARIANTS
3090 else
3091 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3092 #endif
3093
3094 /*
3095 * Drop space held by incoming segment and return.
3096 */
3097 #ifdef TCPDEBUG
3098 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3099 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3100 &tcp_savetcp, 0);
3101 #endif
3102 if (tp != NULL)
3103 INP_WUNLOCK(tp->t_inpcb);
3104 m_freem(m);
3105 }
3106
3107 /*
3108 * Issue RST and make ACK acceptable to originator of segment.
3109 * The mbuf must still include the original packet header.
3110 * tp may be NULL.
3111 */
3112 static void
3113 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3114 int tlen, int rstreason)
3115 {
3116 #ifdef INET
3117 struct ip *ip;
3118 #endif
3119 #ifdef INET6
3120 struct ip6_hdr *ip6;
3121 #endif
3122
3123 if (tp != NULL) {
3124 INP_WLOCK_ASSERT(tp->t_inpcb);
3125 }
3126
3127 /* Don't bother if destination was broadcast/multicast. */
3128 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3129 goto drop;
3130 #ifdef INET6
3131 if (mtod(m, struct ip *)->ip_v == 6) {
3132 ip6 = mtod(m, struct ip6_hdr *);
3133 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3134 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3135 goto drop;
3136 /* IPv6 anycast check is done at tcp6_input() */
3137 }
3138 #endif
3139 #if defined(INET) && defined(INET6)
3140 else
3141 #endif
3142 #ifdef INET
3143 {
3144 ip = mtod(m, struct ip *);
3145 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3146 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3147 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3148 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3149 goto drop;
3150 }
3151 #endif
3152
3153 /* Perform bandwidth limiting. */
3154 if (badport_bandlim(rstreason) < 0)
3155 goto drop;
3156
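/*
 * Per RFC 793 reset generation: if the offending segment carried an
 * ACK, the RST takes its sequence number from that ACK field;
 * otherwise we send RST|ACK acknowledging SEG.SEQ + SEG.LEN.
 */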
3157 /* tcp_respond consumes the mbuf chain. */
3158 if (th->th_flags & TH_ACK) {
3159 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3160 th->th_ack, TH_RST);
3161 } else {
3162 if (th->th_flags & TH_SYN)
3163 tlen++;
3164 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3165 (tcp_seq)0, TH_RST|TH_ACK);
3166 }
3167 return;
3168 drop:
3169 m_freem(m);
3170 }
3171
3172 /*
3173 * Parse TCP options and place in tcpopt.
3174 */
3175 static void
3176 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3177 {
3178 int opt, optlen;
3179
3180 to->to_flags = 0;
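/*
 * Walk the option list. Except for EOL and NOP, every option is
 * encoded as <kind, length, data>, so a length smaller than 2 or
 * larger than the remaining option space ends parsing.
 */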
3181 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3182 opt = cp[0];
3183 if (opt == TCPOPT_EOL)
3184 break;
3185 if (opt == TCPOPT_NOP)
3186 optlen = 1;
3187 else {
3188 if (cnt < 2)
3189 break;
3190 optlen = cp[1];
3191 if (optlen < 2 || optlen > cnt)
3192 break;
3193 }
3194 switch (opt) {
3195 case TCPOPT_MAXSEG:
3196 if (optlen != TCPOLEN_MAXSEG)
3197 continue;
3198 if (!(flags & TO_SYN))
3199 continue;
3200 to->to_flags |= TOF_MSS;
3201 bcopy((char *)cp + 2,
3202 (char *)&to->to_mss, sizeof(to->to_mss));
3203 to->to_mss = ntohs(to->to_mss);
3204 break;
3205 case TCPOPT_WINDOW:
3206 if (optlen != TCPOLEN_WINDOW)
3207 continue;
3208 if (!(flags & TO_SYN))
3209 continue;
3210 to->to_flags |= TOF_SCALE;
3211 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3212 break;
3213 case TCPOPT_TIMESTAMP:
3214 if (optlen != TCPOLEN_TIMESTAMP)
3215 continue;
3216 to->to_flags |= TOF_TS;
3217 bcopy((char *)cp + 2,
3218 (char *)&to->to_tsval, sizeof(to->to_tsval));
3219 to->to_tsval = ntohl(to->to_tsval);
3220 bcopy((char *)cp + 6,
3221 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3222 to->to_tsecr = ntohl(to->to_tsecr);
3223 break;
3224 #ifdef TCP_SIGNATURE
3225 /*
3226 * XXX In order to reply to a host which has set the
3227 * TCP_SIGNATURE option in its initial SYN, we have to
3228 * record the fact that the option was observed here
3229 * for the syncache code to perform the correct response.
3230 */
3231 case TCPOPT_SIGNATURE:
3232 if (optlen != TCPOLEN_SIGNATURE)
3233 continue;
3234 to->to_flags |= TOF_SIGNATURE;
3235 to->to_signature = cp + 2;
3236 break;
3237 #endif
3238 case TCPOPT_SACK_PERMITTED:
3239 if (optlen != TCPOLEN_SACK_PERMITTED)
3240 continue;
3241 if (!(flags & TO_SYN))
3242 continue;
3243 if (!V_tcp_do_sack)
3244 continue;
3245 to->to_flags |= TOF_SACKPERM;
3246 break;
3247 case TCPOPT_SACK:
3248 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3249 continue;
3250 if (flags & TO_SYN)
3251 continue;
3252 to->to_flags |= TOF_SACK;
3253 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3254 to->to_sacks = cp + 2;
3255 TCPSTAT_INC(tcps_sack_rcv_blocks);
3256 break;
3257 default:
3258 continue;
3259 }
3260 }
3261 }
3262
3263 /*
3264 * Pull out of band byte out of a segment so
3265 * it doesn't appear in the user's data queue.
3266 * It is still reflected in the segment length for
3267 * sequencing purposes.
3268 */
3269 static void
3270 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3271 int off)
3272 {
3273 int cnt = off + th->th_urp - 1;
3274
3275 while (cnt >= 0) {
3276 if (m->m_len > cnt) {
3277 char *cp = mtod(m, caddr_t) + cnt;
3278 struct tcpcb *tp = sototcpcb(so);
3279
3280 INP_WLOCK_ASSERT(tp->t_inpcb);
3281
3282 tp->t_iobc = *cp;
3283 tp->t_oobflags |= TCPOOB_HAVEDATA;
3284 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3285 m->m_len--;
3286 if (m->m_flags & M_PKTHDR)
3287 m->m_pkthdr.len--;
3288 return;
3289 }
3290 cnt -= m->m_len;
3291 m = m->m_next;
3292 if (m == NULL)
3293 break;
3294 }
3295 panic("tcp_pulloutofband");
3296 }
3297
3298 /*
3299 * Collect new round-trip time estimate
3300 * and update averages and current timeout.
3301 */
3302 static void
3303 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3304 {
3305 int delta;
3306
3307 INP_WLOCK_ASSERT(tp->t_inpcb);
3308
3309 TCPSTAT_INC(tcps_rttupdated);
3310 tp->t_rttupdated++;
3311 if (tp->t_srtt != 0) {
3312 /*
3313 * srtt is stored as fixed point with 5 bits after the
3314 * binary point (i.e., scaled by 32). The following magic
3315 * is equivalent to the smoothing algorithm in rfc793 with
3316 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3317 * point). Adjust rtt to origin 0.
3318 */
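/*
 * Worked example (assuming the stock TCP_RTT_SHIFT of 5 and
 * TCP_DELTA_SHIFT of 2): with srtt = 320 (10 ticks scaled by 32)
 * and a new sample of rtt = 14 ticks, delta = (13 << 2) -
 * (320 >> 3) = 12, so srtt becomes 332, i.e. 10.375 ticks,
 * which matches 10 + (13 - 10)/8.
 */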
3319 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3320 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3321
3322 if ((tp->t_srtt += delta) <= 0)
3323 tp->t_srtt = 1;
3324
3325 /*
3326 * We accumulate a smoothed rtt variance (actually, a
3327 * smoothed mean difference), then set the retransmit
3328 * timer to smoothed rtt + 4 times the smoothed variance.
3329 * rttvar is stored as fixed point with 4 bits after the
3330 * binary point (scaled by 16). The following is
3331 * equivalent to rfc793 smoothing with an alpha of .75
3332 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3333 * rfc793's wired-in beta.
3334 */
3335 if (delta < 0)
3336 delta = -delta;
3337 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3338 if ((tp->t_rttvar += delta) <= 0)
3339 tp->t_rttvar = 1;
3340 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3341 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3342 } else {
3343 /*
3344 * No rtt measurement yet - use the unsmoothed rtt.
3345 * Set the variance to half the rtt (so our first
3346 * retransmit happens at 3*rtt).
3347 */
3348 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3349 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3350 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3351 }
3352 tp->t_rtttime = 0;
3353 tp->t_rxtshift = 0;
3354
3355 /*
3356 * the retransmit should happen at rtt + 4 * rttvar.
3357 * Because of the way we do the smoothing, srtt and rttvar
3358 * will each average +1/2 tick of bias. When we compute
3359 * the retransmit timer, we want 1/2 tick of rounding and
3360 * 1 extra tick because of +-1/2 tick uncertainty in the
3361 * firing of the timer. The bias will give us exactly the
3362 * 1.5 tick we need. But, because the bias is
3363 * statistical, we have to test that we don't drop below
3364 * the minimum feasible timer (which is 2 ticks).
3365 */
3366 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3367 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3368
3369 /*
3370 * We received an ack for a packet that wasn't retransmitted;
3371 * it is probably safe to discard any error indications we've
3372 * received recently. This isn't quite right, but close enough
3373 * for now (a route might have failed after we sent a segment,
3374 * and the return path might not be symmetrical).
3375 */
3376 tp->t_softerror = 0;
3377 }
3378
3379 /*
3380 * Determine a reasonable value for maxseg size.
3381 * If the route is known, check route for mtu.
3382 * If none, use an mss that can be handled on the outgoing interface
3383 * without forcing IP to fragment. If no route is found, route has no mtu,
3384 * or the destination isn't local, use a default, hopefully conservative
3385 * size (usually 512 or the default IP max size, but no more than the mtu
3386 * of the interface), as we can't discover anything about intervening
3387 * gateways or networks. We also initialize the congestion/slow start
3388 * window to be a single segment if the destination isn't local.
3389 * While looking at the routing entry, we also initialize other path-dependent
3390 * parameters from pre-set or cached values in the routing entry.
3391 *
3392 * Also take into account the space needed for options that we
3393 * send regularly. Make maxseg shorter by that amount to assure
3394 * that we can send maxseg amount of data even when the options
3395 * are present. Store the upper limit of the length of options plus
3396 * data in maxopd.
3397 *
3398 * NOTE that this routine is only called when we process an incoming
3399 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3400 * settings are handled in tcp_mssopt().
3401 */
3402 void
3403 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3404 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3405 {
3406 int mss = 0;
3407 u_long maxmtu = 0;
3408 struct inpcb *inp = tp->t_inpcb;
3409 struct hc_metrics_lite metrics;
3410 int origoffer;
3411 #ifdef INET6
3412 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3413 size_t min_protoh = isipv6 ?
3414 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3415 sizeof (struct tcpiphdr);
3416 #else
3417 const size_t min_protoh = sizeof(struct tcpiphdr);
3418 #endif
3419
3420 INP_WLOCK_ASSERT(tp->t_inpcb);
3421
3422 if (mtuoffer != -1) {
3423 KASSERT(offer == -1, ("%s: conflict", __func__));
3424 offer = mtuoffer - min_protoh;
3425 }
3426 origoffer = offer;
3427
3428 /* Initialize. */
3429 #ifdef INET6
3430 if (isipv6) {
3431 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3432 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
3433 }
3434 #endif
3435 #if defined(INET) && defined(INET6)
3436 else
3437 #endif
3438 #ifdef INET
3439 {
3440 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3441 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
3442 }
3443 #endif
3444
3445 /*
3446 * No route to sender, stay with default mss and return.
3447 */
3448 if (maxmtu == 0) {
3449 /*
3450 * In case we return early we need to initialize metrics
3451 * to a defined state as tcp_hc_get() would do for us
3452 * if there was no cache hit.
3453 */
3454 if (metricptr != NULL)
3455 bzero(metricptr, sizeof(struct hc_metrics_lite));
3456 return;
3457 }
3458
3459 /* What have we got? */
3460 switch (offer) {
3461 case 0:
3462 /*
3463 * Offer == 0 means that there was no MSS on the SYN
3464 * segment, in this case we use tcp_mssdflt as
3465 * already assigned to t_maxopd above.
3466 */
3467 offer = tp->t_maxopd;
3468 break;
3469
3470 case -1:
3471 /*
3472 * Offer == -1 means that we didn't receive SYN yet.
3473 */
3474 /* FALLTHROUGH */
3475
3476 default:
3477 /*
3478 * Prevent DoS attack with too small MSS. Round up
3479 * to at least minmss.
3480 */
3481 offer = max(offer, V_tcp_minmss);
3482 }
3483
3484 /*
3485 * rmx information is now retrieved from tcp_hostcache.
3486 */
3487 tcp_hc_get(&inp->inp_inc, &metrics);
3488 if (metricptr != NULL)
3489 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3490
3491 /*
3492 * If there's a discovered MTU in the tcp hostcache, use it;
3493 * otherwise, use the link MTU.
3494 */
3495 if (metrics.rmx_mtu)
3496 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3497 else {
3498 #ifdef INET6
3499 if (isipv6) {
3500 mss = maxmtu - min_protoh;
3501 if (!V_path_mtu_discovery &&
3502 !in6_localaddr(&inp->in6p_faddr))
3503 mss = min(mss, V_tcp_v6mssdflt);
3504 }
3505 #endif
3506 #if defined(INET) && defined(INET6)
3507 else
3508 #endif
3509 #ifdef INET
3510 {
3511 mss = maxmtu - min_protoh;
3512 if (!V_path_mtu_discovery &&
3513 !in_localaddr(inp->inp_faddr))
3514 mss = min(mss, V_tcp_mssdflt);
3515 }
3516 #endif
3517 /*
3518 * XXX - The above conditional (mss = maxmtu - min_protoh)
3519 * probably violates the TCP spec.
3520 * The problem is that, since we don't know the
3521 * other end's MSS, we are supposed to use a conservative
3522 * default. But, if we do that, then MTU discovery will
3523 * never actually take place, because the conservative
3524 * default is much less than the MTUs typically seen
3525 * on the Internet today. For the moment, we'll sweep
3526 * this under the carpet.
3527 *
3528 * The conservative default might not actually be a problem
3529 * if the only case this occurs is when sending an initial
3530 * SYN with options and data to a host we've never talked
3531 * to before. Then, they will reply with an MSS value which
3532 * will get recorded and the new parameters should get
3533 * recomputed. For Further Study.
3534 */
3535 }
3536 mss = min(mss, offer);
3537
3538 /*
3539 * Sanity check: make sure that maxopd will be large
3540 * enough to allow some data on segments even if
3541 * all the option space is used (40 bytes). Otherwise
3542 * funny things may happen in tcp_output.
3543 */
3544 mss = max(mss, 64);
3545
3546 /*
3547 * maxopd stores the maximum length of data AND options
3548 * in a segment; maxseg is the amount of data in a normal
3549 * segment. We need to store this value (maxopd) apart
3550 * from maxseg, because now every segment carries options
3551 * and thus we normally have somewhat less data in segments.
3552 */
3553 tp->t_maxopd = mss;
3554
3555 /*
3556 * origoffer==-1 indicates that no segments were received yet.
3557 * In this case we just guess.
3558 */
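/*
 * If we expect to send timestamps (RFC 1323 Appendix A), reserve
 * the 12 bytes of option space they occupy so that maxseg reflects
 * the amount of pure data per segment.
 */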
3559 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3560 (origoffer == -1 ||
3561 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3562 mss -= TCPOLEN_TSTAMP_APPA;
3563
3564 tp->t_maxseg = mss;
3565 }
3566
3567 void
3568 tcp_mss(struct tcpcb *tp, int offer)
3569 {
3570 int mss;
3571 u_long bufsize;
3572 struct inpcb *inp;
3573 struct socket *so;
3574 struct hc_metrics_lite metrics;
3575 struct tcp_ifcap cap;
3576
3577 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3578
3579 bzero(&cap, sizeof(cap));
3580 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3581
3582 mss = tp->t_maxseg;
3583 inp = tp->t_inpcb;
3584
3585 /*
3586 * If there's a pipesize, change the socket buffer to that size,
3587 * but don't change it if sb_hiwat differs from the default (then it
3588 * has been changed on purpose with setsockopt).
3589 * Make the socket buffers an integral number of mss units;
3590 * if the mss is larger than the socket buffer, decrease the mss.
3591 */
3592 so = inp->inp_socket;
3593 SOCKBUF_LOCK(&so->so_snd);
3594 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3595 bufsize = metrics.rmx_sendpipe;
3596 else
3597 bufsize = so->so_snd.sb_hiwat;
3598 if (bufsize < mss)
3599 mss = bufsize;
3600 else {
3601 bufsize = roundup(bufsize, mss);
3602 if (bufsize > sb_max)
3603 bufsize = sb_max;
3604 if (bufsize > so->so_snd.sb_hiwat)
3605 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3606 }
3607 SOCKBUF_UNLOCK(&so->so_snd);
3608 tp->t_maxseg = mss;
3609
3610 SOCKBUF_LOCK(&so->so_rcv);
3611 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3612 bufsize = metrics.rmx_recvpipe;
3613 else
3614 bufsize = so->so_rcv.sb_hiwat;
3615 if (bufsize > mss) {
3616 bufsize = roundup(bufsize, mss);
3617 if (bufsize > sb_max)
3618 bufsize = sb_max;
3619 if (bufsize > so->so_rcv.sb_hiwat)
3620 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3621 }
3622 SOCKBUF_UNLOCK(&so->so_rcv);
3623
3624 /* Check the interface for TSO capabilities. */
3625 if (cap.ifcap & CSUM_TSO) {
3626 tp->t_flags |= TF_TSO;
3627 tp->t_tsomax = cap.tsomax;
3628 }
3629 }
3630
3631 /*
3632 * Determine the MSS option to send on an outgoing SYN.
3633 */
3634 int
3635 tcp_mssopt(struct in_conninfo *inc)
3636 {
3637 int mss = 0;
3638 u_long maxmtu = 0;
3639 u_long thcmtu = 0;
3640 size_t min_protoh;
3641
3642 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3643
3644 #ifdef INET6
3645 if (inc->inc_flags & INC_ISIPV6) {
3646 mss = V_tcp_v6mssdflt;
3647 maxmtu = tcp_maxmtu6(inc, NULL);
3648 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3649 }
3650 #endif
3651 #if defined(INET) && defined(INET6)
3652 else
3653 #endif
3654 #ifdef INET
3655 {
3656 mss = V_tcp_mssdflt;
3657 maxmtu = tcp_maxmtu(inc, NULL);
3658 min_protoh = sizeof(struct tcpiphdr);
3659 }
3660 #endif
3661 #if defined(INET6) || defined(INET)
3662 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3663 #endif
3664
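/*
 * E.g., an IPv4 path with a 1500-byte MTU yields an advertised MSS
 * of 1500 - 40 = 1460 bytes; the same path over IPv6 yields
 * 1500 - 60 = 1440 bytes.
 */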
3665 if (maxmtu && thcmtu)
3666 mss = min(maxmtu, thcmtu) - min_protoh;
3667 else if (maxmtu || thcmtu)
3668 mss = max(maxmtu, thcmtu) - min_protoh;
3669
3670 return (mss);
3671 }
3672
3673
3674 /*
3675 * When a partial ack arrives, force the retransmission of the
3676 * next unacknowledged segment. Do not clear tp->t_dupacks.
3677 * By setting snd_nxt to th_ack, this forces the retransmission
3678 * timer to be started again.
3679 */
3680 static void
3681 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3682 {
3683 tcp_seq onxt = tp->snd_nxt;
3684 u_long ocwnd = tp->snd_cwnd;
3685
3686 INP_WLOCK_ASSERT(tp->t_inpcb);
3687
3688 tcp_timer_activate(tp, TT_REXMT, 0);
3689 tp->t_rtttime = 0;
3690 tp->snd_nxt = th->th_ack;
3691 /*
3692 * Set snd_cwnd to one segment beyond acknowledged offset.
3693 * (tp->snd_una has not yet been updated when this function is called.)
3694 */
3695 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
3696 tp->t_flags |= TF_ACKNOW;
3697 (void) tcp_output(tp);
3698 tp->snd_cwnd = ocwnd;
3699 if (SEQ_GT(onxt, tp->snd_nxt))
3700 tp->snd_nxt = onxt;
3701 /*
3702 * Partial window deflation. Relies on the fact that tp->snd_una
3703 * has not been updated yet.
3704 */
3705 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3706 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3707 else
3708 tp->snd_cwnd = 0;
3709 tp->snd_cwnd += tp->t_maxseg;
3710 }