1 /*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (c) 2007-2008,2010
5 * Swinburne University of Technology, Melbourne, Australia.
6 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
7 * Copyright (c) 2010 The FreeBSD Foundation
8 * Copyright (c) 2010-2011 Juniper Networks, Inc.
9 * All rights reserved.
10 *
11 * Portions of this software were developed at the Centre for Advanced Internet
12 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
13 * James Healy and David Hayes, made possible in part by a grant from the Cisco
14 * University Research Program Fund at Community Foundation Silicon Valley.
15 *
16 * Portions of this software were developed at the Centre for Advanced
17 * Internet Architectures, Swinburne University of Technology, Melbourne,
18 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
19 *
20 * Portions of this software were developed by Robert N. M. Watson under
21 * contract to Juniper Networks, Inc.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 4. Neither the name of the University nor the names of its contributors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 *
47 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/10.0/sys/netinet/tcp_input.c 271669 2014-09-16 09:50:19Z delphij $");
52
53 #include "opt_ipfw.h" /* for ipfw_fwd */
54 #include "opt_inet.h"
55 #include "opt_inet6.h"
56 #include "opt_ipsec.h"
57 #include "opt_kdtrace.h"
58 #include "opt_tcpdebug.h"
59
60 #include <sys/param.h>
61 #include <sys/kernel.h>
62 #include <sys/hhook.h>
63 #include <sys/malloc.h>
64 #include <sys/mbuf.h>
65 #include <sys/proc.h> /* for proc0 declaration */
66 #include <sys/protosw.h>
67 #include <sys/sdt.h>
68 #include <sys/signalvar.h>
69 #include <sys/socket.h>
70 #include <sys/socketvar.h>
71 #include <sys/sysctl.h>
72 #include <sys/syslog.h>
73 #include <sys/systm.h>
74
75 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
76
77 #include <vm/uma.h>
78
79 #include <net/if.h>
80 #include <net/route.h>
81 #include <net/vnet.h>
82
83 #define TCPSTATES /* for logging */
84
85 #include <netinet/cc.h>
86 #include <netinet/in.h>
87 #include <netinet/in_kdtrace.h>
88 #include <netinet/in_pcb.h>
89 #include <netinet/in_systm.h>
90 #include <netinet/in_var.h>
91 #include <netinet/ip.h>
92 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
93 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
94 #include <netinet/ip_var.h>
95 #include <netinet/ip_options.h>
96 #include <netinet/ip6.h>
97 #include <netinet/icmp6.h>
98 #include <netinet6/in6_pcb.h>
99 #include <netinet6/ip6_var.h>
100 #include <netinet6/nd6.h>
101 #include <netinet/tcp_fsm.h>
102 #include <netinet/tcp_seq.h>
103 #include <netinet/tcp_timer.h>
104 #include <netinet/tcp_var.h>
105 #include <netinet6/tcp6_var.h>
106 #include <netinet/tcpip.h>
107 #include <netinet/tcp_syncache.h>
108 #ifdef TCPDEBUG
109 #include <netinet/tcp_debug.h>
110 #endif /* TCPDEBUG */
111 #ifdef TCP_OFFLOAD
112 #include <netinet/tcp_offload.h>
113 #endif
114
115 #ifdef IPSEC
116 #include <netipsec/ipsec.h>
117 #include <netipsec/ipsec6.h>
118 #endif /*IPSEC*/
119
120 #include <machine/in_cksum.h>
121
122 #include <security/mac/mac_framework.h>
123
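/*
 * Number of consecutive duplicate ACKs that triggers fast retransmit and
 * entry into fast recovery (RFC 5681).
 */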
124 const int tcprexmtthresh = 3;
125
126 int tcp_log_in_vain = 0;
127 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
128 &tcp_log_in_vain, 0,
129 "Log all incoming TCP segments to closed ports");
130
131 VNET_DEFINE(int, blackhole) = 0;
132 #define V_blackhole VNET(blackhole)
133 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
134 &VNET_NAME(blackhole), 0,
135 "Do not send RST on segments to closed ports");
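/*
 * Exposed as the net.inet.tcp.blackhole sysctl: a value of 1 silently drops
 * SYNs to closed ports, 2 drops any segment to a closed port (see the
 * inp == NULL handling in tcp_input() below), e.g. "sysctl
 * net.inet.tcp.blackhole=2".
 */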
136
137 VNET_DEFINE(int, tcp_delack_enabled) = 1;
138 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
139 &VNET_NAME(tcp_delack_enabled), 0,
140 "Delay ACK to try and piggyback it onto a data packet");
141
142 VNET_DEFINE(int, drop_synfin) = 0;
143 #define V_drop_synfin VNET(drop_synfin)
144 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
145 &VNET_NAME(drop_synfin), 0,
146 "Drop TCP packets with SYN+FIN set");
147
148 VNET_DEFINE(int, tcp_do_rfc3042) = 1;
149 #define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042)
150 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
151 &VNET_NAME(tcp_do_rfc3042), 0,
152 "Enable RFC 3042 (Limited Transmit)");
153
154 VNET_DEFINE(int, tcp_do_rfc3390) = 1;
155 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
156 &VNET_NAME(tcp_do_rfc3390), 0,
157 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
158
159 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, experimental, CTLFLAG_RW, 0,
160 "Experimental TCP extensions");
161
162 VNET_DEFINE(int, tcp_do_initcwnd10) = 1;
163 SYSCTL_VNET_INT(_net_inet_tcp_experimental, OID_AUTO, initcwnd10, CTLFLAG_RW,
164 &VNET_NAME(tcp_do_initcwnd10), 0,
165 "Enable draft-ietf-tcpm-initcwnd-05 (Increasing initial CWND to 10)");
166
167 VNET_DEFINE(int, tcp_do_rfc3465) = 1;
168 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
169 &VNET_NAME(tcp_do_rfc3465), 0,
170 "Enable RFC 3465 (Appropriate Byte Counting)");
171
172 VNET_DEFINE(int, tcp_abc_l_var) = 2;
173 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
174 &VNET_NAME(tcp_abc_l_var), 2,
175 "Cap the max cwnd increment during slow-start to this number of segments");
176
177 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
178
179 VNET_DEFINE(int, tcp_do_ecn) = 0;
180 SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
181 &VNET_NAME(tcp_do_ecn), 0,
182 "TCP ECN support");
183
184 VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
185 SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
186 &VNET_NAME(tcp_ecn_maxretries), 0,
187 "Max retries before giving up on ECN");
188
189 VNET_DEFINE(int, tcp_insecure_rst) = 0;
190 #define V_tcp_insecure_rst VNET(tcp_insecure_rst)
191 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
192 &VNET_NAME(tcp_insecure_rst), 0,
193 "Follow the old (insecure) criteria for accepting RST packets");
194
195 VNET_DEFINE(int, tcp_recvspace) = 1024*64;
196 #define V_tcp_recvspace VNET(tcp_recvspace)
197 SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
198 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
199
200 VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
201 #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
202 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
203 &VNET_NAME(tcp_do_autorcvbuf), 0,
204 "Enable automatic receive buffer sizing");
205
206 VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
207 #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
208 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
209 &VNET_NAME(tcp_autorcvbuf_inc), 0,
210 "Increment step size of the automatic receive buffer");
211
212 VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
213 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
214 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
215 &VNET_NAME(tcp_autorcvbuf_max), 0,
216 "Max size of automatic receive buffer");
217
218 VNET_DEFINE(struct inpcbhead, tcb);
219 #define tcb6 tcb /* for KAME src sync over BSD*'s */
220 VNET_DEFINE(struct inpcbinfo, tcbinfo);
221
222 static void tcp_dooptions(struct tcpopt *, u_char *, int, int);
223 static void tcp_do_segment(struct mbuf *, struct tcphdr *,
224 struct socket *, struct tcpcb *, int, int, uint8_t,
225 int);
226 static void tcp_dropwithreset(struct mbuf *, struct tcphdr *,
227 struct tcpcb *, int, int);
228 static void tcp_pulloutofband(struct socket *,
229 struct tcphdr *, struct mbuf *, int);
230 static void tcp_xmit_timer(struct tcpcb *, int);
231 static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
232 static void inline tcp_fields_to_host(struct tcphdr *);
233 #ifdef TCP_SIGNATURE
234 static void inline tcp_fields_to_net(struct tcphdr *);
235 static int inline tcp_signature_verify_input(struct mbuf *, int, int,
236 int, struct tcpopt *, struct tcphdr *, u_int);
237 #endif
238 static void inline cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
239 uint16_t type);
240 static void inline cc_conn_init(struct tcpcb *tp);
241 static void inline cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
242 static void inline hhook_run_tcp_est_in(struct tcpcb *tp,
243 struct tcphdr *th, struct tcpopt *to);
244
245 /*
246 * TCP statistics are stored in an "array" of counter(9)s.
247 */
248 VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
249 VNET_PCPUSTAT_SYSINIT(tcpstat);
250 SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
251 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
252
253 #ifdef VIMAGE
254 VNET_PCPUSTAT_SYSUNINIT(tcpstat);
255 #endif /* VIMAGE */
256 /*
257 * Kernel module interface for updating tcpstat. The argument is an index
258 * into tcpstat treated as an array.
259 */
260 void
261 kmod_tcpstat_inc(int statnum)
262 {
263
264 counter_u64_add(VNET(tcpstat)[statnum], 1);
265 }
266
267 /*
268 * Wrapper for the TCP established input helper hook.
269 */
270 static void inline
271 hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
272 {
273 struct tcp_hhook_data hhook_data;
274
275 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
276 hhook_data.tp = tp;
277 hhook_data.th = th;
278 hhook_data.to = to;
279
280 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
281 tp->osd);
282 }
283 }
284
285 /*
286 * CC wrapper hook functions
287 */
288 static void inline
289 cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
290 {
291 INP_WLOCK_ASSERT(tp->t_inpcb);
292
293 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
294 if (tp->snd_cwnd <= tp->snd_wnd)
295 tp->ccv->flags |= CCF_CWND_LIMITED;
296 else
297 tp->ccv->flags &= ~CCF_CWND_LIMITED;
298
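	/*
	 * For a regular ACK, maintain the RFC 3465 (Appropriate Byte
	 * Counting) state: in congestion avoidance (cwnd > ssthresh),
	 * accumulate acked bytes capped at abc_l_var segments per ACK and
	 * raise CCF_ABC_SENTAWND once a full cwnd worth of data has been
	 * acked; in slow start the byte counter is simply reset.
	 */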
299 if (type == CC_ACK) {
300 if (tp->snd_cwnd > tp->snd_ssthresh) {
301 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
302 V_tcp_abc_l_var * tp->t_maxseg);
303 if (tp->t_bytes_acked >= tp->snd_cwnd) {
304 tp->t_bytes_acked -= tp->snd_cwnd;
305 tp->ccv->flags |= CCF_ABC_SENTAWND;
306 }
307 } else {
308 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
309 tp->t_bytes_acked = 0;
310 }
311 }
312
313 if (CC_ALGO(tp)->ack_received != NULL) {
314 /* XXXLAS: Find a way to live without this */
315 tp->ccv->curack = th->th_ack;
316 CC_ALGO(tp)->ack_received(tp->ccv, type);
317 }
318 }
319
320 static void inline
321 cc_conn_init(struct tcpcb *tp)
322 {
323 struct hc_metrics_lite metrics;
324 struct inpcb *inp = tp->t_inpcb;
325 int rtt;
326
327 INP_WLOCK_ASSERT(tp->t_inpcb);
328
329 tcp_hc_get(&inp->inp_inc, &metrics);
330
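	/*
	 * If we have no RTT sample yet, seed srtt/rttvar and the retransmit
	 * timer from the host cache metrics fetched above, rather than
	 * starting from the defaults.
	 */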
331 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
332 tp->t_srtt = rtt;
333 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
334 TCPSTAT_INC(tcps_usedrtt);
335 if (metrics.rmx_rttvar) {
336 tp->t_rttvar = metrics.rmx_rttvar;
337 TCPSTAT_INC(tcps_usedrttvar);
338 } else {
339 /* default variation is +- 1 rtt */
340 tp->t_rttvar =
341 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
342 }
343 TCPT_RANGESET(tp->t_rxtcur,
344 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
345 tp->t_rttmin, TCPTV_REXMTMAX);
346 }
347 if (metrics.rmx_ssthresh) {
348 /*
349 * There's some sort of gateway or interface
350 * buffer limit on the path. Use this to set
351 * the slow start threshold, but set the
352 * threshold to no less than 2*mss.
353 */
354 tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
355 TCPSTAT_INC(tcps_usedssthresh);
356 }
357
358 /*
359 * Set the initial slow-start flight size.
360 *
361 * RFC5681 Section 3.1 specifies the default conservative values.
362 * RFC3390 specifies slightly more aggressive values.
363 * Draft-ietf-tcpm-initcwnd-05 increases it to ten segments.
364 *
365 * If a SYN or SYN/ACK was lost and retransmitted, we have to
366 * reduce the initial CWND to one segment as congestion is likely
367 * requiring us to be cautious.
368 */
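	/*
	 * For example, with a 1460-byte MSS the initcwnd10 branch yields
	 * min(14600, max(2920, 14600)) = 14600 bytes (10 segments), while
	 * the RFC 3390 branch yields min(5840, max(2920, 4380)) = 4380
	 * bytes (3 segments).
	 */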
369 if (tp->snd_cwnd == 1)
370 tp->snd_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
371 else if (V_tcp_do_initcwnd10)
372 tp->snd_cwnd = min(10 * tp->t_maxseg,
373 max(2 * tp->t_maxseg, 14600));
374 else if (V_tcp_do_rfc3390)
375 tp->snd_cwnd = min(4 * tp->t_maxseg,
376 max(2 * tp->t_maxseg, 4380));
377 else {
378 /* Per RFC5681 Section 3.1 */
379 if (tp->t_maxseg > 2190)
380 tp->snd_cwnd = 2 * tp->t_maxseg;
381 else if (tp->t_maxseg > 1095)
382 tp->snd_cwnd = 3 * tp->t_maxseg;
383 else
384 tp->snd_cwnd = 4 * tp->t_maxseg;
385 }
386
387 if (CC_ALGO(tp)->conn_init != NULL)
388 CC_ALGO(tp)->conn_init(tp->ccv);
389 }
390
391 void inline
392 cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
393 {
394 INP_WLOCK_ASSERT(tp->t_inpcb);
395
396 switch(type) {
397 case CC_NDUPACK:
398 if (!IN_FASTRECOVERY(tp->t_flags)) {
399 tp->snd_recover = tp->snd_max;
400 if (tp->t_flags & TF_ECN_PERMIT)
401 tp->t_flags |= TF_ECN_SND_CWR;
402 }
403 break;
404 case CC_ECN:
405 if (!IN_CONGRECOVERY(tp->t_flags)) {
406 TCPSTAT_INC(tcps_ecn_rcwnd);
407 tp->snd_recover = tp->snd_max;
408 if (tp->t_flags & TF_ECN_PERMIT)
409 tp->t_flags |= TF_ECN_SND_CWR;
410 }
411 break;
412 case CC_RTO:
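		/*
		 * After a retransmission timeout, ssthresh becomes half the
		 * flight size (at least two segments, rounded down to a
		 * multiple of the MSS) and cwnd collapses to a single
		 * segment, as described in RFC 5681.
		 */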
413 tp->t_dupacks = 0;
414 tp->t_bytes_acked = 0;
415 EXIT_RECOVERY(tp->t_flags);
416 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
417 tp->t_maxseg) * tp->t_maxseg;
418 tp->snd_cwnd = tp->t_maxseg;
419 break;
420 case CC_RTO_ERR:
421 TCPSTAT_INC(tcps_sndrexmitbad);
422 /* RTO was unnecessary, so reset everything. */
423 tp->snd_cwnd = tp->snd_cwnd_prev;
424 tp->snd_ssthresh = tp->snd_ssthresh_prev;
425 tp->snd_recover = tp->snd_recover_prev;
426 if (tp->t_flags & TF_WASFRECOVERY)
427 ENTER_FASTRECOVERY(tp->t_flags);
428 if (tp->t_flags & TF_WASCRECOVERY)
429 ENTER_CONGRECOVERY(tp->t_flags);
430 tp->snd_nxt = tp->snd_max;
431 tp->t_flags &= ~TF_PREVVALID;
432 tp->t_badrxtwin = 0;
433 break;
434 }
435
436 if (CC_ALGO(tp)->cong_signal != NULL) {
437 if (th != NULL)
438 tp->ccv->curack = th->th_ack;
439 CC_ALGO(tp)->cong_signal(tp->ccv, type);
440 }
441 }
442
443 static void inline
444 cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
445 {
446 INP_WLOCK_ASSERT(tp->t_inpcb);
447
448 /* XXXLAS: KASSERT that we're in recovery? */
449
450 if (CC_ALGO(tp)->post_recovery != NULL) {
451 tp->ccv->curack = th->th_ack;
452 CC_ALGO(tp)->post_recovery(tp->ccv);
453 }
454 /* XXXLAS: EXIT_RECOVERY ? */
455 tp->t_bytes_acked = 0;
456 }
457
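/*
 * Byte-order helpers: convert the sequence, ACK, window and urgent pointer
 * fields of a TCP header between network and host order so that the rest of
 * the input path can operate on host-order values.
 */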
458 static inline void
459 tcp_fields_to_host(struct tcphdr *th)
460 {
461
462 th->th_seq = ntohl(th->th_seq);
463 th->th_ack = ntohl(th->th_ack);
464 th->th_win = ntohs(th->th_win);
465 th->th_urp = ntohs(th->th_urp);
466 }
467
468 #ifdef TCP_SIGNATURE
469 static inline void
470 tcp_fields_to_net(struct tcphdr *th)
471 {
472
473 th->th_seq = htonl(th->th_seq);
474 th->th_ack = htonl(th->th_ack);
475 th->th_win = htons(th->th_win);
476 th->th_urp = htons(th->th_urp);
477 }
478
479 static inline int
480 tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
481 struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
482 {
483 int ret;
484
485 tcp_fields_to_net(th);
486 ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
487 tcp_fields_to_host(th);
488 return (ret);
489 }
490 #endif
491
492 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
493 #ifdef INET6
494 #define ND6_HINT(tp) \
495 do { \
496 if ((tp) && (tp)->t_inpcb && \
497 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
498 nd6_nud_hint(NULL, NULL, 0); \
499 } while (0)
500 #else
501 #define ND6_HINT(tp)
502 #endif
503
504 /*
505 * Indicate whether this ack should be delayed. We can delay the ack if
506 * - there is no delayed ack timer in progress and
507 * - our last ack wasn't a 0-sized window. We never want to delay
508 * the ack that opens up a 0-sized window and
509 * - delayed acks are enabled or
510 * - this is a half-synchronized T/TCP connection.
511 * - the segment size is not larger than the MSS and LRO wasn't used
512 * for this segment.
513 */
514 #define DELAY_ACK(tp, tlen) \
515 ((!tcp_timer_active(tp, TT_DELACK) && \
516 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
517 (tlen <= tp->t_maxopd) && \
518 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
519
520 /*
521 * TCP input handling is split into multiple parts:
522 * tcp6_input is a thin wrapper around tcp_input for the extended
523 * ip6_protox[] call format in ip6_input
524 * tcp_input handles primary segment validation, inpcb lookup and
525 * SYN processing on listen sockets
526 * tcp_do_segment processes the ACK and text of the segment for
527 * establishing, established and closing connections
528 */
529 #ifdef INET6
530 int
531 tcp6_input(struct mbuf **mp, int *offp, int proto)
532 {
533 struct mbuf *m = *mp;
534 struct in6_ifaddr *ia6;
535
536 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
537
538 /*
539 * draft-itojun-ipv6-tcp-to-anycast
540 * is there a better place to put this?
541 */
542 ia6 = ip6_getdstifaddr(m);
543 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
544 struct ip6_hdr *ip6;
545
546 ifa_free(&ia6->ia_ifa);
547 ip6 = mtod(m, struct ip6_hdr *);
548 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
549 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
550 return IPPROTO_DONE;
551 }
552 if (ia6)
553 ifa_free(&ia6->ia_ifa);
554
555 tcp_input(m, *offp);
556 return IPPROTO_DONE;
557 }
558 #endif /* INET6 */
559
560 void
561 tcp_input(struct mbuf *m, int off0)
562 {
563 struct tcphdr *th = NULL;
564 struct ip *ip = NULL;
565 struct inpcb *inp = NULL;
566 struct tcpcb *tp = NULL;
567 struct socket *so = NULL;
568 u_char *optp = NULL;
569 int optlen = 0;
570 #ifdef INET
571 int len;
572 #endif
573 int tlen = 0, off;
574 int drop_hdrlen;
575 int thflags;
576 int rstreason = 0; /* For badport_bandlim accounting purposes */
577 #ifdef TCP_SIGNATURE
578 uint8_t sig_checked = 0;
579 #endif
580 uint8_t iptos = 0;
581 struct m_tag *fwd_tag = NULL;
582 #ifdef INET6
583 struct ip6_hdr *ip6 = NULL;
584 int isipv6;
585 #else
586 const void *ip6 = NULL;
587 #endif /* INET6 */
588 struct tcpopt to; /* options in this segment */
589 char *s = NULL; /* address and port logging */
590 int ti_locked;
591 #define TI_UNLOCKED 1
592 #define TI_WLOCKED 2
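	/*
	 * ti_locked tracks whether this thread currently holds the global
	 * tcbinfo (pcbinfo) write lock: TI_WLOCKED if so, TI_UNLOCKED
	 * otherwise.
	 */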
593
594 #ifdef TCPDEBUG
595 /*
596 * The size of tcp_saveipgen must be the size of the max ip header,
597 * now IPv6.
598 */
599 u_char tcp_saveipgen[IP6_HDR_LEN];
600 struct tcphdr tcp_savetcp;
601 short ostate = 0;
602 #endif
603
604 #ifdef INET6
605 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
606 #endif
607
608 to.to_flags = 0;
609 TCPSTAT_INC(tcps_rcvtotal);
610
611 #ifdef INET6
612 if (isipv6) {
613 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
614
615 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
616 m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
617 if (m == NULL) {
618 TCPSTAT_INC(tcps_rcvshort);
619 return;
620 }
621 }
622
623 ip6 = mtod(m, struct ip6_hdr *);
624 th = (struct tcphdr *)((caddr_t)ip6 + off0);
625 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
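		/*
		 * Checksum verification: if the NIC has already validated
		 * the TCP checksum (CSUM_DATA_VALID_IPV6), use or complete
		 * the value it supplied; otherwise compute it in software
		 * with in6_cksum().  A non-zero result means the segment is
		 * corrupt and is dropped.
		 */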
626 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
627 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
628 th->th_sum = m->m_pkthdr.csum_data;
629 else
630 th->th_sum = in6_cksum_pseudo(ip6, tlen,
631 IPPROTO_TCP, m->m_pkthdr.csum_data);
632 th->th_sum ^= 0xffff;
633 } else
634 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
635 if (th->th_sum) {
636 TCPSTAT_INC(tcps_rcvbadsum);
637 goto drop;
638 }
639
640 /*
641 * Be proactive about unspecified IPv6 address in source.
642 * As we use all-zero to indicate an unbound/unconnected pcb,
643 * unspecified IPv6 address can be used to confuse us.
644 *
645 * Note that packets with an unspecified IPv6 destination are
646 * already dropped in ip6_input.
647 */
648 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
649 /* XXX stat */
650 goto drop;
651 }
652 }
653 #endif
654 #if defined(INET) && defined(INET6)
655 else
656 #endif
657 #ifdef INET
658 {
659 /*
660 * Get IP and TCP header together in first mbuf.
661 * Note: IP leaves IP header in first mbuf.
662 */
663 if (off0 > sizeof (struct ip)) {
664 ip_stripoptions(m);
665 off0 = sizeof(struct ip);
666 }
667 if (m->m_len < sizeof (struct tcpiphdr)) {
668 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
669 == NULL) {
670 TCPSTAT_INC(tcps_rcvshort);
671 return;
672 }
673 }
674 ip = mtod(m, struct ip *);
675 th = (struct tcphdr *)((caddr_t)ip + off0);
676 tlen = ntohs(ip->ip_len) - off0;
677
678 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
679 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
680 th->th_sum = m->m_pkthdr.csum_data;
681 else
682 th->th_sum = in_pseudo(ip->ip_src.s_addr,
683 ip->ip_dst.s_addr,
684 htonl(m->m_pkthdr.csum_data + tlen +
685 IPPROTO_TCP));
686 th->th_sum ^= 0xffff;
687 } else {
688 struct ipovly *ipov = (struct ipovly *)ip;
689
690 /*
691 * Checksum extended TCP header and data.
692 */
693 len = off0 + tlen;
694 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
695 ipov->ih_len = htons(tlen);
696 th->th_sum = in_cksum(m, len);
697 /* Reset length for SDT probes. */
698 ip->ip_len = htons(tlen + off0);
699 }
700
701 if (th->th_sum) {
702 TCPSTAT_INC(tcps_rcvbadsum);
703 goto drop;
704 }
705 /* Re-initialization for later version check */
706 ip->ip_v = IPVERSION;
707 }
708 #endif /* INET */
709
710 #ifdef INET6
711 if (isipv6)
712 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
713 #endif
714 #if defined(INET) && defined(INET6)
715 else
716 #endif
717 #ifdef INET
718 iptos = ip->ip_tos;
719 #endif
720
721 /*
722 * Check that TCP offset makes sense,
723 * pull out TCP options and adjust length. XXX
724 */
725 off = th->th_off << 2;
726 if (off < sizeof (struct tcphdr) || off > tlen) {
727 TCPSTAT_INC(tcps_rcvbadoff);
728 goto drop;
729 }
730 tlen -= off; /* tlen is used instead of ti->ti_len */
731 if (off > sizeof (struct tcphdr)) {
732 #ifdef INET6
733 if (isipv6) {
734 IP6_EXTHDR_CHECK(m, off0, off, );
735 ip6 = mtod(m, struct ip6_hdr *);
736 th = (struct tcphdr *)((caddr_t)ip6 + off0);
737 }
738 #endif
739 #if defined(INET) && defined(INET6)
740 else
741 #endif
742 #ifdef INET
743 {
744 if (m->m_len < sizeof(struct ip) + off) {
745 if ((m = m_pullup(m, sizeof (struct ip) + off))
746 == NULL) {
747 TCPSTAT_INC(tcps_rcvshort);
748 return;
749 }
750 ip = mtod(m, struct ip *);
751 th = (struct tcphdr *)((caddr_t)ip + off0);
752 }
753 }
754 #endif
755 optlen = off - sizeof (struct tcphdr);
756 optp = (u_char *)(th + 1);
757 }
758 thflags = th->th_flags;
759
760 /*
761 * Convert TCP protocol specific fields to host format.
762 */
763 tcp_fields_to_host(th);
764
765 /*
766 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
767 */
768 drop_hdrlen = off0 + off;
769
770 /*
771 * Locate pcb for segment; if we're likely to add or remove a
772 * connection then first acquire pcbinfo lock. There are two cases
773 * where we might discover later we need a write lock despite the
774 * flags: ACKs moving a connection out of the syncache, and ACKs for
775 * a connection in TIMEWAIT.
776 */
777 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) {
778 INP_INFO_WLOCK(&V_tcbinfo);
779 ti_locked = TI_WLOCKED;
780 } else
781 ti_locked = TI_UNLOCKED;
782
783 /*
784 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
785 */
786 if (
787 #ifdef INET6
788 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
789 #ifdef INET
790 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
791 #endif
792 #endif
793 #if defined(INET) && !defined(INET6)
794 (m->m_flags & M_IP_NEXTHOP)
795 #endif
796 )
797 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
798
799 findpcb:
800 #ifdef INVARIANTS
801 if (ti_locked == TI_WLOCKED) {
802 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
803 } else {
804 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
805 }
806 #endif
807 #ifdef INET6
808 if (isipv6 && fwd_tag != NULL) {
809 struct sockaddr_in6 *next_hop6;
810
811 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
812 /*
813 * Transparently forwarded. Pretend to be the destination.
814 * Already got one like this?
815 */
816 inp = in6_pcblookup_mbuf(&V_tcbinfo,
817 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
818 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
819 if (!inp) {
820 /*
821 * It's new. Try to find the ambushing socket.
822 * Because we've rewritten the destination address,
823 * any hardware-generated hash is ignored.
824 */
825 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
826 th->th_sport, &next_hop6->sin6_addr,
827 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
828 th->th_dport, INPLOOKUP_WILDCARD |
829 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
830 }
831 } else if (isipv6) {
832 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
833 th->th_sport, &ip6->ip6_dst, th->th_dport,
834 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
835 m->m_pkthdr.rcvif, m);
836 }
837 #endif /* INET6 */
838 #if defined(INET6) && defined(INET)
839 else
840 #endif
841 #ifdef INET
842 if (fwd_tag != NULL) {
843 struct sockaddr_in *next_hop;
844
845 next_hop = (struct sockaddr_in *)(fwd_tag+1);
846 /*
847 * Transparently forwarded. Pretend to be the destination.
848 * Already got one like this?
849 */
850 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
851 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
852 m->m_pkthdr.rcvif, m);
853 if (!inp) {
854 /*
855 * It's new. Try to find the ambushing socket.
856 * Because we've rewritten the destination address,
857 * any hardware-generated hash is ignored.
858 */
859 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
860 th->th_sport, next_hop->sin_addr,
861 next_hop->sin_port ? ntohs(next_hop->sin_port) :
862 th->th_dport, INPLOOKUP_WILDCARD |
863 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
864 }
865 } else
866 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
867 th->th_sport, ip->ip_dst, th->th_dport,
868 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
869 m->m_pkthdr.rcvif, m);
870 #endif /* INET */
871
872 /*
873 * If the INPCB does not exist then all data in the incoming
874 * segment is discarded and an appropriate RST is sent back.
875 * XXX MRT Send RST using which routing table?
876 */
877 if (inp == NULL) {
878 /*
879 * Log communication attempts to ports that are not
880 * in use.
881 */
882 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
883 tcp_log_in_vain == 2) {
884 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
885 log(LOG_INFO, "%s; %s: Connection attempt "
886 "to closed port\n", s, __func__);
887 }
888 /*
889 * When blackholing do not respond with a RST but
890 * completely ignore the segment and drop it.
891 */
892 if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
893 V_blackhole == 2)
894 goto dropunlock;
895
896 rstreason = BANDLIM_RST_CLOSEDPORT;
897 goto dropwithreset;
898 }
899 INP_WLOCK_ASSERT(inp);
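	/*
	 * If the NIC tagged this mbuf with a flow id (e.g. an RSS hash) and
	 * the inpcb does not yet carry a hardware flow id, record it in the
	 * inpcb for non-listening sockets so later processing can reuse it.
	 */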
900 if (!(inp->inp_flags & INP_HW_FLOWID)
901 && (m->m_flags & M_FLOWID)
902 && ((inp->inp_socket == NULL)
903 || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
904 inp->inp_flags |= INP_HW_FLOWID;
905 inp->inp_flags &= ~INP_SW_FLOWID;
906 inp->inp_flowid = m->m_pkthdr.flowid;
907 }
908 #ifdef IPSEC
909 #ifdef INET6
910 if (isipv6 && ipsec6_in_reject(m, inp)) {
911 IPSEC6STAT_INC(ips_in_polvio);
912 goto dropunlock;
913 } else
914 #endif /* INET6 */
915 if (ipsec4_in_reject(m, inp) != 0) {
916 IPSECSTAT_INC(ips_in_polvio);
917 goto dropunlock;
918 }
919 #endif /* IPSEC */
920
921 /*
922 * Check the minimum TTL for the socket.
923 */
924 if (inp->inp_ip_minttl != 0) {
925 #ifdef INET6
926 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
927 goto dropunlock;
928 else
929 #endif
930 if (inp->inp_ip_minttl > ip->ip_ttl)
931 goto dropunlock;
932 }
933
934 /*
935 * A previous connection in TIMEWAIT state is supposed to catch stray
936 * or duplicate segments arriving late. If this segment was a
937 * legitimate new connection attempt, the old INPCB gets removed and
938 * we can try again to find a listening socket.
939 *
940 * At this point, due to earlier optimism, we may hold only an inpcb
941 * lock, and not the inpcbinfo write lock. If so, we need to try to
942 * acquire it, or if that fails, acquire a reference on the inpcb,
943 * drop all locks, acquire a global write lock, and then re-acquire
944 * the inpcb lock. We may at that point discover that another thread
945 * has tried to free the inpcb, in which case we need to loop back
946 * and try to find a new inpcb to deliver to.
947 *
948 * XXXRW: It may be time to rethink timewait locking.
949 */
950 relocked:
951 if (inp->inp_flags & INP_TIMEWAIT) {
952 if (ti_locked == TI_UNLOCKED) {
953 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
954 in_pcbref(inp);
955 INP_WUNLOCK(inp);
956 INP_INFO_WLOCK(&V_tcbinfo);
957 ti_locked = TI_WLOCKED;
958 INP_WLOCK(inp);
959 if (in_pcbrele_wlocked(inp)) {
960 inp = NULL;
961 goto findpcb;
962 }
963 } else
964 ti_locked = TI_WLOCKED;
965 }
966 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
967
968 if (thflags & TH_SYN)
969 tcp_dooptions(&to, optp, optlen, TO_SYN);
970 /*
971 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
972 */
973 if (tcp_twcheck(inp, &to, th, m, tlen))
974 goto findpcb;
975 INP_INFO_WUNLOCK(&V_tcbinfo);
976 return;
977 }
978 /*
979 * The TCPCB may no longer exist if the connection is winding
980 * down or it is in the CLOSED state. Either way we drop the
981 * segment and send an appropriate response.
982 */
983 tp = intotcpcb(inp);
984 if (tp == NULL || tp->t_state == TCPS_CLOSED) {
985 rstreason = BANDLIM_RST_CLOSEDPORT;
986 goto dropwithreset;
987 }
988
989 #ifdef TCP_OFFLOAD
990 if (tp->t_flags & TF_TOE) {
991 tcp_offload_input(tp, m);
992 m = NULL; /* consumed by the TOE driver */
993 goto dropunlock;
994 }
995 #endif
996
997 /*
998 * We've identified a valid inpcb, but it could be that we need an
999 * inpcbinfo write lock but don't hold it. In this case, attempt to
1000 * acquire using the same strategy as the TIMEWAIT case above. If we
1001 * relock, we have to jump back to 'relocked' as the connection might
1002 * now be in TIMEWAIT.
1003 */
1004 #ifdef INVARIANTS
1005 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0)
1006 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1007 #endif
1008 if (tp->t_state != TCPS_ESTABLISHED) {
1009 if (ti_locked == TI_UNLOCKED) {
1010 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
1011 in_pcbref(inp);
1012 INP_WUNLOCK(inp);
1013 INP_INFO_WLOCK(&V_tcbinfo);
1014 ti_locked = TI_WLOCKED;
1015 INP_WLOCK(inp);
1016 if (in_pcbrele_wlocked(inp)) {
1017 inp = NULL;
1018 goto findpcb;
1019 }
1020 goto relocked;
1021 } else
1022 ti_locked = TI_WLOCKED;
1023 }
1024 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1025 }
1026
1027 #ifdef MAC
1028 INP_WLOCK_ASSERT(inp);
1029 if (mac_inpcb_check_deliver(inp, m))
1030 goto dropunlock;
1031 #endif
1032 so = inp->inp_socket;
1033 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1034 #ifdef TCPDEBUG
1035 if (so->so_options & SO_DEBUG) {
1036 ostate = tp->t_state;
1037 #ifdef INET6
1038 if (isipv6) {
1039 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
1040 } else
1041 #endif
1042 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1043 tcp_savetcp = *th;
1044 }
1045 #endif /* TCPDEBUG */
1046 /*
1047 * When the socket is accepting connections (the INPCB is in LISTEN
1048 * state) we look into the SYN cache if this is a new connection
1049 * attempt or the completion of a previous one. Because listen
1050 * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be
1051 * held in this case.
1052 */
1053 if (so->so_options & SO_ACCEPTCONN) {
1054 struct in_conninfo inc;
1055
1056 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
1057 "tp not listening", __func__));
1058 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1059
1060 bzero(&inc, sizeof(inc));
1061 #ifdef INET6
1062 if (isipv6) {
1063 inc.inc_flags |= INC_ISIPV6;
1064 inc.inc6_faddr = ip6->ip6_src;
1065 inc.inc6_laddr = ip6->ip6_dst;
1066 } else
1067 #endif
1068 {
1069 inc.inc_faddr = ip->ip_src;
1070 inc.inc_laddr = ip->ip_dst;
1071 }
1072 inc.inc_fport = th->th_sport;
1073 inc.inc_lport = th->th_dport;
1074 inc.inc_fibnum = so->so_fibnum;
1075
1076 /*
1077 * Check for an existing connection attempt in syncache if
1078 * only the ACK flag is set. A successful lookup creates a new
1079 * socket appended to the listen queue in SYN_RECEIVED state.
1080 */
1081 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1082 /*
1083 * Parse the TCP options here because
1084 * syncookies need access to the reflected
1085 * timestamp.
1086 */
1087 tcp_dooptions(&to, optp, optlen, 0);
1088 /*
1089 * NB: syncache_expand() doesn't unlock
1090 * inp and tcpinfo locks.
1091 */
1092 if (!syncache_expand(&inc, &to, th, &so, m)) {
1093 /*
1094 * No syncache entry or ACK was not
1095 * for our SYN/ACK. Send a RST.
1096 * NB: syncache did its own logging
1097 * of the failure cause.
1098 */
1099 rstreason = BANDLIM_RST_OPENPORT;
1100 goto dropwithreset;
1101 }
1102 if (so == NULL) {
1103 /*
1104 * We completed the 3-way handshake
1105 * but could not allocate a socket
1106 * either due to memory shortage,
1107 * listen queue length limits or
1108 * global socket limits. Send RST
1109 * or wait and have the remote end
1110 * retransmit the ACK for another
1111 * try.
1112 */
1113 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1114 log(LOG_DEBUG, "%s; %s: Listen socket: "
1115 "Socket allocation failed due to "
1116 "limits or memory shortage, %s\n",
1117 s, __func__,
1118 V_tcp_sc_rst_sock_fail ?
1119 "sending RST" : "try again");
1120 if (V_tcp_sc_rst_sock_fail) {
1121 rstreason = BANDLIM_UNLIMITED;
1122 goto dropwithreset;
1123 } else
1124 goto dropunlock;
1125 }
1126 /*
1127 * Socket is created in state SYN_RECEIVED.
1128 * Unlock the listen socket, lock the newly
1129 * created socket and update the tp variable.
1130 */
1131 INP_WUNLOCK(inp); /* listen socket */
1132 inp = sotoinpcb(so);
1133 INP_WLOCK(inp); /* new connection */
1134 tp = intotcpcb(inp);
1135 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1136 ("%s: new connection not in SYN_RECEIVED state", __func__));
1137 #ifdef TCP_SIGNATURE
1138 if (sig_checked == 0) {
1139 tcp_dooptions(&to, optp, optlen,
1140 (thflags & TH_SYN) ? TO_SYN : 0);
1141 if (!tcp_signature_verify_input(m, off0, tlen,
1142 optlen, &to, th, tp->t_flags)) {
1143
1144 /*
1145 * An RST received in SYN_SENT state is
1146 * still allowed through for further
1147 * processing.
1148 */
1149 if ((thflags & TH_RST) == 0 ||
1150 tp->t_state != TCPS_SYN_SENT)
1151 goto dropunlock;
1152 }
1153 sig_checked = 1;
1154 }
1155 #endif
1156
1157 /*
1158 * Process the segment and the data it
1159 * contains. tcp_do_segment() consumes
1160 * the mbuf chain and unlocks the inpcb.
1161 */
1162 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1163 iptos, ti_locked);
1164 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1165 return;
1166 }
1167 /*
1168 * Segment flag validation for new connection attempts:
1169 *
1170 * Our (SYN|ACK) response was rejected.
1171 * Check with syncache and remove entry to prevent
1172 * retransmits.
1173 *
1174 * NB: syncache_chkrst does its own logging of failure
1175 * causes.
1176 */
1177 if (thflags & TH_RST) {
1178 syncache_chkrst(&inc, th);
1179 goto dropunlock;
1180 }
1181 /*
1182 * We can't do anything without SYN.
1183 */
1184 if ((thflags & TH_SYN) == 0) {
1185 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1186 log(LOG_DEBUG, "%s; %s: Listen socket: "
1187 "SYN is missing, segment ignored\n",
1188 s, __func__);
1189 TCPSTAT_INC(tcps_badsyn);
1190 goto dropunlock;
1191 }
1192 /*
1193 * (SYN|ACK) is bogus on a listen socket.
1194 */
1195 if (thflags & TH_ACK) {
1196 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1197 log(LOG_DEBUG, "%s; %s: Listen socket: "
1198 "SYN|ACK invalid, segment rejected\n",
1199 s, __func__);
1200 syncache_badack(&inc); /* XXX: Not needed! */
1201 TCPSTAT_INC(tcps_badsyn);
1202 rstreason = BANDLIM_RST_OPENPORT;
1203 goto dropwithreset;
1204 }
1205 /*
1206 * If the drop_synfin option is enabled, drop all
1207 * segments with both the SYN and FIN bits set.
1208 * This prevents e.g. nmap from identifying the
1209 * TCP/IP stack.
1210 * XXX: Poor reasoning. nmap has other methods
1211 * and is constantly refining its stack detection
1212 * strategies.
1213 * XXX: This is a violation of the TCP specification
1214 * and was used by RFC1644.
1215 */
1216 if ((thflags & TH_FIN) && V_drop_synfin) {
1217 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1218 log(LOG_DEBUG, "%s; %s: Listen socket: "
1219 "SYN|FIN segment ignored (based on "
1220 "sysctl setting)\n", s, __func__);
1221 TCPSTAT_INC(tcps_badsyn);
1222 goto dropunlock;
1223 }
1224 /*
1225 * Segment's flags are (SYN) or (SYN|FIN).
1226 *
1227 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1228 * as they do not affect the state of the TCP FSM.
1229 * The data pointed to by TH_URG and th_urp is ignored.
1230 */
1231 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1232 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1233 KASSERT(thflags & (TH_SYN),
1234 ("%s: Listen socket: TH_SYN not set", __func__));
1235 #ifdef INET6
1236 /*
1237 * If deprecated address is forbidden,
1238 * we do not accept SYN to deprecated interface
1239 * address to prevent any new inbound connection from
1240 * getting established.
1241 * When we do not accept SYN, we send a TCP RST,
1242 * with deprecated source address (instead of dropping
1243 * it). We compromise it as it is much better for peer
1244 * to send a RST, and RST will be the final packet
1245 * for the exchange.
1246 *
1247 * If we do not forbid deprecated addresses, we accept
1248 * the SYN packet. RFC2462 does not suggest dropping
1249 * SYN in this case.
1250 * Reading RFC2462 5.5.4 closely, it says the following:
1251 * 1. use of deprecated addr with existing
1252 * communication is okay - "SHOULD continue to be
1253 * used"
1254 * 2. use of it with new communication:
1255 * (2a) "SHOULD NOT be used if alternate address
1256 * with sufficient scope is available"
1257 * (2b) nothing mentioned otherwise.
1258 * Here we fall into (2b) case as we have no choice in
1259 * our source address selection - we must obey the peer.
1260 *
1261 * The wording in RFC2462 is confusing, and there are
1262 * multiple passages describing deprecated address
1263 * handling - worse, they are not exactly the same.
1264 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1265 */
1266 if (isipv6 && !V_ip6_use_deprecated) {
1267 struct in6_ifaddr *ia6;
1268
1269 ia6 = ip6_getdstifaddr(m);
1270 if (ia6 != NULL &&
1271 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1272 ifa_free(&ia6->ia_ifa);
1273 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1274 log(LOG_DEBUG, "%s; %s: Listen socket: "
1275 "Connection attempt to deprecated "
1276 "IPv6 address rejected\n",
1277 s, __func__);
1278 rstreason = BANDLIM_RST_OPENPORT;
1279 goto dropwithreset;
1280 }
1281 if (ia6)
1282 ifa_free(&ia6->ia_ifa);
1283 }
1284 #endif /* INET6 */
1285 /*
1286 * Basic sanity checks on incoming SYN requests:
1287 * Don't respond if the destination is a link layer
1288 * broadcast according to RFC1122 4.2.3.10, p. 104.
1289 * If it is from this socket it must be forged.
1290 * Don't respond if the source or destination is a
1291 * global or subnet broad- or multicast address.
1292 * Note that it is quite possible to receive unicast
1293 * link-layer packets with a broadcast IP address. Use
1294 * in_broadcast() to find them.
1295 */
1296 if (m->m_flags & (M_BCAST|M_MCAST)) {
1297 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1298 log(LOG_DEBUG, "%s; %s: Listen socket: "
1299 "Connection attempt from broad- or multicast "
1300 "link layer address ignored\n", s, __func__);
1301 goto dropunlock;
1302 }
1303 #ifdef INET6
1304 if (isipv6) {
1305 if (th->th_dport == th->th_sport &&
1306 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1307 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1308 log(LOG_DEBUG, "%s; %s: Listen socket: "
1309 "Connection attempt to/from self "
1310 "ignored\n", s, __func__);
1311 goto dropunlock;
1312 }
1313 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1314 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1315 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1316 log(LOG_DEBUG, "%s; %s: Listen socket: "
1317 "Connection attempt from/to multicast "
1318 "address ignored\n", s, __func__);
1319 goto dropunlock;
1320 }
1321 }
1322 #endif
1323 #if defined(INET) && defined(INET6)
1324 else
1325 #endif
1326 #ifdef INET
1327 {
1328 if (th->th_dport == th->th_sport &&
1329 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1330 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1331 log(LOG_DEBUG, "%s; %s: Listen socket: "
1332 "Connection attempt from/to self "
1333 "ignored\n", s, __func__);
1334 goto dropunlock;
1335 }
1336 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1337 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1338 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1339 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1340 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1341 log(LOG_DEBUG, "%s; %s: Listen socket: "
1342 "Connection attempt from/to broad- "
1343 "or multicast address ignored\n",
1344 s, __func__);
1345 goto dropunlock;
1346 }
1347 }
1348 #endif
1349 /*
1350 * SYN appears to be valid. Create compressed TCP state
1351 * for syncache.
1352 */
1353 #ifdef TCPDEBUG
1354 if (so->so_options & SO_DEBUG)
1355 tcp_trace(TA_INPUT, ostate, tp,
1356 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1357 #endif
1358 tcp_dooptions(&to, optp, optlen, TO_SYN);
1359 syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
1360 /*
1361 * Entry added to syncache and mbuf consumed.
1362 * Everything already unlocked by syncache_add().
1363 */
1364 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1365 return;
1366 } else if (tp->t_state == TCPS_LISTEN) {
1367 /*
1368 * When a listen socket is torn down the SO_ACCEPTCONN
1369 * flag is removed first while connections are drained
1370 * from the accept queue in an unlock/lock cycle of the
1371 * ACCEPT_LOCK, opening a race window that allows a SYN
1372 * attempt to go through unhandled.
1373 */
1374 goto dropunlock;
1375 }
1376
1377 #ifdef TCP_SIGNATURE
1378 if (sig_checked == 0) {
1379 tcp_dooptions(&to, optp, optlen,
1380 (thflags & TH_SYN) ? TO_SYN : 0);
1381 if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
1382 th, tp->t_flags)) {
1383
1384 /*
1385 * An RST received in SYN_SENT state is still
1386 * allowed through for further processing.
1387 */
1388 if ((thflags & TH_RST) == 0 ||
1389 tp->t_state != TCPS_SYN_SENT)
1390 goto dropunlock;
1391 }
1392 sig_checked = 1;
1393 }
1394 #endif
1395
1396 TCP_PROBE5(receive, NULL, tp, m->m_data, tp, th);
1397
1398 /*
1399 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1400 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1401 * the inpcb, and unlocks pcbinfo.
1402 */
1403 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
1404 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1405 return;
1406
1407 dropwithreset:
1408 TCP_PROBE5(receive, NULL, tp, m->m_data, tp, th);
1409
1410 if (ti_locked == TI_WLOCKED) {
1411 INP_INFO_WUNLOCK(&V_tcbinfo);
1412 ti_locked = TI_UNLOCKED;
1413 }
1414 #ifdef INVARIANTS
1415 else {
1416 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
1417 "ti_locked: %d", __func__, ti_locked));
1418 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1419 }
1420 #endif
1421
1422 if (inp != NULL) {
1423 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1424 INP_WUNLOCK(inp);
1425 } else
1426 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1427 m = NULL; /* mbuf chain got consumed. */
1428 goto drop;
1429
1430 dropunlock:
1431 if (m != NULL)
1432 TCP_PROBE5(receive, NULL, tp, m->m_data, tp, th);
1433
1434 if (ti_locked == TI_WLOCKED) {
1435 INP_INFO_WUNLOCK(&V_tcbinfo);
1436 ti_locked = TI_UNLOCKED;
1437 }
1438 #ifdef INVARIANTS
1439 else {
1440 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
1441 "ti_locked: %d", __func__, ti_locked));
1442 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1443 }
1444 #endif
1445
1446 if (inp != NULL)
1447 INP_WUNLOCK(inp);
1448
1449 drop:
1450 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1451 if (s != NULL)
1452 free(s, M_TCPLOG);
1453 if (m != NULL)
1454 m_freem(m);
1455 }
1456
1457 static void
1458 tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
1459 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
1460 int ti_locked)
1461 {
1462 int thflags, acked, ourfinisacked, needoutput = 0;
1463 int rstreason, todrop, win;
1464 u_long tiwin;
1465 char *s;
1466 struct in_conninfo *inc;
1467 struct mbuf *mfree;
1468 struct tcpopt to;
1469
1470 #ifdef TCPDEBUG
1471 /*
1472 * The size of tcp_saveipgen must be the size of the max ip header,
1473 * now IPv6.
1474 */
1475 u_char tcp_saveipgen[IP6_HDR_LEN];
1476 struct tcphdr tcp_savetcp;
1477 short ostate = 0;
1478 #endif
1479 thflags = th->th_flags;
1480 inc = &tp->t_inpcb->inp_inc;
1481 tp->sackhint.last_sack_ack = 0;
1482
1483 /*
1484 * If this is either a state-changing packet or current state isn't
1485 * established, we require a write lock on tcbinfo. Otherwise, we
1486 * allow the tcbinfo to be either locked or unlocked, as the
1487 * caller may have unnecessarily acquired a write lock due to a race.
1488 */
1489 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
1490 tp->t_state != TCPS_ESTABLISHED) {
1491 KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
1492 "SYN/FIN/RST/!EST", __func__, ti_locked));
1493 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1494 } else {
1495 #ifdef INVARIANTS
1496 if (ti_locked == TI_WLOCKED)
1497 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1498 else {
1499 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
1500 "ti_locked: %d", __func__, ti_locked));
1501 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1502 }
1503 #endif
1504 }
1505 INP_WLOCK_ASSERT(tp->t_inpcb);
1506 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1507 __func__));
1508 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1509 __func__));
1510
1511 /*
1512 * Segment received on connection.
1513 * Reset idle time and keep-alive timer.
1514 * XXX: This should be done after segment
1515 * validation to ignore broken/spoofed segs.
1516 */
1517 tp->t_rcvtime = ticks;
1518 if (TCPS_HAVEESTABLISHED(tp->t_state))
1519 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
1520
1521 /*
1522 * Unscale the window into a 32-bit value.
1523 * For the SYN_SENT state the scale is zero.
1524 */
1525 tiwin = th->th_win << tp->snd_scale;
1526
1527 /*
1528 * TCP ECN processing.
1529 */
1530 if (tp->t_flags & TF_ECN_PERMIT) {
1531 if (thflags & TH_CWR)
1532 tp->t_flags &= ~TF_ECN_SND_ECE;
1533 switch (iptos & IPTOS_ECN_MASK) {
1534 case IPTOS_ECN_CE:
1535 tp->t_flags |= TF_ECN_SND_ECE;
1536 TCPSTAT_INC(tcps_ecn_ce);
1537 break;
1538 case IPTOS_ECN_ECT0:
1539 TCPSTAT_INC(tcps_ecn_ect0);
1540 break;
1541 case IPTOS_ECN_ECT1:
1542 TCPSTAT_INC(tcps_ecn_ect1);
1543 break;
1544 }
1545 /* Congestion experienced. */
1546 if (thflags & TH_ECE) {
1547 cc_cong_signal(tp, th, CC_ECN);
1548 }
1549 }
1550
1551 /*
1552 * Parse options on any incoming segment.
1553 */
1554 tcp_dooptions(&to, (u_char *)(th + 1),
1555 (th->th_off << 2) - sizeof(struct tcphdr),
1556 (thflags & TH_SYN) ? TO_SYN : 0);
1557
1558 /*
1559 * If echoed timestamp is later than the current time,
1560 * fall back to a non-RFC1323 RTT calculation. Normalize
1561 * timestamp if syncookies were used when this connection
1562 * was established.
1563 */
1564 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1565 to.to_tsecr -= tp->ts_offset;
1566 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
1567 to.to_tsecr = 0;
1568 }
1569 /*
1570 * If timestamps were negotiated during SYN/ACK they should
1571 * appear on every segment during this session and vice versa.
1572 */
1573 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
1574 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1575 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1576 "no action\n", s, __func__);
1577 free(s, M_TCPLOG);
1578 }
1579 }
1580 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
1581 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1582 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1583 "no action\n", s, __func__);
1584 free(s, M_TCPLOG);
1585 }
1586 }
1587
1588 /*
1589 * Process options only when we get SYN/ACK back. The SYN case
1590 * for incoming connections is handled in tcp_syncache.
1591 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
1592 * or <SYN,ACK>) segment itself is never scaled.
1593 * XXX this is traditional behavior, may need to be cleaned up.
1594 */
1595 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1596 if ((to.to_flags & TOF_SCALE) &&
1597 (tp->t_flags & TF_REQ_SCALE)) {
1598 tp->t_flags |= TF_RCVD_SCALE;
1599 tp->snd_scale = to.to_wscale;
1600 }
1601 /*
1602 * Initial send window. It will be updated with
1603 * the next incoming segment to the scaled value.
1604 */
1605 tp->snd_wnd = th->th_win;
1606 if (to.to_flags & TOF_TS) {
1607 tp->t_flags |= TF_RCVD_TSTMP;
1608 tp->ts_recent = to.to_tsval;
1609 tp->ts_recent_age = tcp_ts_getticks();
1610 }
1611 if (to.to_flags & TOF_MSS)
1612 tcp_mss(tp, to.to_mss);
1613 if ((tp->t_flags & TF_SACK_PERMIT) &&
1614 (to.to_flags & TOF_SACKPERM) == 0)
1615 tp->t_flags &= ~TF_SACK_PERMIT;
1616 }
1617
1618 /*
1619 * Header prediction: check for the two common cases
1620 * of a uni-directional data xfer. If the packet has
1621 * no control flags, is in-sequence, the window didn't
1622 * change and we're not retransmitting, it's a
1623 * candidate. If the length is zero and the ack moved
1624 * forward, we're the sender side of the xfer. Just
1625 * free the data acked & wake any higher level process
1626 * that was blocked waiting for space. If the length
1627 * is non-zero and the ack didn't move, we're the
1628 * receiver side. If we're getting packets in-order
1629 * (the reassembly queue is empty), add the data to
1630 * the socket buffer and note that we need a delayed ack.
1631 * Make sure that the hidden state-flags are also off.
1632 * Since we check for TCPS_ESTABLISHED first, it can only
1633 * be TH_NEEDSYN.
1634 */
1635 if (tp->t_state == TCPS_ESTABLISHED &&
1636 th->th_seq == tp->rcv_nxt &&
1637 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1638 tp->snd_nxt == tp->snd_max &&
1639 tiwin && tiwin == tp->snd_wnd &&
1640 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1641 LIST_EMPTY(&tp->t_segq) &&
1642 ((to.to_flags & TOF_TS) == 0 ||
1643 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1644
1645 /*
1646 * If last ACK falls within this segment's sequence numbers,
1647 * record the timestamp.
1648 * NOTE that the test is modified according to the latest
1649 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1650 */
1651 if ((to.to_flags & TOF_TS) != 0 &&
1652 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1653 tp->ts_recent_age = tcp_ts_getticks();
1654 tp->ts_recent = to.to_tsval;
1655 }
1656
1657 if (tlen == 0) {
1658 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1659 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1660 !IN_RECOVERY(tp->t_flags) &&
1661 (to.to_flags & TOF_SACK) == 0 &&
1662 TAILQ_EMPTY(&tp->snd_holes)) {
1663 /*
1664 * This is a pure ack for outstanding data.
1665 */
1666 if (ti_locked == TI_WLOCKED)
1667 INP_INFO_WUNLOCK(&V_tcbinfo);
1668 ti_locked = TI_UNLOCKED;
1669
1670 TCPSTAT_INC(tcps_predack);
1671
1672 /*
1673 * "bad retransmit" recovery.
1674 */
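				/*
				 * If the first retransmission is acked while
				 * t_badrxtwin still lies in the future, the
				 * RTO was spurious and CC_RTO_ERR restores
				 * the cwnd, ssthresh and recovery state saved
				 * before the timeout.
				 */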
1675 if (tp->t_rxtshift == 1 &&
1676 tp->t_flags & TF_PREVVALID &&
1677 (int)(ticks - tp->t_badrxtwin) < 0) {
1678 cc_cong_signal(tp, th, CC_RTO_ERR);
1679 }
1680
1681 /*
1682 * Recalculate the transmit timer / rtt.
1683 *
1684 * Some boxes send broken timestamp replies
1685 * during the SYN+ACK phase, ignore
1686 * timestamps of 0 or we could calculate a
1687 * huge RTT and blow up the retransmit timer.
1688 */
1689 if ((to.to_flags & TOF_TS) != 0 &&
1690 to.to_tsecr) {
1691 u_int t;
1692
1693 t = tcp_ts_getticks() - to.to_tsecr;
1694 if (!tp->t_rttlow || tp->t_rttlow > t)
1695 tp->t_rttlow = t;
1696 tcp_xmit_timer(tp,
1697 TCP_TS_TO_TICKS(t) + 1);
1698 } else if (tp->t_rtttime &&
1699 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1700 if (!tp->t_rttlow ||
1701 tp->t_rttlow > ticks - tp->t_rtttime)
1702 tp->t_rttlow = ticks - tp->t_rtttime;
1703 tcp_xmit_timer(tp,
1704 ticks - tp->t_rtttime);
1705 }
1706 acked = BYTES_THIS_ACK(tp, th);
1707
1708 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1709 hhook_run_tcp_est_in(tp, th, &to);
1710
1711 TCPSTAT_INC(tcps_rcvackpack);
1712 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1713 sbdrop(&so->so_snd, acked);
1714 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1715 SEQ_LEQ(th->th_ack, tp->snd_recover))
1716 tp->snd_recover = th->th_ack - 1;
1717
1718 /*
1719 * Let the congestion control algorithm update
1720 * congestion control related information. This
1721 * typically means increasing the congestion
1722 * window.
1723 */
1724 cc_ack_received(tp, th, CC_ACK);
1725
1726 tp->snd_una = th->th_ack;
1727 /*
1728 * Pull snd_wl2 up to prevent seq wrap relative
1729 * to th_ack.
1730 */
1731 tp->snd_wl2 = th->th_ack;
1732 tp->t_dupacks = 0;
1733 m_freem(m);
1734 ND6_HINT(tp); /* Some progress has been made. */
1735
1736 /*
1737 * If all outstanding data are acked, stop
1738 * retransmit timer, otherwise restart timer
1739 * using current (possibly backed-off) value.
1740 * If process is waiting for space,
1741 * wakeup/selwakeup/signal. If data
1742 * are ready to send, let tcp_output
1743 * decide between more output or persist.
1744 */
1745 #ifdef TCPDEBUG
1746 if (so->so_options & SO_DEBUG)
1747 tcp_trace(TA_INPUT, ostate, tp,
1748 (void *)tcp_saveipgen,
1749 &tcp_savetcp, 0);
1750 #endif
1751 if (tp->snd_una == tp->snd_max)
1752 tcp_timer_activate(tp, TT_REXMT, 0);
1753 else if (!tcp_timer_active(tp, TT_PERSIST))
1754 tcp_timer_activate(tp, TT_REXMT,
1755 tp->t_rxtcur);
1756 sowwakeup(so);
1757 if (so->so_snd.sb_cc)
1758 (void) tcp_output(tp);
1759 goto check_delack;
1760 }
1761 } else if (th->th_ack == tp->snd_una &&
1762 tlen <= sbspace(&so->so_rcv)) {
1763 int newsize = 0; /* automatic sockbuf scaling */
1764
1765 /*
1766 * This is a pure, in-sequence data packet with
1767 * nothing on the reassembly queue and we have enough
1768 * buffer space to take it.
1769 */
1770 if (ti_locked == TI_WLOCKED)
1771 INP_INFO_WUNLOCK(&V_tcbinfo);
1772 ti_locked = TI_UNLOCKED;
1773
1774 /* Clean receiver SACK report if present */
1775 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1776 tcp_clean_sackreport(tp);
1777 TCPSTAT_INC(tcps_preddat);
1778 tp->rcv_nxt += tlen;
1779 /*
1780 * Pull snd_wl1 up to prevent seq wrap relative to
1781 * th_seq.
1782 */
1783 tp->snd_wl1 = th->th_seq;
1784 /*
1785 * Pull rcv_up up to prevent seq wrap relative to
1786 * rcv_nxt.
1787 */
1788 tp->rcv_up = tp->rcv_nxt;
1789 TCPSTAT_INC(tcps_rcvpack);
1790 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1791 ND6_HINT(tp); /* Some progress has been made */
1792 #ifdef TCPDEBUG
1793 if (so->so_options & SO_DEBUG)
1794 tcp_trace(TA_INPUT, ostate, tp,
1795 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1796 #endif
1797 /*
1798 * Automatic sizing of receive socket buffer. Often the send
1799 * buffer size is not optimally adjusted to the actual network
1800 * conditions at hand (delay bandwidth product). Setting the
1801 * buffer size too small limits throughput on links with high
1802 * bandwidth and high delay (e.g. trans-continental/oceanic links).
1803 *
1804 * On the receive side the socket buffer memory is only rarely
1805 * used to any significant extent. This allows us to be much
1806 * more aggressive in scaling the receive socket buffer. For
1807 * the case that the buffer space is actually used to a large
1808 * extent and we run out of kernel memory, we can simply drop
1809 * the new segments; TCP on the sender will just retransmit them
1810 * later. Setting the buffer size too big may only consume too
1811 * much kernel memory if the application doesn't read() from
1812 * the socket or packet loss or reordering makes use of the
1813 * reassembly queue.
1814 *
1815 * The criteria to step up the receive buffer one notch are:
1816 * 1. the number of bytes received is counted over the time it
1817 * takes one timestamp to be reflected back to us (the RTT);
1818 * 2. the bytes received per RTT exceed seven eighths of the
1819 * current socket buffer size;
1820 * 3. the receive buffer size has not hit the maximal automatic size;
1821 *
1822 * This algorithm does one step per RTT at most and only if
1823 * we receive a bulk stream w/o packet losses or reorderings.
1824 * Shrinking the buffer during idle times is not necessary as
1825 * it doesn't consume any memory when idle.
1826 *
1827 * TODO: Only step up if the application is actually serving
1828 * the buffer to better manage the socket buffer resources.
1829 */
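	/*
	 * Illustrative sketch only (the numbers are hypothetical; the
	 * real step size and ceiling come from the net.inet.tcp.recvbuf_*
	 * sysctls): with sb_hiwat at 64k and an increment of 16k, a
	 * sender that delivers more than 7/8 * 64k = 56k within one RTT
	 * causes the buffer to grow to 80k, and the same test is applied
	 * again one RTT later until the configured maximum is reached.
	 */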
1830 if (V_tcp_do_autorcvbuf &&
1831 to.to_tsecr &&
1832 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1833 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
1834 to.to_tsecr - tp->rfbuf_ts < hz) {
1835 if (tp->rfbuf_cnt >
1836 (so->so_rcv.sb_hiwat / 8 * 7) &&
1837 so->so_rcv.sb_hiwat <
1838 V_tcp_autorcvbuf_max) {
1839 newsize =
1840 min(so->so_rcv.sb_hiwat +
1841 V_tcp_autorcvbuf_inc,
1842 V_tcp_autorcvbuf_max);
1843 }
1844 /* Start over with next RTT. */
1845 tp->rfbuf_ts = 0;
1846 tp->rfbuf_cnt = 0;
1847 } else
1848 tp->rfbuf_cnt += tlen; /* add up */
1849 }
1850
1851 /* Add data to socket buffer. */
1852 SOCKBUF_LOCK(&so->so_rcv);
1853 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1854 m_freem(m);
1855 } else {
1856 /*
1857 * Set new socket buffer size.
1858 * Give up when limit is reached.
1859 */
1860 if (newsize)
1861 if (!sbreserve_locked(&so->so_rcv,
1862 newsize, so, NULL))
1863 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1864 m_adj(m, drop_hdrlen); /* delayed header drop */
1865 sbappendstream_locked(&so->so_rcv, m);
1866 }
1867 /* NB: sorwakeup_locked() does an implicit unlock. */
1868 sorwakeup_locked(so);
1869 if (DELAY_ACK(tp, tlen)) {
1870 tp->t_flags |= TF_DELACK;
1871 } else {
1872 tp->t_flags |= TF_ACKNOW;
1873 tcp_output(tp);
1874 }
1875 goto check_delack;
1876 }
1877 }
1878
1879 /*
1880 * Calculate amount of space in receive window,
1881 * and then do TCP input processing.
1882 * Receive window is amount of space in rcv queue,
1883 * but not less than advertised window.
1884 */
1885 win = sbspace(&so->so_rcv);
1886 if (win < 0)
1887 win = 0;
1888 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
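	/*
	 * Note that taking the maximum with (rcv_adv - rcv_nxt) keeps the
	 * window we offer from shrinking below what has already been
	 * advertised to the peer, which RFC 793 strongly discourages.
	 */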
1889
1890 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1891 tp->rfbuf_ts = 0;
1892 tp->rfbuf_cnt = 0;
1893
1894 switch (tp->t_state) {
1895
1896 /*
1897 * If the state is SYN_RECEIVED:
1898 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1899 */
1900 case TCPS_SYN_RECEIVED:
1901 if ((thflags & TH_ACK) &&
1902 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1903 SEQ_GT(th->th_ack, tp->snd_max))) {
1904 rstreason = BANDLIM_RST_OPENPORT;
1905 goto dropwithreset;
1906 }
1907 break;
1908
1909 /*
1910 * If the state is SYN_SENT:
1911 * if seg contains an ACK, but not for our SYN, drop the input.
1912 * if seg contains a RST, then drop the connection.
1913 * if seg does not contain SYN, then drop it.
1914 * Otherwise this is an acceptable SYN segment
1915 * initialize tp->rcv_nxt and tp->irs
1916 * if seg contains ack then advance tp->snd_una
1917 * if seg contains an ECE and ECN support is enabled, the stream
1918 * is ECN capable.
1919 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1920 * arrange for segment to be acked (eventually)
1921 * continue processing rest of data/controls, beginning with URG
1922 */
1923 case TCPS_SYN_SENT:
1924 if ((thflags & TH_ACK) &&
1925 (SEQ_LEQ(th->th_ack, tp->iss) ||
1926 SEQ_GT(th->th_ack, tp->snd_max))) {
1927 rstreason = BANDLIM_UNLIMITED;
1928 goto dropwithreset;
1929 }
1930 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
1931 TCP_PROBE5(connect_refused, NULL, tp, m->m_data, tp,
1932 th);
1933 tp = tcp_drop(tp, ECONNREFUSED);
1934 }
1935 if (thflags & TH_RST)
1936 goto drop;
1937 if (!(thflags & TH_SYN))
1938 goto drop;
1939
1940 tp->irs = th->th_seq;
1941 tcp_rcvseqinit(tp);
1942 if (thflags & TH_ACK) {
1943 TCPSTAT_INC(tcps_connects);
1944 soisconnected(so);
1945 #ifdef MAC
1946 mac_socketpeer_set_from_mbuf(m, so);
1947 #endif
1948 /* Do window scaling on this connection? */
1949 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1950 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1951 tp->rcv_scale = tp->request_r_scale;
1952 }
1953 tp->rcv_adv += imin(tp->rcv_wnd,
1954 TCP_MAXWIN << tp->rcv_scale);
1955 tp->snd_una++; /* SYN is acked */
1956 /*
1957 * If there's data, delay ACK; if there's also a FIN,
1958 * ACKNOW will be turned on later.
1959 */
1960 if (DELAY_ACK(tp, tlen) && tlen != 0)
1961 tcp_timer_activate(tp, TT_DELACK,
1962 tcp_delacktime);
1963 else
1964 tp->t_flags |= TF_ACKNOW;
1965
1966 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
1967 tp->t_flags |= TF_ECN_PERMIT;
1968 TCPSTAT_INC(tcps_ecn_shs);
1969 }
1970
1971 /*
1972 * Received <SYN,ACK> in SYN_SENT[*] state.
1973 * Transitions:
1974 * SYN_SENT --> ESTABLISHED
1975 * SYN_SENT* --> FIN_WAIT_1
1976 */
1977 tp->t_starttime = ticks;
1978 if (tp->t_flags & TF_NEEDFIN) {
1979 tcp_state_change(tp, TCPS_FIN_WAIT_1);
1980 tp->t_flags &= ~TF_NEEDFIN;
1981 thflags &= ~TH_SYN;
1982 } else {
1983 tcp_state_change(tp, TCPS_ESTABLISHED);
1984 TCP_PROBE5(connect_established, NULL, tp,
1985 m->m_data, tp, th);
1986 cc_conn_init(tp);
1987 tcp_timer_activate(tp, TT_KEEP,
1988 TP_KEEPIDLE(tp));
1989 }
1990 } else {
1991 /*
1992 * Received initial SYN in SYN-SENT[*] state =>
1993 * simultaneous open. If segment contains CC option
1994 * and there is a cached CC, apply TAO test.
1995 * If it succeeds, connection is half-synchronized.
1996 * Otherwise, do 3-way handshake:
1997 * SYN-SENT -> SYN-RECEIVED
1998 * SYN-SENT* -> SYN-RECEIVED*
1999 * If there was no CC option, clear cached CC value.
2000 */
2001 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
2002 tcp_timer_activate(tp, TT_REXMT, 0);
2003 tcp_state_change(tp, TCPS_SYN_RECEIVED);
2004 }
2005
2006 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
2007 "ti_locked %d", __func__, ti_locked));
2008 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2009 INP_WLOCK_ASSERT(tp->t_inpcb);
2010
2011 /*
2012 * Advance th->th_seq to correspond to first data byte.
2013 * If data, trim to stay within window,
2014 * dropping FIN if necessary.
2015 */
2016 th->th_seq++;
2017 if (tlen > tp->rcv_wnd) {
2018 todrop = tlen - tp->rcv_wnd;
2019 m_adj(m, -todrop);
2020 tlen = tp->rcv_wnd;
2021 thflags &= ~TH_FIN;
2022 TCPSTAT_INC(tcps_rcvpackafterwin);
2023 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2024 }
2025 tp->snd_wl1 = th->th_seq - 1;
2026 tp->rcv_up = th->th_seq;
2027 /*
2028 * Client side of transaction: already sent SYN and data.
2029 * If the remote host used T/TCP to validate the SYN,
2030 * our data will be ACK'd; if so, enter normal data segment
2031 * processing in the middle of step 5, ack processing.
2032 * Otherwise, goto step 6.
2033 */
2034 if (thflags & TH_ACK)
2035 goto process_ACK;
2036
2037 goto step6;
2038
2039 /*
2040 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2041 * do normal processing.
2042 *
2043 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2044 */
2045 case TCPS_LAST_ACK:
2046 case TCPS_CLOSING:
2047 break; /* continue normal processing */
2048 }
2049
2050 /*
2051 * States other than LISTEN or SYN_SENT.
2052 * First check the RST flag and sequence number since reset segments
2053 * are exempt from the timestamp and connection count tests. This
2054 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2055 * below which allowed reset segments in half the sequence space
2056 * to fall through and be processed (which gives forged reset
2057 * segments with a random sequence number a 50 percent chance of
2058 * killing a connection).
2059 * Then check timestamp, if present.
2060 * Then check the connection count, if present.
2061 * Then check that at least some bytes of segment are within
2062 * receive window. If segment begins before rcv_nxt,
2063 * drop leading data (and SYN); if nothing left, just ack.
2064 *
2065 *
2066 * If the RST bit is set, check the sequence number to see
2067 * if this is a valid reset segment.
2068 * RFC 793 page 37:
2069 * In all states except SYN-SENT, all reset (RST) segments
2070 * are validated by checking their SEQ-fields. A reset is
2071 * valid if its sequence number is in the window.
2072 * Note: this does not take into account delayed ACKs, so
2073 * we should test against last_ack_sent instead of rcv_nxt.
2074 * The sequence number in the reset segment is normally an
2075 * echo of our outgoing acknowledgement numbers, but some hosts
2076 * send a reset with the sequence number at the rightmost edge
2077 * of our receive window, and we have to handle this case.
2078 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
2079 * that brute force RST attacks are possible. To combat this,
2080 * we use a much stricter check while in the ESTABLISHED state,
2081 * only accepting RSTs where the sequence number is equal to
2082 * last_ack_sent. In all other states (the states in which a
2083 * RST is more likely), the more permissive check is used.
2084 * If we have multiple segments in flight, the initial reset
2085 * segment sequence numbers will be to the left of last_ack_sent,
2086 * but they will eventually catch up.
2087 * In any case, it never made sense to trim reset segments to
2088 * fit the receive window since RFC 1122 says:
2089 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
2090 *
2091 * A TCP SHOULD allow a received RST segment to include data.
2092 *
2093 * DISCUSSION
2094 * It has been suggested that a RST segment could contain
2095 * ASCII text that encoded and explained the cause of the
2096 * RST. No standard has yet been established for such
2097 * data.
2098 *
2099 * If the reset segment passes the sequence number test examine
2100 * the state:
2101 * SYN_RECEIVED STATE:
2102 * If passive open, return to LISTEN state.
2103 * If active open, inform user that connection was refused.
2104 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
2105 * Inform user that connection was reset, and close tcb.
2106 * CLOSING, LAST_ACK STATES:
2107 * Close the tcb.
2108 * TIME_WAIT STATE:
2109 * Drop the segment - see Stevens, vol. 2, p. 964 and
2110 * RFC 1337.
2111 */
2112 if (thflags & TH_RST) {
2113 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2114 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2115 switch (tp->t_state) {
2116
2117 case TCPS_SYN_RECEIVED:
2118 so->so_error = ECONNREFUSED;
2119 goto close;
2120
2121 case TCPS_ESTABLISHED:
2122 if (V_tcp_insecure_rst == 0 &&
2123 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
2124 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
2125 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2126 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
2127 TCPSTAT_INC(tcps_badrst);
2128 goto drop;
2129 }
2130 /* FALLTHROUGH */
2131 case TCPS_FIN_WAIT_1:
2132 case TCPS_FIN_WAIT_2:
2133 case TCPS_CLOSE_WAIT:
2134 so->so_error = ECONNRESET;
2135 close:
2136 KASSERT(ti_locked == TI_WLOCKED,
2137 ("tcp_do_segment: TH_RST 1 ti_locked %d",
2138 ti_locked));
2139 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2140
2141 tcp_state_change(tp, TCPS_CLOSED);
2142 TCPSTAT_INC(tcps_drops);
2143 tp = tcp_close(tp);
2144 break;
2145
2146 case TCPS_CLOSING:
2147 case TCPS_LAST_ACK:
2148 KASSERT(ti_locked == TI_WLOCKED,
2149 ("tcp_do_segment: TH_RST 2 ti_locked %d",
2150 ti_locked));
2151 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2152
2153 tp = tcp_close(tp);
2154 break;
2155 }
2156 }
2157 goto drop;
2158 }
2159
2160 /*
2161 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2162 * and it's less than ts_recent, drop it.
2163 */
2164 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2165 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2166
2167 /* Check to see if ts_recent is over 24 days old. */
2168 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2169 /*
2170 * Invalidate ts_recent. If this segment updates
2171 * ts_recent, the age will be reset later and ts_recent
2172 * will get a valid value. If it does not, setting
2173 * ts_recent to zero will at least satisfy the
2174 * requirement that zero be placed in the timestamp
2175 * echo reply when ts_recent isn't valid. The
2176 * age isn't reset until we get a valid ts_recent
2177 * because we don't want out-of-order segments to be
2178 * dropped when ts_recent is old.
2179 */
2180 tp->ts_recent = 0;
2181 } else {
2182 TCPSTAT_INC(tcps_rcvduppack);
2183 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2184 TCPSTAT_INC(tcps_pawsdrop);
2185 if (tlen)
2186 goto dropafterack;
2187 goto drop;
2188 }
2189 }
2190
2191 /*
2192 * In the SYN-RECEIVED state, validate that the packet belongs to
2193 * this connection before trimming the data to fit the receive
2194 * window. Check the sequence number versus IRS since we know
2195 * the sequence numbers haven't wrapped. This is a partial fix
2196 * for the "LAND" DoS attack.
2197 */
2198 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2199 rstreason = BANDLIM_RST_OPENPORT;
2200 goto dropwithreset;
2201 }
2202
2203 todrop = tp->rcv_nxt - th->th_seq;
2204 if (todrop > 0) {
2205 if (thflags & TH_SYN) {
2206 thflags &= ~TH_SYN;
2207 th->th_seq++;
2208 if (th->th_urp > 1)
2209 th->th_urp--;
2210 else
2211 thflags &= ~TH_URG;
2212 todrop--;
2213 }
2214 /*
2215 * Following if statement from Stevens, vol. 2, p. 960.
2216 */
2217 if (todrop > tlen
2218 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2219 /*
2220 * Any valid FIN must be to the left of the window.
2221 * At this point the FIN must be a duplicate or out
2222 * of sequence; drop it.
2223 */
2224 thflags &= ~TH_FIN;
2225
2226 /*
2227 * Send an ACK to resynchronize and drop any data.
2228 * But keep on processing for RST or ACK.
2229 */
2230 tp->t_flags |= TF_ACKNOW;
2231 todrop = tlen;
2232 TCPSTAT_INC(tcps_rcvduppack);
2233 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2234 } else {
2235 TCPSTAT_INC(tcps_rcvpartduppack);
2236 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2237 }
2238 drop_hdrlen += todrop; /* drop from the top afterwards */
2239 th->th_seq += todrop;
2240 tlen -= todrop;
2241 if (th->th_urp > todrop)
2242 th->th_urp -= todrop;
2243 else {
2244 thflags &= ~TH_URG;
2245 th->th_urp = 0;
2246 }
2247 }
2248
2249 /*
2250 * If new data are received on a connection after the
2251 * user processes are gone, then RST the other end.
2252 */
2253 if ((so->so_state & SS_NOFDREF) &&
2254 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2255 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && "
2256 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2257 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2258
2259 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2260 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2261 "after socket was closed, "
2262 "sending RST and removing tcpcb\n",
2263 s, __func__, tcpstates[tp->t_state], tlen);
2264 free(s, M_TCPLOG);
2265 }
2266 tp = tcp_close(tp);
2267 TCPSTAT_INC(tcps_rcvafterclose);
2268 rstreason = BANDLIM_UNLIMITED;
2269 goto dropwithreset;
2270 }
2271
2272 /*
2273 * If segment ends after window, drop trailing data
2274 * (and PUSH and FIN); if nothing left, just ACK.
2275 */
2276 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2277 if (todrop > 0) {
2278 TCPSTAT_INC(tcps_rcvpackafterwin);
2279 if (todrop >= tlen) {
2280 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2281 /*
2282 * If window is closed can only take segments at
2283 * window edge, and have to drop data and PUSH from
2284 * incoming segments. Continue processing, but
2285 * remember to ack. Otherwise, drop segment
2286 * and ack.
2287 */
2288 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2289 tp->t_flags |= TF_ACKNOW;
2290 TCPSTAT_INC(tcps_rcvwinprobe);
2291 } else
2292 goto dropafterack;
2293 } else
2294 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2295 m_adj(m, -todrop);
2296 tlen -= todrop;
2297 thflags &= ~(TH_PUSH|TH_FIN);
2298 }
2299
2300 /*
2301 * If last ACK falls within this segment's sequence numbers,
2302 * record its timestamp.
2303 * NOTE:
2304 * 1) That the test incorporates suggestions from the latest
2305 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2306 * 2) That updating only on newer timestamps interferes with
2307 * our earlier PAWS tests, so this check should be solely
2308 * predicated on the sequence space of this segment.
2309 * 3) That we modify the segment boundary check to be
2310 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2311 * instead of RFC1323's
2312 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2313 * This modified check allows us to overcome RFC1323's
2314 * limitations as described in Stevens TCP/IP Illustrated
2315 * Vol. 2 p.869. In such cases, we can still calculate the
2316 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2317 */
2318 if ((to.to_flags & TOF_TS) != 0 &&
2319 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2320 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2321 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2322 tp->ts_recent_age = tcp_ts_getticks();
2323 tp->ts_recent = to.to_tsval;
2324 }
2325
2326 /*
2327 * If a SYN is in the window, then this is an
2328 * error and we send an RST and drop the connection.
2329 */
2330 if (thflags & TH_SYN) {
2331 KASSERT(ti_locked == TI_WLOCKED,
2332 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2333 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2334
2335 tp = tcp_drop(tp, ECONNRESET);
2336 rstreason = BANDLIM_UNLIMITED;
2337 goto drop;
2338 }
2339
2340 /*
2341 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2342 * flag is on (half-synchronized state), then queue data for
2343 * later processing; else drop segment and return.
2344 */
2345 if ((thflags & TH_ACK) == 0) {
2346 if (tp->t_state == TCPS_SYN_RECEIVED ||
2347 (tp->t_flags & TF_NEEDSYN))
2348 goto step6;
2349 else if (tp->t_flags & TF_ACKNOW)
2350 goto dropafterack;
2351 else
2352 goto drop;
2353 }
2354
2355 /*
2356 * Ack processing.
2357 */
2358 switch (tp->t_state) {
2359
2360 /*
2361 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2362 * ESTABLISHED state and continue processing.
2363 * The ACK was checked above.
2364 */
2365 case TCPS_SYN_RECEIVED:
2366
2367 TCPSTAT_INC(tcps_connects);
2368 soisconnected(so);
2369 /* Do window scaling? */
2370 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2371 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2372 tp->rcv_scale = tp->request_r_scale;
2373 tp->snd_wnd = tiwin;
2374 }
2375 /*
2376 * Make transitions:
2377 * SYN-RECEIVED -> ESTABLISHED
2378 * SYN-RECEIVED* -> FIN-WAIT-1
2379 */
2380 tp->t_starttime = ticks;
2381 if (tp->t_flags & TF_NEEDFIN) {
2382 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2383 tp->t_flags &= ~TF_NEEDFIN;
2384 } else {
2385 tcp_state_change(tp, TCPS_ESTABLISHED);
2386 TCP_PROBE5(accept_established, NULL, tp, m->m_data, tp,
2387 th);
2388 cc_conn_init(tp);
2389 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2390 }
2391 /*
2392 * If segment contains data or ACK, will call tcp_reass()
2393 * later; if not, do so now to pass queued data to user.
2394 */
2395 if (tlen == 0 && (thflags & TH_FIN) == 0)
2396 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2397 (struct mbuf *)0);
2398 tp->snd_wl1 = th->th_seq - 1;
2399 /* FALLTHROUGH */
2400
2401 /*
2402 * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
2403 * ACKs. If the ack is in the range
2404 * tp->snd_una < th->th_ack <= tp->snd_max
2405 * then advance tp->snd_una to th->th_ack and drop
2406 * data from the retransmission queue. If this ACK reflects
2407 * more up-to-date window information, we update our window information.
2408 */
2409 case TCPS_ESTABLISHED:
2410 case TCPS_FIN_WAIT_1:
2411 case TCPS_FIN_WAIT_2:
2412 case TCPS_CLOSE_WAIT:
2413 case TCPS_CLOSING:
2414 case TCPS_LAST_ACK:
2415 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2416 TCPSTAT_INC(tcps_rcvacktoomuch);
2417 goto dropafterack;
2418 }
2419 if ((tp->t_flags & TF_SACK_PERMIT) &&
2420 ((to.to_flags & TOF_SACK) ||
2421 !TAILQ_EMPTY(&tp->snd_holes)))
2422 tcp_sack_doack(tp, &to, th->th_ack);
2423
2424 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2425 hhook_run_tcp_est_in(tp, th, &to);
2426
2427 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2428 if (tlen == 0 && tiwin == tp->snd_wnd) {
2429 TCPSTAT_INC(tcps_rcvdupack);
2430 /*
2431 * If we have outstanding data (other than
2432 * a window probe), this is a completely
2433 * duplicate ack (i.e., window info didn't
2434 * change), the ack is the biggest we've
2435 * seen and we've seen exactly our rexmt
2436 * threshold of them, assume a packet
2437 * has been dropped and retransmit it.
2438 * Kludge snd_nxt & the congestion
2439 * window so we send only this one
2440 * packet.
2441 *
2442 * We know we're losing at the current
2443 * window size so do congestion avoidance
2444 * (set ssthresh to half the current window
2445 * and pull our congestion window back to
2446 * the new ssthresh).
2447 *
2448 * Dup acks mean that packets have left the
2449 * network (they're now cached at the receiver)
2450 * so bump cwnd by the amount in the receiver
2451 * to keep a constant cwnd packets in the
2452 * network.
2453 *
2454 * When using TCP ECN, notify the peer that
2455 * we reduced the cwnd.
2456 */
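				/*
				 * Rough sketch of the common case (the exact
				 * ssthresh/cwnd updates are delegated to the
				 * attached cc(4) module): on the third
				 * duplicate ACK, ssthresh is typically cut to
				 * roughly half the data in flight, cwnd drops
				 * to one segment and the missing segment is
				 * retransmitted; each further duplicate ACK
				 * then inflates cwnd by one segment so that
				 * new data can keep flowing.
				 */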
2457 if (!tcp_timer_active(tp, TT_REXMT) ||
2458 th->th_ack != tp->snd_una)
2459 tp->t_dupacks = 0;
2460 else if (++tp->t_dupacks > tcprexmtthresh ||
2461 IN_FASTRECOVERY(tp->t_flags)) {
2462 cc_ack_received(tp, th, CC_DUPACK);
2463 if ((tp->t_flags & TF_SACK_PERMIT) &&
2464 IN_FASTRECOVERY(tp->t_flags)) {
2465 int awnd;
2466
2467 /*
2468 * Compute the amount of data in flight first.
2469 * We can inject new data into the pipe iff
2470 * we have less than 1/2 the original window's
2471 * worth of data in flight.
2472 */
2473 awnd = (tp->snd_nxt - tp->snd_fack) +
2474 tp->sackhint.sack_bytes_rexmit;
2475 if (awnd < tp->snd_ssthresh) {
2476 tp->snd_cwnd += tp->t_maxseg;
2477 if (tp->snd_cwnd > tp->snd_ssthresh)
2478 tp->snd_cwnd = tp->snd_ssthresh;
2479 }
2480 } else
2481 tp->snd_cwnd += tp->t_maxseg;
2482 if ((thflags & TH_FIN) &&
2483 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2484 /*
2485 * If it's a FIN we need to process
2486 * it to avoid a race where both
2487 * sides enter FIN-WAIT and send FIN|ACK
2488 * at the same time.
2489 */
2490 break;
2491 }
2492 (void) tcp_output(tp);
2493 goto drop;
2494 } else if (tp->t_dupacks == tcprexmtthresh) {
2495 tcp_seq onxt = tp->snd_nxt;
2496
2497 /*
2498 * If we're doing sack, check to
2499 * see if we're already in sack
2500 * recovery. If we're not doing sack,
2501 * check to see if we're in newreno
2502 * recovery.
2503 */
2504 if (tp->t_flags & TF_SACK_PERMIT) {
2505 if (IN_FASTRECOVERY(tp->t_flags)) {
2506 tp->t_dupacks = 0;
2507 break;
2508 }
2509 } else {
2510 if (SEQ_LEQ(th->th_ack,
2511 tp->snd_recover)) {
2512 tp->t_dupacks = 0;
2513 break;
2514 }
2515 }
2516 /* Congestion signal before ack. */
2517 cc_cong_signal(tp, th, CC_NDUPACK);
2518 cc_ack_received(tp, th, CC_DUPACK);
2519 tcp_timer_activate(tp, TT_REXMT, 0);
2520 tp->t_rtttime = 0;
2521 if (tp->t_flags & TF_SACK_PERMIT) {
2522 TCPSTAT_INC(
2523 tcps_sack_recovery_episode);
2524 tp->sack_newdata = tp->snd_nxt;
2525 tp->snd_cwnd = tp->t_maxseg;
2526 (void) tcp_output(tp);
2527 goto drop;
2528 }
2529 tp->snd_nxt = th->th_ack;
2530 tp->snd_cwnd = tp->t_maxseg;
2531 if ((thflags & TH_FIN) &&
2532 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2533 /*
2534 * If it's a FIN we need to process
2535 * it to avoid a race where both
2536 * sides enter FIN-WAIT and send FIN|ACK
2537 * at the same time.
2538 */
2539 break;
2540 }
2541 (void) tcp_output(tp);
2542 KASSERT(tp->snd_limited <= 2,
2543 ("%s: tp->snd_limited too big",
2544 __func__));
2545 tp->snd_cwnd = tp->snd_ssthresh +
2546 tp->t_maxseg *
2547 (tp->t_dupacks - tp->snd_limited);
2548 if (SEQ_GT(onxt, tp->snd_nxt))
2549 tp->snd_nxt = onxt;
2550 goto drop;
2551 } else if (V_tcp_do_rfc3042) {
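					/*
					 * RFC 3042 limited transmit: for the
					 * first two duplicate ACKs we may send
					 * one previously unsent segment per
					 * ACK without touching the real
					 * congestion window (it is restored
					 * below), which helps trigger fast
					 * retransmit on small windows.
					 */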
2552 cc_ack_received(tp, th, CC_DUPACK);
2553 u_long oldcwnd = tp->snd_cwnd;
2554 tcp_seq oldsndmax = tp->snd_max;
2555 u_int sent;
2556 int avail;
2557
2558 KASSERT(tp->t_dupacks == 1 ||
2559 tp->t_dupacks == 2,
2560 ("%s: dupacks not 1 or 2",
2561 __func__));
2562 if (tp->t_dupacks == 1)
2563 tp->snd_limited = 0;
2564 tp->snd_cwnd =
2565 (tp->snd_nxt - tp->snd_una) +
2566 (tp->t_dupacks - tp->snd_limited) *
2567 tp->t_maxseg;
2568 if ((thflags & TH_FIN) &&
2569 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2570 /*
2571 * If it's a FIN we need to process
2572 * it to avoid a race where both
2573 * sides enter FIN-WAIT and send FIN|ACK
2574 * at the same time.
2575 */
2576 break;
2577 }
2578 /*
2579 * Only call tcp_output when there
2580 * is new data available to be sent.
2581 * Otherwise we would send pure ACKs.
2582 */
2583 SOCKBUF_LOCK(&so->so_snd);
2584 avail = so->so_snd.sb_cc -
2585 (tp->snd_nxt - tp->snd_una);
2586 SOCKBUF_UNLOCK(&so->so_snd);
2587 if (avail > 0)
2588 (void) tcp_output(tp);
2589 sent = tp->snd_max - oldsndmax;
2590 if (sent > tp->t_maxseg) {
2591 KASSERT((tp->t_dupacks == 2 &&
2592 tp->snd_limited == 0) ||
2593 (sent == tp->t_maxseg + 1 &&
2594 tp->t_flags & TF_SENTFIN),
2595 ("%s: sent too much",
2596 __func__));
2597 tp->snd_limited = 2;
2598 } else if (sent > 0)
2599 ++tp->snd_limited;
2600 tp->snd_cwnd = oldcwnd;
2601 goto drop;
2602 }
2603 } else
2604 tp->t_dupacks = 0;
2605 break;
2606 }
2607
2608 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2609 ("%s: th_ack <= snd_una", __func__));
2610
2611 /*
2612 * If the congestion window was inflated to account
2613 * for the other side's cached packets, retract it.
2614 */
2615 if (IN_FASTRECOVERY(tp->t_flags)) {
2616 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2617 if (tp->t_flags & TF_SACK_PERMIT)
2618 tcp_sack_partialack(tp, th);
2619 else
2620 tcp_newreno_partial_ack(tp, th);
2621 } else
2622 cc_post_recovery(tp, th);
2623 }
2624 tp->t_dupacks = 0;
2625 /*
2626 * If we reach this point, ACK is not a duplicate,
2627 * i.e., it ACKs something we sent.
2628 */
2629 if (tp->t_flags & TF_NEEDSYN) {
2630 /*
2631 * T/TCP: Connection was half-synchronized, and our
2632 * SYN has been ACK'd (so connection is now fully
2633 * synchronized). Go to non-starred state,
2634 * increment snd_una for ACK of SYN, and check if
2635 * we can do window scaling.
2636 */
2637 tp->t_flags &= ~TF_NEEDSYN;
2638 tp->snd_una++;
2639 /* Do window scaling? */
2640 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2641 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2642 tp->rcv_scale = tp->request_r_scale;
2643 /* Send window already scaled. */
2644 }
2645 }
2646
2647 process_ACK:
2648 INP_WLOCK_ASSERT(tp->t_inpcb);
2649
2650 acked = BYTES_THIS_ACK(tp, th);
2651 TCPSTAT_INC(tcps_rcvackpack);
2652 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2653
2654 /*
2655 * If we just performed our first retransmit, and the ACK
2656 * arrives within our recovery window, then it was a mistake
2657 * to do the retransmit in the first place. Recover our
2658 * original cwnd and ssthresh, and proceed to transmit where
2659 * we left off.
2660 */
2661 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2662 (int)(ticks - tp->t_badrxtwin) < 0)
2663 cc_cong_signal(tp, th, CC_RTO_ERR);
2664
2665 /*
2666 * If we have a timestamp reply, update smoothed
2667 * round trip time. If no timestamp is present but
2668 * transmit timer is running and timed sequence
2669 * number was acked, update smoothed round trip time.
2670 * Since we now have an rtt measurement, cancel the
2671 * timer backoff (cf., Phil Karn's retransmit alg.).
2672 * Recompute the initial retransmit timer.
2673 *
2674 * Some boxes send broken timestamp replies
2675 * during the SYN+ACK phase, ignore
2676 * timestamps of 0 or we could calculate a
2677 * huge RTT and blow up the retransmit timer.
2678 */
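		/*
		 * Sketch of the timestamp case below: to_tsecr echoes a
		 * value we put on the wire earlier, so tcp_ts_getticks() -
		 * to_tsecr is the measured RTT in timestamp units
		 * (milliseconds here), which TCP_TS_TO_TICKS() converts to
		 * kernel ticks before it is fed to tcp_xmit_timer().
		 */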
2679 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2680 u_int t;
2681
2682 t = tcp_ts_getticks() - to.to_tsecr;
2683 if (!tp->t_rttlow || tp->t_rttlow > t)
2684 tp->t_rttlow = t;
2685 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2686 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2687 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2688 tp->t_rttlow = ticks - tp->t_rtttime;
2689 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2690 }
2691
2692 /*
2693 * If all outstanding data is acked, stop retransmit
2694 * timer and remember to restart (more output or persist).
2695 * If there is more data to be acked, restart retransmit
2696 * timer, using current (possibly backed-off) value.
2697 */
2698 if (th->th_ack == tp->snd_max) {
2699 tcp_timer_activate(tp, TT_REXMT, 0);
2700 needoutput = 1;
2701 } else if (!tcp_timer_active(tp, TT_PERSIST))
2702 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2703
2704 /*
2705 * If no data (only SYN) was ACK'd,
2706 * skip rest of ACK processing.
2707 */
2708 if (acked == 0)
2709 goto step6;
2710
2711 /*
2712 * Let the congestion control algorithm update congestion
2713 * control related information. This typically means increasing
2714 * the congestion window.
2715 */
2716 cc_ack_received(tp, th, CC_ACK);
2717
2718 SOCKBUF_LOCK(&so->so_snd);
2719 if (acked > so->so_snd.sb_cc) {
2720 tp->snd_wnd -= so->so_snd.sb_cc;
2721 mfree = sbcut_locked(&so->so_snd,
2722 (int)so->so_snd.sb_cc);
2723 ourfinisacked = 1;
2724 } else {
2725 mfree = sbcut_locked(&so->so_snd, acked);
2726 tp->snd_wnd -= acked;
2727 ourfinisacked = 0;
2728 }
2729 /* NB: sowwakeup_locked() does an implicit unlock. */
2730 sowwakeup_locked(so);
2731 m_freem(mfree);
2732 /* Detect una wraparound. */
2733 if (!IN_RECOVERY(tp->t_flags) &&
2734 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2735 SEQ_LEQ(th->th_ack, tp->snd_recover))
2736 tp->snd_recover = th->th_ack - 1;
2737 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2738 if (IN_RECOVERY(tp->t_flags) &&
2739 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2740 EXIT_RECOVERY(tp->t_flags);
2741 }
2742 tp->snd_una = th->th_ack;
2743 if (tp->t_flags & TF_SACK_PERMIT) {
2744 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2745 tp->snd_recover = tp->snd_una;
2746 }
2747 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2748 tp->snd_nxt = tp->snd_una;
2749
2750 switch (tp->t_state) {
2751
2752 /*
2753 * In FIN_WAIT_1 STATE in addition to the processing
2754 * for the ESTABLISHED state if our FIN is now acknowledged
2755 * then enter FIN_WAIT_2.
2756 */
2757 case TCPS_FIN_WAIT_1:
2758 if (ourfinisacked) {
2759 /*
2760 * If we can't receive any more
2761 * data, then closing user can proceed.
2762 * Starting the timer is contrary to the
2763 * specification, but if we don't get a FIN
2764 * we'll hang forever.
2765 *
2766 * XXXjl:
2767 * we should release the tp also, and use a
2768 * compressed state.
2769 */
2770 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2771 soisdisconnected(so);
2772 tcp_timer_activate(tp, TT_2MSL,
2773 (tcp_fast_finwait2_recycle ?
2774 tcp_finwait2_timeout :
2775 TP_MAXIDLE(tp)));
2776 }
2777 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2778 }
2779 break;
2780
2781 /*
2782 * In CLOSING STATE in addition to the processing for
2783 * the ESTABLISHED state if the ACK acknowledges our FIN
2784 * then enter the TIME-WAIT state, otherwise ignore
2785 * the segment.
2786 */
2787 case TCPS_CLOSING:
2788 if (ourfinisacked) {
2789 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2790 tcp_twstart(tp);
2791 INP_INFO_WUNLOCK(&V_tcbinfo);
2792 m_freem(m);
2793 return;
2794 }
2795 break;
2796
2797 /*
2798 * In LAST_ACK, we may still be waiting for data to drain
2799 * and/or to be acked, as well as for the ack of our FIN.
2800 * If our FIN is now acknowledged, delete the TCB,
2801 * enter the closed state and return.
2802 */
2803 case TCPS_LAST_ACK:
2804 if (ourfinisacked) {
2805 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2806 tp = tcp_close(tp);
2807 goto drop;
2808 }
2809 break;
2810 }
2811 }
2812
2813 step6:
2814 INP_WLOCK_ASSERT(tp->t_inpcb);
2815
2816 /*
2817 * Update window information.
2818 * Don't look at window if no ACK: TACs send garbage on first SYN.
2819 */
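	/*
	 * snd_wl1/snd_wl2 record the seq/ack of the segment that last
	 * updated the window; the test below accepts a segment as newer,
	 * and takes its window, when its seq or ack has advanced, or when
	 * both match and it simply advertises a larger window.
	 */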
2820 if ((thflags & TH_ACK) &&
2821 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2822 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2823 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2824 /* keep track of pure window updates */
2825 if (tlen == 0 &&
2826 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2827 TCPSTAT_INC(tcps_rcvwinupd);
2828 tp->snd_wnd = tiwin;
2829 tp->snd_wl1 = th->th_seq;
2830 tp->snd_wl2 = th->th_ack;
2831 if (tp->snd_wnd > tp->max_sndwnd)
2832 tp->max_sndwnd = tp->snd_wnd;
2833 needoutput = 1;
2834 }
2835
2836 /*
2837 * Process segments with URG.
2838 */
2839 if ((thflags & TH_URG) && th->th_urp &&
2840 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2841 /*
2842 * This is a kludge, but if we receive and accept
2843 * random urgent pointers, we'll crash in
2844 * soreceive. It's hard to imagine someone
2845 * actually wanting to send this much urgent data.
2846 */
2847 SOCKBUF_LOCK(&so->so_rcv);
2848 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2849 th->th_urp = 0; /* XXX */
2850 thflags &= ~TH_URG; /* XXX */
2851 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2852 goto dodata; /* XXX */
2853 }
2854 /*
2855 * If this segment advances the known urgent pointer,
2856 * then mark the data stream. This should not happen
2857 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2858 * a FIN has been received from the remote side.
2859 * In these states we ignore the URG.
2860 *
2861 * According to RFC961 (Assigned Protocols),
2862 * the urgent pointer points to the last octet
2863 * of urgent data. We continue, however,
2864 * to consider it to indicate the first octet
2865 * of data past the urgent section as the original
2866 * spec states (in one of two places).
2867 */
2868 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2869 tp->rcv_up = th->th_seq + th->th_urp;
2870 so->so_oobmark = so->so_rcv.sb_cc +
2871 (tp->rcv_up - tp->rcv_nxt) - 1;
2872 if (so->so_oobmark == 0)
2873 so->so_rcv.sb_state |= SBS_RCVATMARK;
2874 sohasoutofband(so);
2875 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2876 }
2877 SOCKBUF_UNLOCK(&so->so_rcv);
2878 /*
2879 * Remove out-of-band data so it doesn't get presented to the user.
2880 * This can happen independent of advancing the URG pointer,
2881 * but if two URG's are pending at once, some out-of-band
2882 * data may creep in... ick.
2883 */
2884 if (th->th_urp <= (u_long)tlen &&
2885 !(so->so_options & SO_OOBINLINE)) {
2886 /* hdr drop is delayed */
2887 tcp_pulloutofband(so, th, m, drop_hdrlen);
2888 }
2889 } else {
2890 /*
2891 * If no out of band data is expected,
2892 * pull receive urgent pointer along
2893 * with the receive window.
2894 */
2895 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2896 tp->rcv_up = tp->rcv_nxt;
2897 }
2898 dodata: /* XXX */
2899 INP_WLOCK_ASSERT(tp->t_inpcb);
2900
2901 /*
2902 * Process the segment text, merging it into the TCP sequencing queue,
2903 * and arranging for acknowledgment of receipt if necessary.
2904 * This process logically involves adjusting tp->rcv_wnd as data
2905 * is presented to the user (this happens in tcp_usrreq.c,
2906 * case PRU_RCVD). If a FIN has already been received on this
2907 * connection then we just ignore the text.
2908 */
2909 if ((tlen || (thflags & TH_FIN)) &&
2910 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2911 tcp_seq save_start = th->th_seq;
2912 m_adj(m, drop_hdrlen); /* delayed header drop */
2913 /*
2914 * Insert segment which includes th into TCP reassembly queue
2915 * with control block tp. Set thflags to whether reassembly now
2916 * includes a segment with FIN. This handles the common case
2917 * inline (segment is the next to be received on an established
2918 * connection, and the queue is empty), avoiding linkage into
2919 * and removal from the queue and repetition of various
2920 * conversions.
2921 * Set DELACK for segments received in order, but ack
2922 * immediately when segments are out of order (so
2923 * fast retransmit can work).
2924 */
2925 if (th->th_seq == tp->rcv_nxt &&
2926 LIST_EMPTY(&tp->t_segq) &&
2927 TCPS_HAVEESTABLISHED(tp->t_state)) {
2928 if (DELAY_ACK(tp, tlen))
2929 tp->t_flags |= TF_DELACK;
2930 else
2931 tp->t_flags |= TF_ACKNOW;
2932 tp->rcv_nxt += tlen;
2933 thflags = th->th_flags & TH_FIN;
2934 TCPSTAT_INC(tcps_rcvpack);
2935 TCPSTAT_ADD(tcps_rcvbyte, tlen);
2936 ND6_HINT(tp);
2937 SOCKBUF_LOCK(&so->so_rcv);
2938 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2939 m_freem(m);
2940 else
2941 sbappendstream_locked(&so->so_rcv, m);
2942 /* NB: sorwakeup_locked() does an implicit unlock. */
2943 sorwakeup_locked(so);
2944 } else {
2945 /*
2946 * XXX: Due to the header drop above "th" is
2947 * theoretically invalid by now. Fortunately
2948 * m_adj() doesn't actually free any mbufs
2949 * when trimming from the head.
2950 */
2951 thflags = tcp_reass(tp, th, &tlen, m);
2952 tp->t_flags |= TF_ACKNOW;
2953 }
2954 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2955 tcp_update_sack_list(tp, save_start, save_start + tlen);
2956 #if 0
2957 /*
2958 * Note the amount of data that peer has sent into
2959 * our window, in order to estimate the sender's
2960 * buffer size.
2961 * XXX: Unused.
2962 */
2963 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
2964 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2965 else
2966 len = so->so_rcv.sb_hiwat;
2967 #endif
2968 } else {
2969 m_freem(m);
2970 thflags &= ~TH_FIN;
2971 }
2972
2973 /*
2974 * If FIN is received ACK the FIN and let the user know
2975 * that the connection is closing.
2976 */
2977 if (thflags & TH_FIN) {
2978 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2979 socantrcvmore(so);
2980 /*
2981 * If connection is half-synchronized
2982 * (ie NEEDSYN flag on) then delay ACK,
2983 * so it may be piggybacked when SYN is sent.
2984 * Otherwise, since we received a FIN then no
2985 * more input can be expected, send ACK now.
2986 */
2987 if (tp->t_flags & TF_NEEDSYN)
2988 tp->t_flags |= TF_DELACK;
2989 else
2990 tp->t_flags |= TF_ACKNOW;
2991 tp->rcv_nxt++;
2992 }
2993 switch (tp->t_state) {
2994
2995 /*
2996 * In SYN_RECEIVED and ESTABLISHED STATES
2997 * enter the CLOSE_WAIT state.
2998 */
2999 case TCPS_SYN_RECEIVED:
3000 tp->t_starttime = ticks;
3001 /* FALLTHROUGH */
3002 case TCPS_ESTABLISHED:
3003 tcp_state_change(tp, TCPS_CLOSE_WAIT);
3004 break;
3005
3006 /*
3007 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3008 * enter the CLOSING state.
3009 */
3010 case TCPS_FIN_WAIT_1:
3011 tcp_state_change(tp, TCPS_CLOSING);
3012 break;
3013
3014 /*
3015 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3016 * starting the time-wait timer, turning off the other
3017 * standard timers.
3018 */
3019 case TCPS_FIN_WAIT_2:
3020 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
3021 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
3022 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
3023 ti_locked));
3024
3025 tcp_twstart(tp);
3026 INP_INFO_WUNLOCK(&V_tcbinfo);
3027 return;
3028 }
3029 }
3030 if (ti_locked == TI_WLOCKED)
3031 INP_INFO_WUNLOCK(&V_tcbinfo);
3032 ti_locked = TI_UNLOCKED;
3033
3034 #ifdef TCPDEBUG
3035 if (so->so_options & SO_DEBUG)
3036 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3037 &tcp_savetcp, 0);
3038 #endif
3039
3040 /*
3041 * Return any desired output.
3042 */
3043 if (needoutput || (tp->t_flags & TF_ACKNOW))
3044 (void) tcp_output(tp);
3045
3046 check_delack:
3047 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3048 __func__, ti_locked));
3049 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3050 INP_WLOCK_ASSERT(tp->t_inpcb);
3051
3052 if (tp->t_flags & TF_DELACK) {
3053 tp->t_flags &= ~TF_DELACK;
3054 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3055 }
3056 INP_WUNLOCK(tp->t_inpcb);
3057 return;
3058
3059 dropafterack:
3060 /*
3061 * Generate an ACK dropping incoming segment if it occupies
3062 * sequence space, where the ACK reflects our state.
3063 *
3064 * We can now skip the test for the RST flag since all
3065 * paths to this code happen after packets containing
3066 * RST have been dropped.
3067 *
3068 * In the SYN-RECEIVED state, don't send an ACK unless the
3069 * segment we received passes the SYN-RECEIVED ACK test.
3070 * If it fails, send a RST. This breaks the loop in the
3071 * "LAND" DoS attack, and also prevents an ACK storm
3072 * between two listening ports that have been sent forged
3073 * SYN segments, each with the source address of the other.
3074 */
3075 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3076 (SEQ_GT(tp->snd_una, th->th_ack) ||
3077 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3078 rstreason = BANDLIM_RST_OPENPORT;
3079 goto dropwithreset;
3080 }
3081 #ifdef TCPDEBUG
3082 if (so->so_options & SO_DEBUG)
3083 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3084 &tcp_savetcp, 0);
3085 #endif
3086 if (ti_locked == TI_WLOCKED)
3087 INP_INFO_WUNLOCK(&V_tcbinfo);
3088 ti_locked = TI_UNLOCKED;
3089
3090 tp->t_flags |= TF_ACKNOW;
3091 (void) tcp_output(tp);
3092 INP_WUNLOCK(tp->t_inpcb);
3093 m_freem(m);
3094 return;
3095
3096 dropwithreset:
3097 if (ti_locked == TI_WLOCKED)
3098 INP_INFO_WUNLOCK(&V_tcbinfo);
3099 ti_locked = TI_UNLOCKED;
3100
3101 if (tp != NULL) {
3102 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3103 INP_WUNLOCK(tp->t_inpcb);
3104 } else
3105 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3106 return;
3107
3108 drop:
3109 if (ti_locked == TI_WLOCKED) {
3110 INP_INFO_WUNLOCK(&V_tcbinfo);
3111 ti_locked = TI_UNLOCKED;
3112 }
3113 #ifdef INVARIANTS
3114 else
3115 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3116 #endif
3117
3118 /*
3119 * Drop space held by incoming segment and return.
3120 */
3121 #ifdef TCPDEBUG
3122 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3123 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3124 &tcp_savetcp, 0);
3125 #endif
3126 if (tp != NULL)
3127 INP_WUNLOCK(tp->t_inpcb);
3128 m_freem(m);
3129 }
3130
3131 /*
3132 * Issue RST and make ACK acceptable to originator of segment.
3133 * The mbuf must still include the original packet header.
3134 * tp may be NULL.
3135 */
3136 static void
3137 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3138 int tlen, int rstreason)
3139 {
3140 #ifdef INET
3141 struct ip *ip;
3142 #endif
3143 #ifdef INET6
3144 struct ip6_hdr *ip6;
3145 #endif
3146
3147 if (tp != NULL) {
3148 INP_WLOCK_ASSERT(tp->t_inpcb);
3149 }
3150
3151 /* Don't bother if destination was broadcast/multicast. */
3152 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3153 goto drop;
3154 #ifdef INET6
3155 if (mtod(m, struct ip *)->ip_v == 6) {
3156 ip6 = mtod(m, struct ip6_hdr *);
3157 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3158 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3159 goto drop;
3160 /* IPv6 anycast check is done at tcp6_input() */
3161 }
3162 #endif
3163 #if defined(INET) && defined(INET6)
3164 else
3165 #endif
3166 #ifdef INET
3167 {
3168 ip = mtod(m, struct ip *);
3169 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3170 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3171 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3172 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3173 goto drop;
3174 }
3175 #endif
3176
3177 /* Perform bandwidth limiting. */
3178 if (badport_bandlim(rstreason) < 0)
3179 goto drop;
3180
3181 /* tcp_respond consumes the mbuf chain. */
3182 if (th->th_flags & TH_ACK) {
3183 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3184 th->th_ack, TH_RST);
3185 } else {
3186 if (th->th_flags & TH_SYN)
3187 tlen++;
3188 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3189 (tcp_seq)0, TH_RST|TH_ACK);
3190 }
3191 return;
3192 drop:
3193 m_freem(m);
3194 }
3195
3196 /*
3197 * Parse TCP options and place in tcpopt.
3198 */
3199 static void
3200 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3201 {
3202 int opt, optlen;
3203
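	/*
	 * Options are encoded on the wire as <kind, length, data>, where
	 * length counts the kind and length octets themselves; EOL and NOP
	 * are single-octet kinds without a length field, which is why the
	 * loop special-cases them before reading cp[1].
	 */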
3204 to->to_flags = 0;
3205 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3206 opt = cp[0];
3207 if (opt == TCPOPT_EOL)
3208 break;
3209 if (opt == TCPOPT_NOP)
3210 optlen = 1;
3211 else {
3212 if (cnt < 2)
3213 break;
3214 optlen = cp[1];
3215 if (optlen < 2 || optlen > cnt)
3216 break;
3217 }
3218 switch (opt) {
3219 case TCPOPT_MAXSEG:
3220 if (optlen != TCPOLEN_MAXSEG)
3221 continue;
3222 if (!(flags & TO_SYN))
3223 continue;
3224 to->to_flags |= TOF_MSS;
3225 bcopy((char *)cp + 2,
3226 (char *)&to->to_mss, sizeof(to->to_mss));
3227 to->to_mss = ntohs(to->to_mss);
3228 break;
3229 case TCPOPT_WINDOW:
3230 if (optlen != TCPOLEN_WINDOW)
3231 continue;
3232 if (!(flags & TO_SYN))
3233 continue;
3234 to->to_flags |= TOF_SCALE;
3235 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3236 break;
3237 case TCPOPT_TIMESTAMP:
3238 if (optlen != TCPOLEN_TIMESTAMP)
3239 continue;
3240 to->to_flags |= TOF_TS;
3241 bcopy((char *)cp + 2,
3242 (char *)&to->to_tsval, sizeof(to->to_tsval));
3243 to->to_tsval = ntohl(to->to_tsval);
3244 bcopy((char *)cp + 6,
3245 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3246 to->to_tsecr = ntohl(to->to_tsecr);
3247 break;
3248 #ifdef TCP_SIGNATURE
3249 /*
3250 * XXX In order to reply to a host which has set the
3251 * TCP_SIGNATURE option in its initial SYN, we have to
3252 * record the fact that the option was observed here
3253 * for the syncache code to perform the correct response.
3254 */
3255 case TCPOPT_SIGNATURE:
3256 if (optlen != TCPOLEN_SIGNATURE)
3257 continue;
3258 to->to_flags |= TOF_SIGNATURE;
3259 to->to_signature = cp + 2;
3260 break;
3261 #endif
3262 case TCPOPT_SACK_PERMITTED:
3263 if (optlen != TCPOLEN_SACK_PERMITTED)
3264 continue;
3265 if (!(flags & TO_SYN))
3266 continue;
3267 if (!V_tcp_do_sack)
3268 continue;
3269 to->to_flags |= TOF_SACKPERM;
3270 break;
3271 case TCPOPT_SACK:
3272 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3273 continue;
3274 if (flags & TO_SYN)
3275 continue;
3276 to->to_flags |= TOF_SACK;
3277 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3278 to->to_sacks = cp + 2;
3279 TCPSTAT_INC(tcps_sack_rcv_blocks);
3280 break;
3281 default:
3282 continue;
3283 }
3284 }
3285 }
3286
3287 /*
3288 * Pull the out-of-band byte out of a segment so
3289 * it doesn't appear in the user's data queue.
3290 * It is still reflected in the segment length for
3291 * sequencing purposes.
3292 */
3293 static void
3294 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3295 int off)
3296 {
3297 int cnt = off + th->th_urp - 1;
3298
3299 while (cnt >= 0) {
3300 if (m->m_len > cnt) {
3301 char *cp = mtod(m, caddr_t) + cnt;
3302 struct tcpcb *tp = sototcpcb(so);
3303
3304 INP_WLOCK_ASSERT(tp->t_inpcb);
3305
3306 tp->t_iobc = *cp;
3307 tp->t_oobflags |= TCPOOB_HAVEDATA;
3308 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3309 m->m_len--;
3310 if (m->m_flags & M_PKTHDR)
3311 m->m_pkthdr.len--;
3312 return;
3313 }
3314 cnt -= m->m_len;
3315 m = m->m_next;
3316 if (m == NULL)
3317 break;
3318 }
3319 panic("tcp_pulloutofband");
3320 }
3321
3322 /*
3323 * Collect new round-trip time estimate
3324 * and update averages and current timeout.
3325 */
3326 static void
3327 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3328 {
3329 int delta;
3330
3331 INP_WLOCK_ASSERT(tp->t_inpcb);
3332
3333 TCPSTAT_INC(tcps_rttupdated);
3334 tp->t_rttupdated++;
3335 if (tp->t_srtt != 0) {
3336 /*
3337 * srtt is stored as fixed point with 5 bits after the
3338 * binary point (i.e., scaled by 32). The following magic
3339 * is equivalent to the smoothing algorithm in rfc793 with
3340 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3341 * point). Adjust rtt to origin 0.
3342 */
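		/*
		 * Worked example, assuming the customary shift values
		 * (TCP_RTT_SHIFT = 5, TCP_DELTA_SHIFT = 2): with a stored
		 * srtt of 8 ticks (t_srtt = 256) and a new sample of
		 * rtt = 12 ticks, delta = (11 << 2) - (256 >> 3) = 12 and
		 * t_srtt becomes 268, i.e. about 8.4 ticks, one eighth of
		 * the way toward the new sample.
		 */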
3343 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3344 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3345
3346 if ((tp->t_srtt += delta) <= 0)
3347 tp->t_srtt = 1;
3348
3349 /*
3350 * We accumulate a smoothed rtt variance (actually, a
3351 * smoothed mean difference), then set the retransmit
3352 * timer to smoothed rtt + 4 times the smoothed variance.
3353 * rttvar is stored as fixed point with 4 bits after the
3354 * binary point (scaled by 16). The following is
3355 * equivalent to rfc793 smoothing with an alpha of .75
3356 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3357 * rfc793's wired-in beta.
3358 */
3359 if (delta < 0)
3360 delta = -delta;
3361 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3362 if ((tp->t_rttvar += delta) <= 0)
3363 tp->t_rttvar = 1;
3364 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3365 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3366 } else {
3367 /*
3368 * No rtt measurement yet - use the unsmoothed rtt.
3369 * Set the variance to half the rtt (so our first
3370 * retransmit happens at 3*rtt).
3371 */
3372 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3373 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3374 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3375 }
3376 tp->t_rtttime = 0;
3377 tp->t_rxtshift = 0;
3378
3379 /*
3380 * the retransmit should happen at rtt + 4 * rttvar.
3381 * Because of the way we do the smoothing, srtt and rttvar
3382 * will each average +1/2 tick of bias. When we compute
3383 * the retransmit timer, we want 1/2 tick of rounding and
3384 * 1 extra tick because of +-1/2 tick uncertainty in the
3385 * firing of the timer. The bias will give us exactly the
3386 * 1.5 tick we need. But, because the bias is
3387 * statistical, we have to test that we don't drop below
3388 * the minimum feasible timer (which is 2 ticks).
3389 */
3390 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3391 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3392
3393 /*
3394 * We received an ack for a packet that wasn't retransmitted;
3395 * it is probably safe to discard any error indications we've
3396 * received recently. This isn't quite right, but close enough
3397 * for now (a route might have failed after we sent a segment,
3398 * and the return path might not be symmetrical).
3399 */
3400 tp->t_softerror = 0;
3401 }
3402
3403 /*
3404 * Determine a reasonable value for maxseg size.
3405 * If the route is known, check route for mtu.
3406 * If none, use an mss that can be handled on the outgoing interface
3407 * without forcing IP to fragment. If no route is found, route has no mtu,
3408 * or the destination isn't local, use a default, hopefully conservative
3409 * size (usually 512 or the default IP max size, but no more than the mtu
3410 * of the interface), as we can't discover anything about intervening
3411 * gateways or networks. We also initialize the congestion/slow start
3412 * window to be a single segment if the destination isn't local.
3413 * While looking at the routing entry, we also initialize other path-dependent
3414 * parameters from pre-set or cached values in the routing entry.
3415 *
3416 * Also take into account the space needed for options that we
3417 * send regularly. Make maxseg shorter by that amount to assure
3418 * that we can send maxseg amount of data even when the options
3419 * are present. Store the upper limit of the length of options plus
3420 * data in maxopd.
3421 *
3422 * NOTE that this routine is only called when we process an incoming
3423 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3424 * settings are handled in tcp_mssopt().
3425 */
3426 void
3427 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3428 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3429 {
3430 int mss = 0;
3431 u_long maxmtu = 0;
3432 struct inpcb *inp = tp->t_inpcb;
3433 struct hc_metrics_lite metrics;
3434 int origoffer;
3435 #ifdef INET6
3436 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3437 size_t min_protoh = isipv6 ?
3438 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3439 sizeof (struct tcpiphdr);
3440 #else
3441 const size_t min_protoh = sizeof(struct tcpiphdr);
3442 #endif
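	/*
	 * min_protoh is the fixed IP plus TCP header overhead: 40 bytes
	 * for IPv4 (20 + 20) and 60 bytes for IPv6 (40 + 20), so the
	 * usable MSS is always an MTU less this amount.
	 */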
3443
3444 INP_WLOCK_ASSERT(tp->t_inpcb);
3445
3446 if (mtuoffer != -1) {
3447 KASSERT(offer == -1, ("%s: conflict", __func__));
3448 offer = mtuoffer - min_protoh;
3449 }
3450 origoffer = offer;
3451
3452 /* Initialize. */
3453 #ifdef INET6
3454 if (isipv6) {
3455 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3456 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
3457 }
3458 #endif
3459 #if defined(INET) && defined(INET6)
3460 else
3461 #endif
3462 #ifdef INET
3463 {
3464 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3465 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
3466 }
3467 #endif
3468
3469 /*
3470 * No route to sender, stay with default mss and return.
3471 */
3472 if (maxmtu == 0) {
3473 /*
3474 * In case we return early we need to initialize metrics
3475 * to a defined state as tcp_hc_get() would do for us
3476 * if there was no cache hit.
3477 */
3478 if (metricptr != NULL)
3479 bzero(metricptr, sizeof(struct hc_metrics_lite));
3480 return;
3481 }
3482
3483 /* What have we got? */
3484 switch (offer) {
3485 case 0:
3486 /*
3487 * Offer == 0 means that there was no MSS on the SYN
3488 * segment; in this case we use tcp_mssdflt as
3489 * already assigned to t_maxopd above.
3490 */
3491 offer = tp->t_maxopd;
3492 break;
3493
3494 case -1:
3495 /*
3496 * Offer == -1 means that we didn't receive SYN yet.
3497 */
3498 /* FALLTHROUGH */
3499
3500 default:
3501 /*
3502 * Prevent DoS attack with too small MSS. Round up
3503 * to at least minmss.
3504 */
3505 offer = max(offer, V_tcp_minmss);
3506 }
3507
3508 /*
3509 * rmx information is now retrieved from tcp_hostcache.
3510 */
3511 tcp_hc_get(&inp->inp_inc, &metrics);
3512 if (metricptr != NULL)
3513 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3514
3515 /*
3516 * If there's a discovered mtu in the tcp hostcache, use it;
3517 * else, use the link mtu.
3518 */
3519 if (metrics.rmx_mtu)
3520 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3521 else {
3522 #ifdef INET6
3523 if (isipv6) {
3524 mss = maxmtu - min_protoh;
3525 if (!V_path_mtu_discovery &&
3526 !in6_localaddr(&inp->in6p_faddr))
3527 mss = min(mss, V_tcp_v6mssdflt);
3528 }
3529 #endif
3530 #if defined(INET) && defined(INET6)
3531 else
3532 #endif
3533 #ifdef INET
3534 {
3535 mss = maxmtu - min_protoh;
3536 if (!V_path_mtu_discovery &&
3537 !in_localaddr(inp->inp_faddr))
3538 mss = min(mss, V_tcp_mssdflt);
3539 }
3540 #endif
3541 /*
3542 * XXX - The above conditional (mss = maxmtu - min_protoh)
3543 * probably violates the TCP spec.
3544 * The problem is that, since we don't know the
3545 * other end's MSS, we are supposed to use a conservative
3546 * default. But, if we do that, then MTU discovery will
3547 * never actually take place, because the conservative
3548 * default is much less than the MTUs typically seen
3549 * on the Internet today. For the moment, we'll sweep
3550 * this under the carpet.
3551 *
3552 * The conservative default might not actually be a problem
3553 * if the only case this occurs is when sending an initial
3554 * SYN with options and data to a host we've never talked
3555 * to before. Then, they will reply with an MSS value which
3556 * will get recorded and the new parameters should get
3557 * recomputed. For Further Study.
3558 */
3559 }
3560 mss = min(mss, offer);
3561
3562 /*
3563 * Sanity check: make sure that maxopd will be large
3564 * enough to allow some data on segments even if all
3565 * the option space is used (40 bytes). Otherwise
3566 * funny things may happen in tcp_output.
3567 */
3568 mss = max(mss, 64);
3569
3570 /*
3571 * maxopd stores the maximum length of data AND options
3572 * in a segment; maxseg is the amount of data in a normal
3573 * segment. We need to store this value (maxopd) apart
3574 * from maxseg, because now every segment carries options
3575 * and thus we normally have somewhat less data in segments.
3576 */
3577 tp->t_maxopd = mss;
3578
3579 /*
3580 * origoffer==-1 indicates that no segments were received yet.
3581 * In this case we just guess.
3582 */
3583 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3584 (origoffer == -1 ||
3585 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3586 mss -= TCPOLEN_TSTAMP_APPA;
3587
3588 tp->t_maxseg = mss;
3589 }
3590
3591 void
3592 tcp_mss(struct tcpcb *tp, int offer)
3593 {
3594 int mss;
3595 u_long bufsize;
3596 struct inpcb *inp;
3597 struct socket *so;
3598 struct hc_metrics_lite metrics;
3599 struct tcp_ifcap cap;
3600
3601 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3602
3603 bzero(&cap, sizeof(cap));
3604 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3605
3606 mss = tp->t_maxseg;
3607 inp = tp->t_inpcb;
3608
3609 /*
3610 * If there's a pipesize, change the socket buffer to that size;
3611 * don't change it if sb_hiwat differs from the default (then it
3612 * has been changed on purpose with setsockopt()).
3613 * Make the socket buffers an integral number of mss units;
3614 * if the mss is larger than the socket buffer, decrease the mss.
3615 */
3616 so = inp->inp_socket;
3617 SOCKBUF_LOCK(&so->so_snd);
3618 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3619 bufsize = metrics.rmx_sendpipe;
3620 else
3621 bufsize = so->so_snd.sb_hiwat;
3622 if (bufsize < mss)
3623 mss = bufsize;
3624 else {
3625 bufsize = roundup(bufsize, mss);
3626 if (bufsize > sb_max)
3627 bufsize = sb_max;
3628 if (bufsize > so->so_snd.sb_hiwat)
3629 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3630 }
3631 SOCKBUF_UNLOCK(&so->so_snd);
3632 tp->t_maxseg = mss;
3633
3634 SOCKBUF_LOCK(&so->so_rcv);
3635 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3636 bufsize = metrics.rmx_recvpipe;
3637 else
3638 bufsize = so->so_rcv.sb_hiwat;
3639 if (bufsize > mss) {
3640 bufsize = roundup(bufsize, mss);
3641 if (bufsize > sb_max)
3642 bufsize = sb_max;
3643 if (bufsize > so->so_rcv.sb_hiwat)
3644 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3645 }
3646 SOCKBUF_UNLOCK(&so->so_rcv);
3647
3648 /* Check the interface for TSO capabilities. */
3649 if (cap.ifcap & CSUM_TSO) {
3650 tp->t_flags |= TF_TSO;
3651 tp->t_tsomax = cap.tsomax;
3652 }
3653 }
3654
3655 /*
3656 * Determine the MSS option to send on an outgoing SYN.
3657 */
3658 int
3659 tcp_mssopt(struct in_conninfo *inc)
3660 {
3661 int mss = 0;
3662 u_long maxmtu = 0;
3663 u_long thcmtu = 0;
3664 size_t min_protoh;
3665
3666 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3667
3668 #ifdef INET6
3669 if (inc->inc_flags & INC_ISIPV6) {
3670 mss = V_tcp_v6mssdflt;
3671 maxmtu = tcp_maxmtu6(inc, NULL);
3672 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3673 }
3674 #endif
3675 #if defined(INET) && defined(INET6)
3676 else
3677 #endif
3678 #ifdef INET
3679 {
3680 mss = V_tcp_mssdflt;
3681 maxmtu = tcp_maxmtu(inc, NULL);
3682 min_protoh = sizeof(struct tcpiphdr);
3683 }
3684 #endif
3685 #if defined(INET6) || defined(INET)
3686 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3687 #endif
3688
3689 if (maxmtu && thcmtu)
3690 mss = min(maxmtu, thcmtu) - min_protoh;
3691 else if (maxmtu || thcmtu)
3692 mss = max(maxmtu, thcmtu) - min_protoh;
3693
3694 return (mss);
3695 }
3696
3697
3698 /*
3699 * When a partial ack arrives, force the retransmission of the
3700 * next unacknowledged segment. Do not clear tp->t_dupacks.
3701 * By setting snd_nxt to th_ack, this forces the retransmission timer
3702 * to be started again.
3703 */
3704 static void
3705 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3706 {
3707 tcp_seq onxt = tp->snd_nxt;
3708 u_long ocwnd = tp->snd_cwnd;
3709
3710 INP_WLOCK_ASSERT(tp->t_inpcb);
3711
3712 tcp_timer_activate(tp, TT_REXMT, 0);
3713 tp->t_rtttime = 0;
3714 tp->snd_nxt = th->th_ack;
3715 /*
3716 * Set snd_cwnd to one segment beyond acknowledged offset.
3717 * (tp->snd_una has not yet been updated when this function is called.)
3718 */
3719 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
3720 tp->t_flags |= TF_ACKNOW;
3721 (void) tcp_output(tp);
3722 tp->snd_cwnd = ocwnd;
3723 if (SEQ_GT(onxt, tp->snd_nxt))
3724 tp->snd_nxt = onxt;
3725 /*
3726 * Partial window deflation. Relies on fact that tp->snd_una
3727 * not updated yet.
3728 */
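	/*
	 * Example: if this partial ACK covered two segments of data, cwnd
	 * is first reduced by those two segments and then grown by one,
	 * so the net effect is to deflate cwnd by the amount acked less
	 * one maxseg, keeping roughly the same amount of data in flight.
	 */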
3729 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3730 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3731 else
3732 tp->snd_cwnd = 0;
3733 tp->snd_cwnd += tp->t_maxseg;
3734 }