/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/netinet/tcp_input.c 236953 2012-06-12 12:10:10Z bz $");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#define	TCPSTATES		/* for logging */

#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

VNET_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &VNET_NAME(tcpstat), tcpstat,
    "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 0;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow the old (insecure) criteria for accepting RST packets");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

static void	tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	tcp_do_segment(struct mbuf *, struct tcphdr *,
		    struct socket *, struct tcpcb *, int, int, uint8_t,
		    int);
static void	tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		    struct tcpcb *, int, int);
static void	tcp_pulloutofband(struct socket *,
		    struct tcphdr *, struct mbuf *, int);
static void	tcp_xmit_timer(struct tcpcb *, int);
static void	tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static void inline	tcp_fields_to_host(struct tcphdr *);
#ifdef TCP_SIGNATURE
static void inline	tcp_fields_to_net(struct tcphdr *);
static int inline	tcp_signature_verify_input(struct mbuf *, int, int,
		    int, struct tcpopt *, struct tcphdr *, u_int);
#endif
static void inline	cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
			    uint16_t type);
static void inline	cc_conn_init(struct tcpcb *tp);
static void inline	cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void inline	hhook_run_tcp_est_in(struct tcpcb *tp,
			    struct tcphdr *th, struct tcpopt *to);

/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array of u_long.  While this encodes the
 * general layout of tcpstat into the caller, it doesn't encode its location,
 * so that future changes to add, for example, per-CPU stats support won't
 * cause binary compatibility problems for kernel modules.
 */
void
kmod_tcpstat_inc(int statnum)
{

	(*((u_long *)&V_tcpstat + statnum))++;
}
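
/*
 * Example (hypothetical module code, not part of this file): because the
 * struct is treated as an array of u_long, a module could bump
 * tcps_rcvtotal with an index derived from the field offset:
 *
 *	kmod_tcpstat_inc(
 *	    offsetof(struct tcpstat, tcps_rcvtotal) / sizeof(u_long));
 *
 * In practice module code uses the stats macros from netinet/tcp_var.h
 * rather than open-coding the index computation.
 */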

/*
 * Wrapper for the TCP established input helper hook.
 */
static void inline
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}

/*
 * CC wrapper hook functions
 */
static void inline
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd == min(tp->snd_cwnd, tp->snd_wnd))
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tp->t_maxseg);
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}
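
/*
 * Worked example of the ABC accounting above (illustrative numbers): with
 * V_tcp_abc_l_var = 2, t_maxseg = 1460, snd_cwnd = 14600 and
 * snd_cwnd > snd_ssthresh (congestion avoidance), an ACK covering 2920 new
 * bytes credits min(2920, 2 * 1460) = 2920 bytes to t_bytes_acked.  After
 * five such ACKs a full congestion window has been acked, so
 * CCF_ABC_SENTAWND is set and a module such as newreno may then grow
 * snd_cwnd by one segment.
 */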

static void inline
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	int rtt;
#ifdef INET6
	int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the slow-start flight size depending on whether this
	 * is a local network or not.
	 *
	 * Extend this so we cache the cwnd too and retrieve it here.
	 * Make cwnd even bigger than RFC3390 suggests but only if we
	 * have previous experience with the remote host.  Be careful
	 * not to make cwnd bigger than the remote receive window or our
	 * own send socket buffer.  Maybe put some additional upper bound
	 * on the retrieved cwnd.  Should do incremental updates to the
	 * hostcache when cwnd collapses so the next connection doesn't
	 * overload the path again.
	 *
	 * XXXAO: Initializing the CWND from the hostcache is broken
	 * and in its current form not RFC conformant.  It is disabled
	 * until fixed or removed entirely.
	 *
	 * RFC3390 says only to do this if the SYN or SYN/ACK didn't get
	 * lost.  We currently check for that only in syncache_socket().
	 */
/* #define TCP_METRICS_CWND */
#ifdef TCP_METRICS_CWND
	if (metrics.rmx_cwnd)
		tp->snd_cwnd = max(tp->t_maxseg, min(metrics.rmx_cwnd / 2,
		    min(tp->snd_wnd, so->so_snd.sb_hiwat)));
	else
#endif
	if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 4380));
#ifdef INET6
	else if (isipv6 && in6_localaddr(&inp->in6p_faddr))
		tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz_local;
#endif
#if defined(INET) && defined(INET6)
	else if (!isipv6 && in_localaddr(inp->inp_faddr))
		tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz_local;
#endif
#ifdef INET
	else if (in_localaddr(inp->inp_faddr))
		tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz_local;
#endif
	else
		tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz;

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}
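
/*
 * Worked example of the RFC3390 initial window above: with t_maxseg = 1460,
 * snd_cwnd = min(5840, max(2920, 4380)) = 4380 bytes (three segments); with
 * t_maxseg = 536 it is min(2144, max(1072, 4380)) = 2144 bytes (four
 * segments), per RFC3390's min(4*MSS, max(2*MSS, 4380 bytes)) rule.
 */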

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    tp->t_maxseg) * tp->t_maxseg;
		tp->snd_cwnd = tp->t_maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}
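
/*
 * Worked example of the CC_RTO ssthresh computation above: with
 * snd_wnd = 65535, snd_cwnd = 29200 and t_maxseg = 1460,
 * max(2, min(65535, 29200) / 2 / 1460) * 1460 = 14600, i.e. half the
 * current flight rounded down to a whole number of segments, but never
 * less than two segments.  snd_cwnd itself restarts at one segment.
 */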

static void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

static inline void
tcp_fields_to_host(struct tcphdr *th)
{

	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);
}

#ifdef TCP_SIGNATURE
static inline void
tcp_fields_to_net(struct tcphdr *th)
{

	th->th_seq = htonl(th->th_seq);
	th->th_ack = htonl(th->th_ack);
	th->th_win = htons(th->th_win);
	th->th_urp = htons(th->th_urp);
}

static inline int
tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
    struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
{
	int ret;

	tcp_fields_to_net(th);
	ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
	tcp_fields_to_host(th);
	return (ret);
}
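
/*
 * The byte-order round trip above is needed because the TCP-MD5 digest
 * (RFC2385) is computed over the segment as it appears on the wire, while
 * by this point tcp_input() has already converted the header to host order
 * via tcp_fields_to_host().
 */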
#endif

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define	ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define	ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define	DELAY_ACK(tp)							\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
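
/*
 * DELAY_ACK() only decides the policy; callers act on it.  The in-sequence
 * data fast path below, for example, does:
 *
 *	if (DELAY_ACK(tp))
 *		tp->t_flags |= TF_DELACK;
 *	else {
 *		tp->t_flags |= TF_ACKNOW;
 *		tcp_output(tp);
 *	}
 *
 * TF_DELACK lets the delayed-ack timer coalesce the ACK with later data.
 */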

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif /* INET6 */

void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
#ifdef INET
	struct ipovly *ipov;
#endif
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef TCP_SIGNATURE
	uint8_t sig_checked = 0;
#endif
	uint8_t iptos = 0;
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;	/* options in this segment */
	char *s = NULL;		/* address and port logging */
	int ti_locked;
#define	TI_UNLOCKED	1
#define	TI_WLOCKED	2

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 addresses in the
		 * source.  As we use all-zero to indicate an unbound or
		 * unconnected pcb, an unspecified IPv6 source address can
		 * be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination
		 * are already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					ip->ip_len +
					IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}
#endif /* INET */

#ifdef INET6
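	/*
	 * ip6_flow carries the IPv6 version (4 bits), traffic class
	 * (8 bits) and flow label (20 bits); shifting the host-order word
	 * right by 20 and masking with 0xff extracts the traffic class
	 * octet, which holds the ECN bits.
	 */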
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	iptos = ip->ip_tos;
#endif

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
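
	/*
	 * th_off counts 32-bit words, so off = th_off << 2 is the header
	 * length in bytes: e.g. th_off = 5 means a bare 20-byte header,
	 * while th_off = 8 means 32 bytes, of which 12 are options.
	 */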
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are two cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, and ACKs for
	 * a connection in TIMEWAIT.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) {
		INP_INFO_WLOCK(&V_tcbinfo);
		ti_locked = TI_WLOCKED;
	} else
		ti_locked = TI_UNLOCKED;

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_WLOCKED) {
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
#endif /* IPFIREWALL_FORWARD */

#ifdef INET6
#ifdef IPFIREWALL_FORWARD
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet. We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
#ifdef IPFIREWALL_FORWARD
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet. We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	if (!(inp->inp_flags & INP_HW_FLOWID)
	    && (m->m_flags & M_FLOWID)
	    && ((inp->inp_socket == NULL)
		|| !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
		inp->inp_flags |= INP_HW_FLOWID;
		inp->inp_flags &= ~INP_SW_FLOWID;
		inp->inp_flowid = m->m_pkthdr.flowid;
	}
#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		V_ipsec6stat.in_polvio++;
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		V_ipsec4stat.in_polvio++;
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_WLOCKED;
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0)
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#endif
	if (tp->t_state != TCPS_ESTABLISHED) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_WLOCKED;
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.  Because listen
	 * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be
	 * held in this case.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_WLOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
#ifdef TCP_SIGNATURE
			if (sig_checked == 0) {
				tcp_dooptions(&to, optp, optlen,
				    (thflags & TH_SYN) ? TO_SYN : 0);
				if (!tcp_signature_verify_input(m, off0, tlen,
				    optlen, &to, th, tp->t_flags)) {

					/*
					 * If the signature fails, still let
					 * an RST received in SYN_SENT state
					 * through for further processing.
					 */
					if ((thflags & TH_RST) == 0 ||
					    (tp->t_state == TCPS_SYN_SENT) == 0)
						goto dropunlock;
				}
				sig_checked = 1;
			}
#endif

			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden, we do not accept
		 * SYNs to a deprecated interface address, to prevent any
		 * new inbound connection from getting established.
		 * When we do not accept the SYN, we send a TCP RST with
		 * the deprecated source address (instead of dropping the
		 * segment).  This is a compromise: it is much better for
		 * the peer to get a RST, and the RST will be the final
		 * packet of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping a
		 * SYN in this case.
		 * Deciphering RFC2462 5.5.4, it says roughly:
		 * 1. use of a deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into the (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * 5.5.4 seems to be the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = ip6_getdstifaddr(m);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return;
	}

#ifdef TCP_SIGNATURE
	if (sig_checked == 0) {
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
		    th, tp->t_flags)) {

			/*
			 * If the signature fails, still let an RST received
			 * in SYN_SENT state through for further processing.
			 */
			if ((thflags & TH_RST) == 0 ||
			    (tp->t_state == TCPS_SYN_SENT) == 0)
				goto dropunlock;
		}
		sig_checked = 1;
	}
#endif

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return;

dropwithreset:
	if (ti_locked == TI_WLOCKED) {
		INP_INFO_WUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (ti_locked == TI_WLOCKED) {
		INP_INFO_WUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
}

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	tp->sackhint.last_sack_ack = 0;

	/*
	 * Require a global write lock on tcbinfo for SYN/FIN/RST segments
	 * or non-established connections; otherwise accept either a read
	 * or a write lock, as we may have conservatively acquired a write
	 * lock in certain cases in tcp_input() (is this still true?).
	 * Currently we will never enter with no lock, so we try to drop it
	 * quickly in the common pure ack/pure data cases.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_WLOCKED)
			INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
		else {
			KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
			    "ti_locked: %d", __func__, ti_locked));
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		}
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);

	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
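	/*
	 * For example, an advertised th_win of 65535 with a negotiated
	 * snd_scale of 3 yields a tiwin of 524280 bytes; RFC1323 allows
	 * scale factors up to 14, for windows of up to about 1 GB.
	 */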

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}
		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_WLOCKED)
					INP_INFO_WUNLOCK(&V_tcbinfo);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase; ignore
				 * timestamps of 0, or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				acked = BYTES_THIS_ACK(tp, th);

				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);

				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_WLOCKED)
				INP_INFO_WUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCPSTAT_INC(tcps_rcvpack);
			TCPSTAT_ADD(tcps_rcvbyte, tlen);
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (e.g. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit them
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. bytes received are counted over the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighths of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
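			/*
			 * Worked example with the defaults above: with
			 * sb_hiwat = 65536, accumulating more than 57344
			 * bytes (seven eighths) in rfbuf_cnt within one RTT
			 * grows the buffer to min(65536 + 16384, 2MB) =
			 * 81920 bytes on the next in-sequence segment.
			 */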
1774 if (V_tcp_do_autorcvbuf &&
1775 to.to_tsecr &&
1776 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1777 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
1778 to.to_tsecr - tp->rfbuf_ts < hz) {
1779 if (tp->rfbuf_cnt >
1780 (so->so_rcv.sb_hiwat / 8 * 7) &&
1781 so->so_rcv.sb_hiwat <
1782 V_tcp_autorcvbuf_max) {
1783 newsize =
1784 min(so->so_rcv.sb_hiwat +
1785 V_tcp_autorcvbuf_inc,
1786 V_tcp_autorcvbuf_max);
1787 }
1788 /* Start over with next RTT. */
1789 tp->rfbuf_ts = 0;
1790 tp->rfbuf_cnt = 0;
1791 } else
1792 tp->rfbuf_cnt += tlen; /* add up */
1793 }
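
/*
 * A minimal standalone sketch of the step-up rule above; the name
 * autorcvbuf_step() and its parameters are hypothetical stand-ins for
 * the sb_hiwat/rfbuf_cnt bookkeeping, not kernel API. Guarded out so
 * it serves as illustration only.
 */
#if 0
#include <stdio.h>

static unsigned long
autorcvbuf_step(unsigned long cur_hiwat, unsigned long rtt_bytes,
    unsigned long inc, unsigned long max)
{
	/*
	 * Step up only if a full RTT's worth of data nearly fills the
	 * current buffer (criterion 2) and the cap is not hit (3).
	 */
	if (rtt_bytes > cur_hiwat / 8 * 7 && cur_hiwat < max) {
		unsigned long next = cur_hiwat + inc;
		return (next < max ? next : max);
	}
	return (cur_hiwat);		/* no change this RTT */
}

int
main(void)
{
	/* 64k buffer, 60k received in one RTT, 16k steps, 256k cap. */
	printf("%lu\n", autorcvbuf_step(65536, 61440, 16384, 262144));
	return (0);			/* prints 81920 */
}
#endif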
1794
1795 /* Add data to socket buffer. */
1796 SOCKBUF_LOCK(&so->so_rcv);
1797 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1798 m_freem(m);
1799 } else {
1800 /*
1801 * Set new socket buffer size.
1802 * Give up when limit is reached.
1803 */
1804 if (newsize)
1805 if (!sbreserve_locked(&so->so_rcv,
1806 newsize, so, NULL))
1807 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1808 m_adj(m, drop_hdrlen); /* delayed header drop */
1809 sbappendstream_locked(&so->so_rcv, m);
1810 }
1811 /* NB: sorwakeup_locked() does an implicit unlock. */
1812 sorwakeup_locked(so);
1813 if (DELAY_ACK(tp)) {
1814 tp->t_flags |= TF_DELACK;
1815 } else {
1816 tp->t_flags |= TF_ACKNOW;
1817 tcp_output(tp);
1818 }
1819 goto check_delack;
1820 }
1821 }
1822
1823 /*
1824 * Calculate amount of space in receive window,
1825 * and then do TCP input processing.
1826 * Receive window is amount of space in rcv queue,
1827 * but not less than advertised window.
1828 */
1829 win = sbspace(&so->so_rcv);
1830 if (win < 0)
1831 win = 0;
1832 KASSERT(SEQ_GEQ(tp->rcv_adv, tp->rcv_nxt),
1833 ("tcp_input negative window: tp %p rcv_nxt %u rcv_adv %u", tp,
1834 tp->rcv_nxt, tp->rcv_adv));
1835 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1836
1837 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1838 tp->rfbuf_ts = 0;
1839 tp->rfbuf_cnt = 0;
1840
1841 switch (tp->t_state) {
1842
1843 /*
1844 * If the state is SYN_RECEIVED:
1845 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1846 */
1847 case TCPS_SYN_RECEIVED:
1848 if ((thflags & TH_ACK) &&
1849 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1850 SEQ_GT(th->th_ack, tp->snd_max))) {
1851 rstreason = BANDLIM_RST_OPENPORT;
1852 goto dropwithreset;
1853 }
1854 break;
1855
1856 /*
1857 * If the state is SYN_SENT:
1858 * if seg contains an ACK, but not for our SYN, drop the input.
1859 * if seg contains a RST, then drop the connection.
1860 * if seg does not contain SYN, then drop it.
1861 * Otherwise this is an acceptable SYN segment
1862 * initialize tp->rcv_nxt and tp->irs
1863 * if seg contains ack then advance tp->snd_una
1864 * if seg contains an ECE and ECN support is enabled, the stream
1865 * is ECN capable.
1866 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1867 * arrange for segment to be acked (eventually)
1868 * continue processing rest of data/controls, beginning with URG
1869 */
1870 case TCPS_SYN_SENT:
1871 if ((thflags & TH_ACK) &&
1872 (SEQ_LEQ(th->th_ack, tp->iss) ||
1873 SEQ_GT(th->th_ack, tp->snd_max))) {
1874 rstreason = BANDLIM_UNLIMITED;
1875 goto dropwithreset;
1876 }
1877 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
1878 tp = tcp_drop(tp, ECONNREFUSED);
1879 if (thflags & TH_RST)
1880 goto drop;
1881 if (!(thflags & TH_SYN))
1882 goto drop;
1883
1884 tp->irs = th->th_seq;
1885 tcp_rcvseqinit(tp);
1886 if (thflags & TH_ACK) {
1887 TCPSTAT_INC(tcps_connects);
1888 soisconnected(so);
1889 #ifdef MAC
1890 mac_socketpeer_set_from_mbuf(m, so);
1891 #endif
1892 /* Do window scaling on this connection? */
1893 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1894 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1895 tp->rcv_scale = tp->request_r_scale;
1896 }
1897 tp->rcv_adv += imin(tp->rcv_wnd,
1898 TCP_MAXWIN << tp->rcv_scale);
1899 tp->snd_una++; /* SYN is acked */
1900 /*
1901 * If there's data, delay ACK; if there's also a FIN
1902 * ACKNOW will be turned on later.
1903 */
1904 if (DELAY_ACK(tp) && tlen != 0)
1905 tcp_timer_activate(tp, TT_DELACK,
1906 tcp_delacktime);
1907 else
1908 tp->t_flags |= TF_ACKNOW;
1909
1910 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
1911 tp->t_flags |= TF_ECN_PERMIT;
1912 TCPSTAT_INC(tcps_ecn_shs);
1913 }
1914
1915 /*
1916 * Received <SYN,ACK> in SYN_SENT[*] state.
1917 * Transitions:
1918 * SYN_SENT --> ESTABLISHED
1919 * SYN_SENT* --> FIN_WAIT_1
1920 */
1921 tp->t_starttime = ticks;
1922 if (tp->t_flags & TF_NEEDFIN) {
1923 tp->t_state = TCPS_FIN_WAIT_1;
1924 tp->t_flags &= ~TF_NEEDFIN;
1925 thflags &= ~TH_SYN;
1926 } else {
1927 tp->t_state = TCPS_ESTABLISHED;
1928 cc_conn_init(tp);
1929 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
1930 }
1931 } else {
1932 /*
1933 * Received initial SYN in SYN-SENT[*] state =>
1934 * simultaneous open. If segment contains CC option
1935 * and there is a cached CC, apply TAO test.
1936 * If it succeeds, connection is half-synchronized.
1937 * Otherwise, do 3-way handshake:
1938 * SYN-SENT -> SYN-RECEIVED
1939 * SYN-SENT* -> SYN-RECEIVED*
1940 * If there was no CC option, clear cached CC value.
1941 */
1942 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
1943 tcp_timer_activate(tp, TT_REXMT, 0);
1944 tp->t_state = TCPS_SYN_RECEIVED;
1945 }
1946
1947 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
1948 "ti_locked %d", __func__, ti_locked));
1949 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1950 INP_WLOCK_ASSERT(tp->t_inpcb);
1951
1952 /*
1953 * Advance th->th_seq to correspond to first data byte.
1954 * If data, trim to stay within window,
1955 * dropping FIN if necessary.
1956 */
1957 th->th_seq++;
1958 if (tlen > tp->rcv_wnd) {
1959 todrop = tlen - tp->rcv_wnd;
1960 m_adj(m, -todrop);
1961 tlen = tp->rcv_wnd;
1962 thflags &= ~TH_FIN;
1963 TCPSTAT_INC(tcps_rcvpackafterwin);
1964 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
1965 }
1966 tp->snd_wl1 = th->th_seq - 1;
1967 tp->rcv_up = th->th_seq;
1968 /*
1969 * Client side of transaction: already sent SYN and data.
1970 * If the remote host used T/TCP to validate the SYN,
1971 * our data will be ACK'd; if so, enter normal data segment
1972 * processing in the middle of step 5, ack processing.
1973 * Otherwise, goto step 6.
1974 */
1975 if (thflags & TH_ACK)
1976 goto process_ACK;
1977
1978 goto step6;
1979
1980 /*
1981 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1982 * do normal processing.
1983 *
1984 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
1985 */
1986 case TCPS_LAST_ACK:
1987 case TCPS_CLOSING:
1988 break; /* continue normal processing */
1989 }
1990
1991 /*
1992 * States other than LISTEN or SYN_SENT.
1993 * First check the RST flag and sequence number since reset segments
1994 * are exempt from the timestamp and connection count tests. This
1995 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1996 * below which allowed reset segments in half the sequence space
1997 * to fall though and be processed (which gives forged reset
1998 * segments with a random sequence number a 50 percent chance of
1999 * killing a connection).
2000 * Then check timestamp, if present.
2001 * Then check the connection count, if present.
2002 * Then check that at least some bytes of segment are within
2003 * receive window. If segment begins before rcv_nxt,
2004 * drop leading data (and SYN); if nothing left, just ack.
2005 *
2006 *
2007 * If the RST bit is set, check the sequence number to see
2008 * if this is a valid reset segment.
2009 * RFC 793 page 37:
2010 * In all states except SYN-SENT, all reset (RST) segments
2011 * are validated by checking their SEQ-fields. A reset is
2012 * valid if its sequence number is in the window.
2013 * Note: this does not take into account delayed ACKs, so
2014 * we should test against last_ack_sent instead of rcv_nxt.
2015 * The sequence number in the reset segment is normally an
2016 * echo of our outgoing acknowledgement numbers, but some hosts
2017 * send a reset with the sequence number at the rightmost edge
2018 * of our receive window, and we have to handle this case.
2019 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
2020 * that brute force RST attacks are possible. To combat this,
2021 * we use a much stricter check while in the ESTABLISHED state,
2022 * only accepting RSTs where the sequence number is equal to
2023 * last_ack_sent. In all other states (the states in which a
2024 * RST is more likely), the more permissive check is used.
2025 * If we have multiple segments in flight, the initial reset
2026 * segment sequence numbers will be to the left of last_ack_sent,
2027 * but they will eventually catch up.
2028 * In any case, it never made sense to trim reset segments to
2029 * fit the receive window since RFC 1122 says:
2030 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
2031 *
2032 * A TCP SHOULD allow a received RST segment to include data.
2033 *
2034 * DISCUSSION
2035 * It has been suggested that a RST segment could contain
2036 * ASCII text that encoded and explained the cause of the
2037 * RST. No standard has yet been established for such
2038 * data.
2039 *
2040 * If the reset segment passes the sequence number test examine
2041 * the state:
2042 * SYN_RECEIVED STATE:
2043 * If passive open, return to LISTEN state.
2044 * If active open, inform user that connection was refused.
2045 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
2046 * Inform user that connection was reset, and close tcb.
2047 * CLOSING, LAST_ACK STATES:
2048 * Close the tcb.
2049 * TIME_WAIT STATE:
2050 * Drop the segment - see Stevens, vol. 2, p. 964 and
2051 * RFC 1337.
2052 */
2053 if (thflags & TH_RST) {
2054 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2055 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2056 switch (tp->t_state) {
2057
2058 case TCPS_SYN_RECEIVED:
2059 so->so_error = ECONNREFUSED;
2060 goto close;
2061
2062 case TCPS_ESTABLISHED:
2063 if (V_tcp_insecure_rst == 0 &&
2064 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
2065 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
2066 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2067 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
2068 TCPSTAT_INC(tcps_badrst);
2069 goto drop;
2070 }
2071 /* FALLTHROUGH */
2072 case TCPS_FIN_WAIT_1:
2073 case TCPS_FIN_WAIT_2:
2074 case TCPS_CLOSE_WAIT:
2075 so->so_error = ECONNRESET;
2076 close:
2077 KASSERT(ti_locked == TI_WLOCKED,
2078 ("tcp_do_segment: TH_RST 1 ti_locked %d",
2079 ti_locked));
2080 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2081
2082 tp->t_state = TCPS_CLOSED;
2083 TCPSTAT_INC(tcps_drops);
2084 tp = tcp_close(tp);
2085 break;
2086
2087 case TCPS_CLOSING:
2088 case TCPS_LAST_ACK:
2089 KASSERT(ti_locked == TI_WLOCKED,
2090 ("tcp_do_segment: TH_RST 2 ti_locked %d",
2091 ti_locked));
2092 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2093
2094 tp = tcp_close(tp);
2095 break;
2096 }
2097 }
2098 goto drop;
2099 }
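
/*
 * A sketch of the two-tier RST acceptance test above, assuming 32-bit
 * serial-number compares; rst_acceptable() and its arguments are
 * hypothetical names. The kernel additionally accepts rcv_nxt +/- 1
 * while ESTABLISHED; that case is omitted here for brevity.
 */
#if 0
#include <stdint.h>

#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)
#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

static int
rst_acceptable(uint32_t seq, uint32_t last_ack_sent, uint32_t rcv_wnd,
    int established, int insecure_rst)
{
	/* Permissive test: anywhere in the advertised window. */
	if (!(SEQ_GEQ(seq, last_ack_sent - 1) &&
	    SEQ_LEQ(seq, last_ack_sent + rcv_wnd)))
		return (0);
	/*
	 * Strict test while ESTABLISHED (counters "Slipping in the
	 * Window"): the RST must land within one of last_ack_sent.
	 */
	if (established && !insecure_rst &&
	    !(SEQ_GEQ(seq, last_ack_sent - 1) &&
	    SEQ_LEQ(seq, last_ack_sent + 1)))
		return (0);
	return (1);
}
#endif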
2100
2101 /*
2102 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2103 * and it's less than ts_recent, drop it.
2104 */
2105 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2106 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2107
2108 /* Check to see if ts_recent is over 24 days old. */
2109 if (ticks - tp->ts_recent_age > TCP_PAWS_IDLE) {
2110 /*
2111 * Invalidate ts_recent. If this segment updates
2112 * ts_recent, the age will be reset later and ts_recent
2113 * will get a valid value. If it does not, setting
2114 * ts_recent to zero will at least satisfy the
2115 * requirement that zero be placed in the timestamp
2116 * echo reply when ts_recent isn't valid. The
2117 * age isn't reset until we get a valid ts_recent
2118 * because we don't want out-of-order segments to be
2119 * dropped when ts_recent is old.
2120 */
2121 tp->ts_recent = 0;
2122 } else {
2123 TCPSTAT_INC(tcps_rcvduppack);
2124 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2125 TCPSTAT_INC(tcps_pawsdrop);
2126 if (tlen)
2127 goto dropafterack;
2128 goto drop;
2129 }
2130 }
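
/*
 * A sketch of the PAWS decision above; paws_reject() and its
 * arguments are hypothetical names. The kernel also zeroes ts_recent
 * on the "too old" path, which is left out here.
 */
#if 0
#include <stdint.h>

#define TSTMP_LT(a, b)	((int32_t)((a) - (b)) < 0)

static int
paws_reject(uint32_t tsval, uint32_t ts_recent, uint32_t ts_recent_age,
    uint32_t now, uint32_t paws_idle)
{
	if (ts_recent == 0 || !TSTMP_LT(tsval, ts_recent))
		return (0);		/* timestamp acceptable */
	if (now - ts_recent_age > paws_idle)
		return (0);		/* ts_recent too stale to trust */
	return (1);			/* drop: PAWS failure */
}
#endif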
2131
2132 /*
2133 * In the SYN-RECEIVED state, validate that the packet belongs to
2134 * this connection before trimming the data to fit the receive
2135 * window. Check the sequence number versus IRS since we know
2136 * the sequence numbers haven't wrapped. This is a partial fix
2137 * for the "LAND" DoS attack.
2138 */
2139 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2140 rstreason = BANDLIM_RST_OPENPORT;
2141 goto dropwithreset;
2142 }
2143
2144 todrop = tp->rcv_nxt - th->th_seq;
2145 if (todrop > 0) {
2146 /*
2147 * If this is a duplicate SYN for our current connection,
2148 * advance over it and pretend it's not a SYN.
2149 */
2150 if (thflags & TH_SYN && th->th_seq == tp->irs) {
2151 thflags &= ~TH_SYN;
2152 th->th_seq++;
2153 if (th->th_urp > 1)
2154 th->th_urp--;
2155 else
2156 thflags &= ~TH_URG;
2157 todrop--;
2158 }
2159 /*
2160 * Following if statement from Stevens, vol. 2, p. 960.
2161 */
2162 if (todrop > tlen
2163 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2164 /*
2165 * Any valid FIN must be to the left of the window.
2166 * At this point the FIN must be a duplicate or out
2167 * of sequence; drop it.
2168 */
2169 thflags &= ~TH_FIN;
2170
2171 /*
2172 * Send an ACK to resynchronize and drop any data.
2173 * But keep on processing for RST or ACK.
2174 */
2175 tp->t_flags |= TF_ACKNOW;
2176 todrop = tlen;
2177 TCPSTAT_INC(tcps_rcvduppack);
2178 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2179 } else {
2180 TCPSTAT_INC(tcps_rcvpartduppack);
2181 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2182 }
2183 drop_hdrlen += todrop; /* drop from the top afterwards */
2184 th->th_seq += todrop;
2185 tlen -= todrop;
2186 if (th->th_urp > todrop)
2187 th->th_urp -= todrop;
2188 else {
2189 thflags &= ~TH_URG;
2190 th->th_urp = 0;
2191 }
2192 }
2193
2194 /*
2195 * If new data are received on a connection after the
2196 * user processes are gone, then RST the other end.
2197 */
2198 if ((so->so_state & SS_NOFDREF) &&
2199 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2200 char *s;
2201
2202 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && "
2203 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2204 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2205
2206 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
2207 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket "
2208 "was closed, sending RST and removing tcpcb\n",
2209 s, __func__, tcpstates[tp->t_state], tlen);
2210 free(s, M_TCPLOG);
2211 }
2212 tp = tcp_close(tp);
2213 TCPSTAT_INC(tcps_rcvafterclose);
2214 rstreason = BANDLIM_UNLIMITED;
2215 goto dropwithreset;
2216 }
2217
2218 /*
2219 * If segment ends after window, drop trailing data
2220 * (and PUSH and FIN); if nothing left, just ACK.
2221 */
2222 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2223 if (todrop > 0) {
2224 TCPSTAT_INC(tcps_rcvpackafterwin);
2225 if (todrop >= tlen) {
2226 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2227 /*
2228 * If window is closed can only take segments at
2229 * window edge, and have to drop data and PUSH from
2230 * incoming segments. Continue processing, but
2231 * remember to ack. Otherwise, drop segment
2232 * and ack.
2233 */
2234 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2235 tp->t_flags |= TF_ACKNOW;
2236 TCPSTAT_INC(tcps_rcvwinprobe);
2237 } else
2238 goto dropafterack;
2239 } else
2240 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2241 m_adj(m, -todrop);
2242 tlen -= todrop;
2243 thflags &= ~(TH_PUSH|TH_FIN);
2244 }
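
/*
 * A worked standalone sketch of the two trims above (duplicate data
 * before rcv_nxt, then data beyond the advertised window); all
 * variables are hypothetical local copies, not the live tcpcb.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t rcv_nxt = 1000, rcv_wnd = 500;
	uint32_t th_seq = 900;		/* overlaps on both sides */
	int tlen = 700;

	int head = (int)(rcv_nxt - th_seq);	/* 100 duplicate bytes */
	if (head > 0) {
		th_seq += head;
		tlen -= head;			/* 600 bytes remain */
	}
	int tail = (int)((th_seq + tlen) - (rcv_nxt + rcv_wnd));
	if (tail > 0)
		tlen -= tail;			/* drop 100 trailing bytes */
	printf("seq %u len %d\n", th_seq, tlen); /* seq 1000 len 500 */
	return (0);
}
#endif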
2245
2246 /*
2247 * If last ACK falls within this segment's sequence numbers,
2248 * record its timestamp.
2249 * NOTE:
2250 * 1) That the test incorporates suggestions from the latest
2251 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2252 * 2) That updating only on newer timestamps interferes with
2253 * our earlier PAWS tests, so this check should be solely
2254 * predicated on the sequence space of this segment.
2255 * 3) That we modify the segment boundary check to be
2256 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2257 * instead of RFC1323's
2258 * Last.ACK.Sent < SEG.SEQ + SEG.Len.
2259 * This modified check allows us to overcome RFC1323's
2260 * limitations as described in Stevens TCP/IP Illustrated
2261 * Vol. 2 p.869. In such cases, we can still calculate the
2262 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2263 */
2264 if ((to.to_flags & TOF_TS) != 0 &&
2265 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2266 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2267 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2268 tp->ts_recent_age = ticks;
2269 tp->ts_recent = to.to_tsval;
2270 }
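
/*
 * A sketch of the recording predicate above, with the modified <=
 * right-edge test from note 3; should_record_tstamp() is a
 * hypothetical name.
 */
#if 0
#include <stdint.h>

#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)

static int
should_record_tstamp(uint32_t seq, int len, int syn_or_fin,
    uint32_t last_ack_sent)
{
	/* Last.ACK.Sent must fall within [SEG.SEQ, SEG.SEQ + SEG.Len]. */
	return (SEQ_LEQ(seq, last_ack_sent) &&
	    SEQ_LEQ(last_ack_sent, seq + len + (syn_or_fin ? 1 : 0)));
}
#endif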
2271
2272 /*
2273 * If a SYN is in the window, then this is an
2274 * error and we send an RST and drop the connection.
2275 */
2276 if (thflags & TH_SYN) {
2277 KASSERT(ti_locked == TI_WLOCKED,
2278 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2279 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2280
2281 tp = tcp_drop(tp, ECONNRESET);
2282 rstreason = BANDLIM_UNLIMITED;
2283 goto drop;
2284 }
2285
2286 /*
2287 * If the ACK bit is off: if in SYN-RECEIVED state or NEEDSYN
2288 * flag is on (half-synchronized state), then queue data for
2289 * later processing; else drop segment and return.
2290 */
2291 if ((thflags & TH_ACK) == 0) {
2292 if (tp->t_state == TCPS_SYN_RECEIVED ||
2293 (tp->t_flags & TF_NEEDSYN))
2294 goto step6;
2295 else if (tp->t_flags & TF_ACKNOW)
2296 goto dropafterack;
2297 else
2298 goto drop;
2299 }
2300
2301 /*
2302 * Ack processing.
2303 */
2304 switch (tp->t_state) {
2305
2306 /*
2307 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2308 * ESTABLISHED state and continue processing.
2309 * The ACK was checked above.
2310 */
2311 case TCPS_SYN_RECEIVED:
2312
2313 TCPSTAT_INC(tcps_connects);
2314 soisconnected(so);
2315 /* Do window scaling? */
2316 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2317 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2318 tp->rcv_scale = tp->request_r_scale;
2319 tp->snd_wnd = tiwin;
2320 }
2321 /*
2322 * Make transitions:
2323 * SYN-RECEIVED -> ESTABLISHED
2324 * SYN-RECEIVED* -> FIN-WAIT-1
2325 */
2326 tp->t_starttime = ticks;
2327 if (tp->t_flags & TF_NEEDFIN) {
2328 tp->t_state = TCPS_FIN_WAIT_1;
2329 tp->t_flags &= ~TF_NEEDFIN;
2330 } else {
2331 tp->t_state = TCPS_ESTABLISHED;
2332 cc_conn_init(tp);
2333 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
2334 }
2335 /*
2336 * If segment contains data or FIN, will call tcp_reass()
2337 * later; if not, do so now to pass queued data to user.
2338 */
2339 if (tlen == 0 && (thflags & TH_FIN) == 0)
2340 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2341 (struct mbuf *)0);
2342 tp->snd_wl1 = th->th_seq - 1;
2343 /* FALLTHROUGH */
2344
2345 /*
2346 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2347 * ACKs. If the ack is in the range
2348 * tp->snd_una < th->th_ack <= tp->snd_max
2349 * then advance tp->snd_una to th->th_ack and drop
2350 * data from the retransmission queue. If this ACK reflects
2351 * more up to date window information we update our window information.
2352 */
2353 case TCPS_ESTABLISHED:
2354 case TCPS_FIN_WAIT_1:
2355 case TCPS_FIN_WAIT_2:
2356 case TCPS_CLOSE_WAIT:
2357 case TCPS_CLOSING:
2358 case TCPS_LAST_ACK:
2359 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2360 TCPSTAT_INC(tcps_rcvacktoomuch);
2361 goto dropafterack;
2362 }
2363 if ((tp->t_flags & TF_SACK_PERMIT) &&
2364 ((to.to_flags & TOF_SACK) ||
2365 !TAILQ_EMPTY(&tp->snd_holes)))
2366 tcp_sack_doack(tp, &to, th->th_ack);
2367
2368 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2369 hhook_run_tcp_est_in(tp, th, &to);
2370
2371 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2372 if (tlen == 0 && tiwin == tp->snd_wnd) {
2373 TCPSTAT_INC(tcps_rcvdupack);
2374 /*
2375 * If we have outstanding data (other than
2376 * a window probe), this is a completely
2377 * duplicate ack (ie, window info didn't
2378 * change), the ack is the biggest we've
2379 * seen and we've seen exactly our rexmt
2380 * threshold of them, assume a packet
2381 * has been dropped and retransmit it.
2382 * Kludge snd_nxt & the congestion
2383 * window so we send only this one
2384 * packet.
2385 *
2386 * We know we're losing at the current
2387 * window size so do congestion avoidance
2388 * (set ssthresh to half the current window
2389 * and pull our congestion window back to
2390 * the new ssthresh).
2391 *
2392 * Dup acks mean that packets have left the
2393 * network (they're now cached at the receiver)
2394 * so bump cwnd by the amount in the receiver
2395 * to keep a constant cwnd packets in the
2396 * network.
2397 *
2398 * When using TCP ECN, notify the peer that
2399 * we reduced the cwnd.
2400 */
2401 if (!tcp_timer_active(tp, TT_REXMT) ||
2402 th->th_ack != tp->snd_una)
2403 tp->t_dupacks = 0;
2404 else if (++tp->t_dupacks > tcprexmtthresh ||
2405 IN_FASTRECOVERY(tp->t_flags)) {
2406 cc_ack_received(tp, th, CC_DUPACK);
2407 if ((tp->t_flags & TF_SACK_PERMIT) &&
2408 IN_FASTRECOVERY(tp->t_flags)) {
2409 int awnd;
2410
2411 /*
2412 * Compute the amount of data in flight first.
2413 * We can inject new data into the pipe iff
2414 * we have less than 1/2 the original window's
2415 * worth of data in flight.
2416 */
2417 awnd = (tp->snd_nxt - tp->snd_fack) +
2418 tp->sackhint.sack_bytes_rexmit;
2419 if (awnd < tp->snd_ssthresh) {
2420 tp->snd_cwnd += tp->t_maxseg;
2421 if (tp->snd_cwnd > tp->snd_ssthresh)
2422 tp->snd_cwnd = tp->snd_ssthresh;
2423 }
2424 } else
2425 tp->snd_cwnd += tp->t_maxseg;
2426 (void) tcp_output(tp);
2427 goto drop;
2428 } else if (tp->t_dupacks == tcprexmtthresh) {
2429 tcp_seq onxt = tp->snd_nxt;
2430
2431 /*
2432 * If we're doing sack, check to
2433 * see if we're already in sack
2434 * recovery. If we're not doing sack,
2435 * check to see if we're in newreno
2436 * recovery.
2437 */
2438 if (tp->t_flags & TF_SACK_PERMIT) {
2439 if (IN_FASTRECOVERY(tp->t_flags)) {
2440 tp->t_dupacks = 0;
2441 break;
2442 }
2443 } else {
2444 if (SEQ_LEQ(th->th_ack,
2445 tp->snd_recover)) {
2446 tp->t_dupacks = 0;
2447 break;
2448 }
2449 }
2450 /* Congestion signal before ack. */
2451 cc_cong_signal(tp, th, CC_NDUPACK);
2452 cc_ack_received(tp, th, CC_DUPACK);
2453 tcp_timer_activate(tp, TT_REXMT, 0);
2454 tp->t_rtttime = 0;
2455 if (tp->t_flags & TF_SACK_PERMIT) {
2456 TCPSTAT_INC(
2457 tcps_sack_recovery_episode);
2458 tp->sack_newdata = tp->snd_nxt;
2459 tp->snd_cwnd = tp->t_maxseg;
2460 (void) tcp_output(tp);
2461 goto drop;
2462 }
2463 tp->snd_nxt = th->th_ack;
2464 tp->snd_cwnd = tp->t_maxseg;
2465 (void) tcp_output(tp);
2466 KASSERT(tp->snd_limited <= 2,
2467 ("%s: tp->snd_limited too big",
2468 __func__));
2469 tp->snd_cwnd = tp->snd_ssthresh +
2470 tp->t_maxseg *
2471 (tp->t_dupacks - tp->snd_limited);
2472 if (SEQ_GT(onxt, tp->snd_nxt))
2473 tp->snd_nxt = onxt;
2474 goto drop;
2475 } else if (V_tcp_do_rfc3042) {
2476 cc_ack_received(tp, th, CC_DUPACK);
2477 u_long oldcwnd = tp->snd_cwnd;
2478 tcp_seq oldsndmax = tp->snd_max;
2479 u_int sent;
2480
2481 KASSERT(tp->t_dupacks == 1 ||
2482 tp->t_dupacks == 2,
2483 ("%s: dupacks not 1 or 2",
2484 __func__));
2485 if (tp->t_dupacks == 1)
2486 tp->snd_limited = 0;
2487 tp->snd_cwnd =
2488 (tp->snd_nxt - tp->snd_una) +
2489 (tp->t_dupacks - tp->snd_limited) *
2490 tp->t_maxseg;
2491 (void) tcp_output(tp);
2492 sent = tp->snd_max - oldsndmax;
2493 if (sent > tp->t_maxseg) {
2494 KASSERT((tp->t_dupacks == 2 &&
2495 tp->snd_limited == 0) ||
2496 (sent == tp->t_maxseg + 1 &&
2497 tp->t_flags & TF_SENTFIN),
2498 ("%s: sent too much",
2499 __func__));
2500 tp->snd_limited = 2;
2501 } else if (sent > 0)
2502 ++tp->snd_limited;
2503 tp->snd_cwnd = oldcwnd;
2504 goto drop;
2505 }
2506 } else
2507 tp->t_dupacks = 0;
2508 break;
2509 }
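
/*
 * A worked sketch of the non-SACK arithmetic above on the third
 * duplicate ACK; numbers are hypothetical, and the congestion signal
 * is modeled as half the window clamped below at two segments, an
 * approximation in the spirit of NewReno rather than a quote of
 * cc_cong_signal().
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long maxseg = 1460, flight = 20 * 1460;
	unsigned long ssthresh, cwnd;
	int dupacks = 3, snd_limited = 0;

	ssthresh = flight / 2;			/* congestion signal */
	if (ssthresh < 2 * maxseg)
		ssthresh = 2 * maxseg;
	cwnd = maxseg;				/* send just the lost segment */
	/* ... tcp_output() retransmits snd_una here ... */
	cwnd = ssthresh + maxseg * (dupacks - snd_limited);
	printf("ssthresh %lu cwnd %lu\n", ssthresh, cwnd);
	return (0);				/* ssthresh 14600 cwnd 18980 */
}
#endif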
2510
2511 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2512 ("%s: th_ack <= snd_una", __func__));
2513
2514 /*
2515 * If the congestion window was inflated to account
2516 * for the other side's cached packets, retract it.
2517 */
2518 if (IN_FASTRECOVERY(tp->t_flags)) {
2519 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2520 if (tp->t_flags & TF_SACK_PERMIT)
2521 tcp_sack_partialack(tp, th);
2522 else
2523 tcp_newreno_partial_ack(tp, th);
2524 } else
2525 cc_post_recovery(tp, th);
2526 }
2527 tp->t_dupacks = 0;
2528 /*
2529 * If we reach this point, ACK is not a duplicate,
2530 * i.e., it ACKs something we sent.
2531 */
2532 if (tp->t_flags & TF_NEEDSYN) {
2533 /*
2534 * T/TCP: Connection was half-synchronized, and our
2535 * SYN has been ACK'd (so connection is now fully
2536 * synchronized). Go to non-starred state,
2537 * increment snd_una for ACK of SYN, and check if
2538 * we can do window scaling.
2539 */
2540 tp->t_flags &= ~TF_NEEDSYN;
2541 tp->snd_una++;
2542 /* Do window scaling? */
2543 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2544 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2545 tp->rcv_scale = tp->request_r_scale;
2546 /* Send window already scaled. */
2547 }
2548 }
2549
2550 process_ACK:
2551 INP_WLOCK_ASSERT(tp->t_inpcb);
2552
2553 acked = BYTES_THIS_ACK(tp, th);
2554 TCPSTAT_INC(tcps_rcvackpack);
2555 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2556
2557 /*
2558 * If we just performed our first retransmit, and the ACK
2559 * arrives within our recovery window, then it was a mistake
2560 * to do the retransmit in the first place. Recover our
2561 * original cwnd and ssthresh, and proceed to transmit where
2562 * we left off.
2563 */
2564 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2565 (int)(ticks - tp->t_badrxtwin) < 0)
2566 cc_cong_signal(tp, th, CC_RTO_ERR);
2567
2568 /*
2569 * If we have a timestamp reply, update smoothed
2570 * round trip time. If no timestamp is present but
2571 * transmit timer is running and timed sequence
2572 * number was acked, update smoothed round trip time.
2573 * Since we now have an rtt measurement, cancel the
2574 * timer backoff (cf., Phil Karn's retransmit alg.).
2575 * Recompute the initial retransmit timer.
2576 *
2577 * Some boxes send broken timestamp replies
2578 * during the SYN+ACK phase; ignore
2579 * timestamps of 0, or we could calculate a
2580 * huge RTT and blow up the retransmit timer.
2581 */
2582 if ((to.to_flags & TOF_TS) != 0 &&
2583 to.to_tsecr) {
2584 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
2585 tp->t_rttlow = ticks - to.to_tsecr;
2586 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
2587 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2588 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2589 tp->t_rttlow = ticks - tp->t_rtttime;
2590 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2591 }
2592
2593 /*
2594 * If all outstanding data is acked, stop retransmit
2595 * timer and remember to restart (more output or persist).
2596 * If there is more data to be acked, restart retransmit
2597 * timer, using current (possibly backed-off) value.
2598 */
2599 if (th->th_ack == tp->snd_max) {
2600 tcp_timer_activate(tp, TT_REXMT, 0);
2601 needoutput = 1;
2602 } else if (!tcp_timer_active(tp, TT_PERSIST))
2603 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2604
2605 /*
2606 * If no data (only SYN) was ACK'd,
2607 * skip rest of ACK processing.
2608 */
2609 if (acked == 0)
2610 goto step6;
2611
2612 /*
2613 * Let the congestion control algorithm update congestion
2614 * control related information. This typically means increasing
2615 * the congestion window.
2616 */
2617 cc_ack_received(tp, th, CC_ACK);
2618
2619 SOCKBUF_LOCK(&so->so_snd);
2620 if (acked > so->so_snd.sb_cc) {
2621 tp->snd_wnd -= so->so_snd.sb_cc;
2622 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2623 ourfinisacked = 1;
2624 } else {
2625 sbdrop_locked(&so->so_snd, acked);
2626 tp->snd_wnd -= acked;
2627 ourfinisacked = 0;
2628 }
2629 /* NB: sowwakeup_locked() does an implicit unlock. */
2630 sowwakeup_locked(so);
2631 /* Detect una wraparound. */
2632 if (!IN_RECOVERY(tp->t_flags) &&
2633 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2634 SEQ_LEQ(th->th_ack, tp->snd_recover))
2635 tp->snd_recover = th->th_ack - 1;
2636 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2637 if (IN_RECOVERY(tp->t_flags) &&
2638 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2639 EXIT_RECOVERY(tp->t_flags);
2640 }
2641 tp->snd_una = th->th_ack;
2642 if (tp->t_flags & TF_SACK_PERMIT) {
2643 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2644 tp->snd_recover = tp->snd_una;
2645 }
2646 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2647 tp->snd_nxt = tp->snd_una;
2648
2649 switch (tp->t_state) {
2650
2651 /*
2652 * In FIN_WAIT_1 STATE in addition to the processing
2653 * for the ESTABLISHED state if our FIN is now acknowledged
2654 * then enter FIN_WAIT_2.
2655 */
2656 case TCPS_FIN_WAIT_1:
2657 if (ourfinisacked) {
2658 /*
2659 * If we can't receive any more
2660 * data, then closing user can proceed.
2661 * Starting the timer is contrary to the
2662 * specification, but if we don't get a FIN
2663 * we'll hang forever.
2664 *
2665 * XXXjl:
2666 * we should release the tp also, and use a
2667 * compressed state.
2668 */
2669 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2670 int timeout;
2671
2672 soisdisconnected(so);
2673 timeout = (tcp_fast_finwait2_recycle) ?
2674 tcp_finwait2_timeout : tcp_maxidle;
2675 tcp_timer_activate(tp, TT_2MSL, timeout);
2676 }
2677 tp->t_state = TCPS_FIN_WAIT_2;
2678 }
2679 break;
2680
2681 /*
2682 * In CLOSING STATE in addition to the processing for
2683 * the ESTABLISHED state if the ACK acknowledges our FIN
2684 * then enter the TIME-WAIT state, otherwise ignore
2685 * the segment.
2686 */
2687 case TCPS_CLOSING:
2688 if (ourfinisacked) {
2689 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2690 tcp_twstart(tp);
2691 INP_INFO_WUNLOCK(&V_tcbinfo);
2692 m_freem(m);
2693 return;
2694 }
2695 break;
2696
2697 /*
2698 * In LAST_ACK, we may still be waiting for data to drain
2699 * and/or to be acked, as well as for the ack of our FIN.
2700 * If our FIN is now acknowledged, delete the TCB,
2701 * enter the closed state and return.
2702 */
2703 case TCPS_LAST_ACK:
2704 if (ourfinisacked) {
2705 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2706 tp = tcp_close(tp);
2707 goto drop;
2708 }
2709 break;
2710 }
2711 }
2712
2713 step6:
2714 INP_WLOCK_ASSERT(tp->t_inpcb);
2715
2716 /*
2717 * Update window information.
2718 * Don't look at window if no ACK: TACs send garbage on first SYN.
2719 */
2720 if ((thflags & TH_ACK) &&
2721 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2722 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2723 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2724 /* keep track of pure window updates */
2725 if (tlen == 0 &&
2726 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2727 TCPSTAT_INC(tcps_rcvwinupd);
2728 tp->snd_wnd = tiwin;
2729 tp->snd_wl1 = th->th_seq;
2730 tp->snd_wl2 = th->th_ack;
2731 if (tp->snd_wnd > tp->max_sndwnd)
2732 tp->max_sndwnd = tp->snd_wnd;
2733 needoutput = 1;
2734 }
2735
2736 /*
2737 * Process segments with URG.
2738 */
2739 if ((thflags & TH_URG) && th->th_urp &&
2740 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2741 /*
2742 * This is a kludge, but if we receive and accept
2743 * random urgent pointers, we'll crash in
2744 * soreceive. It's hard to imagine someone
2745 * actually wanting to send this much urgent data.
2746 */
2747 SOCKBUF_LOCK(&so->so_rcv);
2748 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2749 th->th_urp = 0; /* XXX */
2750 thflags &= ~TH_URG; /* XXX */
2751 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2752 goto dodata; /* XXX */
2753 }
2754 /*
2755 * If this segment advances the known urgent pointer,
2756 * then mark the data stream. This should not happen
2757 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2758 * a FIN has been received from the remote side.
2759 * In these states we ignore the URG.
2760 *
2761 * According to RFC961 (Assigned Protocols),
2762 * the urgent pointer points to the last octet
2763 * of urgent data. We continue, however,
2764 * to consider it to indicate the first octet
2765 * of data past the urgent section as the original
2766 * spec states (in one of two places).
2767 */
2768 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2769 tp->rcv_up = th->th_seq + th->th_urp;
2770 so->so_oobmark = so->so_rcv.sb_cc +
2771 (tp->rcv_up - tp->rcv_nxt) - 1;
2772 if (so->so_oobmark == 0)
2773 so->so_rcv.sb_state |= SBS_RCVATMARK;
2774 sohasoutofband(so);
2775 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2776 }
2777 SOCKBUF_UNLOCK(&so->so_rcv);
2778 /*
2779 * Remove out of band data so doesn't get presented to user.
2780 * This can happen independent of advancing the URG pointer,
2781 * but if two URG's are pending at once, some out-of-band
2782 * data may creep in... ick.
2783 */
2784 if (th->th_urp <= (u_long)tlen &&
2785 !(so->so_options & SO_OOBINLINE)) {
2786 /* hdr drop is delayed */
2787 tcp_pulloutofband(so, th, m, drop_hdrlen);
2788 }
2789 } else {
2790 /*
2791 * If no out of band data is expected,
2792 * pull receive urgent pointer along
2793 * with the receive window.
2794 */
2795 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2796 tp->rcv_up = tp->rcv_nxt;
2797 }
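
/*
 * A worked sketch of the OOB mark arithmetic above: so_oobmark is the
 * offset, within unread socket data, of the byte just before the
 * urgent byte. All numbers here are hypothetical.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned sb_cc = 300;		/* unread bytes already buffered */
	unsigned rcv_nxt = 5000;	/* next expected sequence number */
	unsigned th_seq = 5000, th_urp = 20;
	unsigned rcv_up = th_seq + th_urp;		/* 5020 */
	unsigned oobmark = sb_cc + (rcv_up - rcv_nxt) - 1;

	printf("oobmark %u\n", oobmark);	/* 319 */
	return (0);
}
#endif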
2798 dodata: /* XXX */
2799 INP_WLOCK_ASSERT(tp->t_inpcb);
2800
2801 /*
2802 * Process the segment text, merging it into the TCP sequencing queue,
2803 * and arranging for acknowledgment of receipt if necessary.
2804 * This process logically involves adjusting tp->rcv_wnd as data
2805 * is presented to the user (this happens in tcp_usrreq.c,
2806 * case PRU_RCVD). If a FIN has already been received on this
2807 * connection then we just ignore the text.
2808 */
2809 if ((tlen || (thflags & TH_FIN)) &&
2810 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2811 tcp_seq save_start = th->th_seq;
2812 m_adj(m, drop_hdrlen); /* delayed header drop */
2813 /*
2814 * Insert segment which includes th into TCP reassembly queue
2815 * with control block tp. Set thflags to whether reassembly now
2816 * includes a segment with FIN. This handles the common case
2817 * inline (segment is the next to be received on an established
2818 * connection, and the queue is empty), avoiding linkage into
2819 * and removal from the queue and repetition of various
2820 * conversions.
2821 * Set DELACK for segments received in order, but ack
2822 * immediately when segments are out of order (so
2823 * fast retransmit can work).
2824 */
2825 if (th->th_seq == tp->rcv_nxt &&
2826 LIST_EMPTY(&tp->t_segq) &&
2827 TCPS_HAVEESTABLISHED(tp->t_state)) {
2828 if (DELAY_ACK(tp))
2829 tp->t_flags |= TF_DELACK;
2830 else
2831 tp->t_flags |= TF_ACKNOW;
2832 tp->rcv_nxt += tlen;
2833 thflags = th->th_flags & TH_FIN;
2834 TCPSTAT_INC(tcps_rcvpack);
2835 TCPSTAT_ADD(tcps_rcvbyte, tlen);
2836 ND6_HINT(tp);
2837 SOCKBUF_LOCK(&so->so_rcv);
2838 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2839 m_freem(m);
2840 else
2841 sbappendstream_locked(&so->so_rcv, m);
2842 /* NB: sorwakeup_locked() does an implicit unlock. */
2843 sorwakeup_locked(so);
2844 } else {
2845 /*
2846 * XXX: Due to the header drop above "th" is
2847 * theoretically invalid by now. Fortunately
2848 * m_adj() doesn't actually free any mbufs
2849 * when trimming from the head.
2850 */
2851 thflags = tcp_reass(tp, th, &tlen, m);
2852 tp->t_flags |= TF_ACKNOW;
2853 }
2854 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2855 tcp_update_sack_list(tp, save_start, save_start + tlen);
2856 #if 0
2857 /*
2858 * Note the amount of data that peer has sent into
2859 * our window, in order to estimate the sender's
2860 * buffer size.
2861 * XXX: Unused.
2862 */
2863 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
2864 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2865 else
2866 len = so->so_rcv.sb_hiwat;
2867 #endif
2868 } else {
2869 m_freem(m);
2870 thflags &= ~TH_FIN;
2871 }
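
/*
 * The common-case test above, reduced to a standalone predicate;
 * in_order_fastpath() is a hypothetical name. All three conditions
 * must hold for a segment to bypass tcp_reass().
 */
#if 0
#include <stdint.h>

static int
in_order_fastpath(uint32_t th_seq, uint32_t rcv_nxt, int queue_empty,
    int have_established)
{
	return (th_seq == rcv_nxt && queue_empty && have_established);
}
#endif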
2872
2873 /*
2874 * If FIN is received ACK the FIN and let the user know
2875 * that the connection is closing.
2876 */
2877 if (thflags & TH_FIN) {
2878 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2879 socantrcvmore(so);
2880 /*
2881 * If connection is half-synchronized
2882 * (ie NEEDSYN flag on) then delay ACK,
2883 * so it may be piggybacked when SYN is sent.
2884 * Otherwise, since we received a FIN then no
2885 * more input can be expected, send ACK now.
2886 */
2887 if (tp->t_flags & TF_NEEDSYN)
2888 tp->t_flags |= TF_DELACK;
2889 else
2890 tp->t_flags |= TF_ACKNOW;
2891 tp->rcv_nxt++;
2892 }
2893 switch (tp->t_state) {
2894
2895 /*
2896 * In SYN_RECEIVED and ESTABLISHED STATES
2897 * enter the CLOSE_WAIT state.
2898 */
2899 case TCPS_SYN_RECEIVED:
2900 tp->t_starttime = ticks;
2901 /* FALLTHROUGH */
2902 case TCPS_ESTABLISHED:
2903 tp->t_state = TCPS_CLOSE_WAIT;
2904 break;
2905
2906 /*
2907 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2908 * enter the CLOSING state.
2909 */
2910 case TCPS_FIN_WAIT_1:
2911 tp->t_state = TCPS_CLOSING;
2912 break;
2913
2914 /*
2915 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2916 * starting the time-wait timer, turning off the other
2917 * standard timers.
2918 */
2919 case TCPS_FIN_WAIT_2:
2920 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2921 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
2922 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
2923 ti_locked));
2924
2925 tcp_twstart(tp);
2926 INP_INFO_WUNLOCK(&V_tcbinfo);
2927 return;
2928 }
2929 }
2930 if (ti_locked == TI_WLOCKED)
2931 INP_INFO_WUNLOCK(&V_tcbinfo);
2932 ti_locked = TI_UNLOCKED;
2933
2934 #ifdef TCPDEBUG
2935 if (so->so_options & SO_DEBUG)
2936 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
2937 &tcp_savetcp, 0);
2938 #endif
2939
2940 /*
2941 * Return any desired output.
2942 */
2943 if (needoutput || (tp->t_flags & TF_ACKNOW))
2944 (void) tcp_output(tp);
2945
2946 check_delack:
2947 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
2948 __func__, ti_locked));
2949 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
2950 INP_WLOCK_ASSERT(tp->t_inpcb);
2951
2952 if (tp->t_flags & TF_DELACK) {
2953 tp->t_flags &= ~TF_DELACK;
2954 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
2955 }
2956 INP_WUNLOCK(tp->t_inpcb);
2957 return;
2958
2959 dropafterack:
2960 /*
2961 * Generate an ACK, dropping the incoming segment, if it occupies
2962 * sequence space; the ACK reflects our state.
2963 *
2964 * We can now skip the test for the RST flag since all
2965 * paths to this code happen after packets containing
2966 * RST have been dropped.
2967 *
2968 * In the SYN-RECEIVED state, don't send an ACK unless the
2969 * segment we received passes the SYN-RECEIVED ACK test.
2970 * If it fails send a RST. This breaks the loop in the
2971 * "LAND" DoS attack, and also prevents an ACK storm
2972 * between two listening ports that have been sent forged
2973 * SYN segments, each with the source address of the other.
2974 */
2975 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2976 (SEQ_GT(tp->snd_una, th->th_ack) ||
2977 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2978 rstreason = BANDLIM_RST_OPENPORT;
2979 goto dropwithreset;
2980 }
2981 #ifdef TCPDEBUG
2982 if (so->so_options & SO_DEBUG)
2983 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2984 &tcp_savetcp, 0);
2985 #endif
2986 if (ti_locked == TI_WLOCKED)
2987 INP_INFO_WUNLOCK(&V_tcbinfo);
2988 ti_locked = TI_UNLOCKED;
2989
2990 tp->t_flags |= TF_ACKNOW;
2991 (void) tcp_output(tp);
2992 INP_WUNLOCK(tp->t_inpcb);
2993 m_freem(m);
2994 return;
2995
2996 dropwithreset:
2997 if (ti_locked == TI_WLOCKED)
2998 INP_INFO_WUNLOCK(&V_tcbinfo);
2999 ti_locked = TI_UNLOCKED;
3000
3001 if (tp != NULL) {
3002 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3003 INP_WUNLOCK(tp->t_inpcb);
3004 } else
3005 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3006 return;
3007
3008 drop:
3009 if (ti_locked == TI_WLOCKED) {
3010 INP_INFO_WUNLOCK(&V_tcbinfo);
3011 ti_locked = TI_UNLOCKED;
3012 }
3013 #ifdef INVARIANTS
3014 else
3015 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3016 #endif
3017
3018 /*
3019 * Drop space held by incoming segment and return.
3020 */
3021 #ifdef TCPDEBUG
3022 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3023 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3024 &tcp_savetcp, 0);
3025 #endif
3026 if (tp != NULL)
3027 INP_WUNLOCK(tp->t_inpcb);
3028 m_freem(m);
3029 }
3030
3031 /*
3032 * Issue RST and make ACK acceptable to originator of segment.
3033 * The mbuf must still include the original packet header.
3034 * tp may be NULL.
3035 */
3036 static void
3037 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3038 int tlen, int rstreason)
3039 {
3040 #ifdef INET
3041 struct ip *ip;
3042 #endif
3043 #ifdef INET6
3044 struct ip6_hdr *ip6;
3045 #endif
3046
3047 if (tp != NULL) {
3048 INP_WLOCK_ASSERT(tp->t_inpcb);
3049 }
3050
3051 /* Don't bother if destination was broadcast/multicast. */
3052 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3053 goto drop;
3054 #ifdef INET6
3055 if (mtod(m, struct ip *)->ip_v == 6) {
3056 ip6 = mtod(m, struct ip6_hdr *);
3057 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3058 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3059 goto drop;
3060 /* IPv6 anycast check is done at tcp6_input() */
3061 }
3062 #endif
3063 #if defined(INET) && defined(INET6)
3064 else
3065 #endif
3066 #ifdef INET
3067 {
3068 ip = mtod(m, struct ip *);
3069 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3070 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3071 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3072 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3073 goto drop;
3074 }
3075 #endif
3076
3077 /* Perform bandwidth limiting. */
3078 if (badport_bandlim(rstreason) < 0)
3079 goto drop;
3080
3081 /* tcp_respond consumes the mbuf chain. */
3082 if (th->th_flags & TH_ACK) {
3083 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3084 th->th_ack, TH_RST);
3085 } else {
3086 if (th->th_flags & TH_SYN)
3087 tlen++;
3088 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3089 (tcp_seq)0, TH_RST|TH_ACK);
3090 }
3091 return;
3092 drop:
3093 m_freem(m);
3094 }
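
/*
 * A sketch of the field selection above: echo the peer's ACK as our
 * SEQ when their segment carried ACK, otherwise send RST|ACK
 * acknowledging their whole segment (a SYN counts as one octet).
 * The struct and function names are hypothetical.
 */
#if 0
#include <stdint.h>

struct rst_fields {
	uint32_t seq, ack;
	int with_ack;		/* nonzero: RST|ACK, else bare RST */
};

static struct rst_fields
rst_for(uint32_t th_seq, uint32_t th_ack, int tlen, int had_ack, int had_syn)
{
	struct rst_fields r;

	if (had_ack) {
		r.seq = th_ack;		/* their ack becomes our seq */
		r.ack = 0;
		r.with_ack = 0;
	} else {
		r.seq = 0;
		r.ack = th_seq + tlen + (had_syn ? 1 : 0);
		r.with_ack = 1;
	}
	return (r);
}
#endif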
3095
3096 /*
3097 * Parse TCP options and place in tcpopt.
3098 */
3099 static void
3100 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3101 {
3102 int opt, optlen;
3103
3104 to->to_flags = 0;
3105 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3106 opt = cp[0];
3107 if (opt == TCPOPT_EOL)
3108 break;
3109 if (opt == TCPOPT_NOP)
3110 optlen = 1;
3111 else {
3112 if (cnt < 2)
3113 break;
3114 optlen = cp[1];
3115 if (optlen < 2 || optlen > cnt)
3116 break;
3117 }
3118 switch (opt) {
3119 case TCPOPT_MAXSEG:
3120 if (optlen != TCPOLEN_MAXSEG)
3121 continue;
3122 if (!(flags & TO_SYN))
3123 continue;
3124 to->to_flags |= TOF_MSS;
3125 bcopy((char *)cp + 2,
3126 (char *)&to->to_mss, sizeof(to->to_mss));
3127 to->to_mss = ntohs(to->to_mss);
3128 break;
3129 case TCPOPT_WINDOW:
3130 if (optlen != TCPOLEN_WINDOW)
3131 continue;
3132 if (!(flags & TO_SYN))
3133 continue;
3134 to->to_flags |= TOF_SCALE;
3135 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3136 break;
3137 case TCPOPT_TIMESTAMP:
3138 if (optlen != TCPOLEN_TIMESTAMP)
3139 continue;
3140 to->to_flags |= TOF_TS;
3141 bcopy((char *)cp + 2,
3142 (char *)&to->to_tsval, sizeof(to->to_tsval));
3143 to->to_tsval = ntohl(to->to_tsval);
3144 bcopy((char *)cp + 6,
3145 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3146 to->to_tsecr = ntohl(to->to_tsecr);
3147 break;
3148 #ifdef TCP_SIGNATURE
3149 /*
3150 * XXX In order to reply to a host which has set the
3151 * TCP_SIGNATURE option in its initial SYN, we have to
3152 * record the fact that the option was observed here
3153 * for the syncache code to perform the correct response.
3154 */
3155 case TCPOPT_SIGNATURE:
3156 if (optlen != TCPOLEN_SIGNATURE)
3157 continue;
3158 to->to_flags |= TOF_SIGNATURE;
3159 to->to_signature = cp + 2;
3160 break;
3161 #endif
3162 case TCPOPT_SACK_PERMITTED:
3163 if (optlen != TCPOLEN_SACK_PERMITTED)
3164 continue;
3165 if (!(flags & TO_SYN))
3166 continue;
3167 if (!V_tcp_do_sack)
3168 continue;
3169 to->to_flags |= TOF_SACKPERM;
3170 break;
3171 case TCPOPT_SACK:
3172 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3173 continue;
3174 if (flags & TO_SYN)
3175 continue;
3176 to->to_flags |= TOF_SACK;
3177 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3178 to->to_sacks = cp + 2;
3179 TCPSTAT_INC(tcps_sack_rcv_blocks);
3180 break;
3181 default:
3182 continue;
3183 }
3184 }
3185 }
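
/*
 * A standalone sketch of walking a raw options block with the same
 * kind/length discipline as above: NOP, NOP, TIMESTAMP. Byte-order
 * conversion (ntohl) is omitted since the sample bytes are zero;
 * real code must convert. Buffer contents are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	uint8_t opts[12] = { 1, 1, 8, 10 };	/* NOP NOP TS(len 10) */
	uint32_t tsval = 0, tsecr = 0;
	int cnt = (int)sizeof(opts), i = 0;

	while (i < cnt) {
		uint8_t kind = opts[i];
		if (kind == 0)			/* EOL: stop */
			break;
		if (kind == 1) {		/* NOP: one byte */
			i++;
			continue;
		}
		if (i + 1 >= cnt || opts[i + 1] < 2 || i + opts[i + 1] > cnt)
			break;			/* malformed length */
		if (kind == 8 && opts[i + 1] == 10) {
			memcpy(&tsval, &opts[i + 2], 4);
			memcpy(&tsecr, &opts[i + 6], 4);
		}
		i += opts[i + 1];
	}
	printf("tsval %u tsecr %u\n", tsval, tsecr);
	return (0);
}
#endif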
3186
3187 /*
3188 * Pull the out-of-band byte out of a segment so
3189 * that it doesn't appear in the user's data queue.
3190 * It is still reflected in the segment length for
3191 * sequencing purposes.
3192 */
3193 static void
3194 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3195 int off)
3196 {
3197 int cnt = off + th->th_urp - 1;
3198
3199 while (cnt >= 0) {
3200 if (m->m_len > cnt) {
3201 char *cp = mtod(m, caddr_t) + cnt;
3202 struct tcpcb *tp = sototcpcb(so);
3203
3204 INP_WLOCK_ASSERT(tp->t_inpcb);
3205
3206 tp->t_iobc = *cp;
3207 tp->t_oobflags |= TCPOOB_HAVEDATA;
3208 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3209 m->m_len--;
3210 if (m->m_flags & M_PKTHDR)
3211 m->m_pkthdr.len--;
3212 return;
3213 }
3214 cnt -= m->m_len;
3215 m = m->m_next;
3216 if (m == NULL)
3217 break;
3218 }
3219 panic("tcp_pulloutofband");
3220 }
3221
3222 /*
3223 * Collect new round-trip time estimate
3224 * and update averages and current timeout.
3225 */
3226 static void
3227 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3228 {
3229 int delta;
3230
3231 INP_WLOCK_ASSERT(tp->t_inpcb);
3232
3233 TCPSTAT_INC(tcps_rttupdated);
3234 tp->t_rttupdated++;
3235 if (tp->t_srtt != 0) {
3236 /*
3237 * srtt is stored as fixed point with 5 bits after the
3238 * binary point (i.e., scaled by 32). The following magic
3239 * is equivalent to the smoothing algorithm in rfc793 with
3240 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3241 * point). Adjust rtt to origin 0.
3242 */
3243 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3244 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3245
3246 if ((tp->t_srtt += delta) <= 0)
3247 tp->t_srtt = 1;
3248
3249 /*
3250 * We accumulate a smoothed rtt variance (actually, a
3251 * smoothed mean difference), then set the retransmit
3252 * timer to smoothed rtt + 4 times the smoothed variance.
3253 * rttvar is stored as fixed point with 4 bits after the
3254 * binary point (scaled by 16). The following is
3255 * equivalent to rfc793 smoothing with an alpha of .75
3256 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3257 * rfc793's wired-in beta.
3258 */
3259 if (delta < 0)
3260 delta = -delta;
3261 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3262 if ((tp->t_rttvar += delta) <= 0)
3263 tp->t_rttvar = 1;
3264 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3265 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3266 } else {
3267 /*
3268 * No rtt measurement yet - use the unsmoothed rtt.
3269 * Set the variance to half the rtt (so our first
3270 * retransmit happens at 3*rtt).
3271 */
3272 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3273 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3274 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3275 }
3276 tp->t_rtttime = 0;
3277 tp->t_rxtshift = 0;
3278
3279 /*
3280 * The retransmit should happen at rtt + 4 * rttvar.
3281 * Because of the way we do the smoothing, srtt and rttvar
3282 * will each average +1/2 tick of bias. When we compute
3283 * the retransmit timer, we want 1/2 tick of rounding and
3284 * 1 extra tick because of +-1/2 tick uncertainty in the
3285 * firing of the timer. The bias will give us exactly the
3286 * 1.5 tick we need. But, because the bias is
3287 * statistical, we have to test that we don't drop below
3288 * the minimum feasible timer (which is 2 ticks).
3289 */
3290 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3291 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3292
3293 /*
3294 * We received an ack for a packet that wasn't retransmitted;
3295 * it is probably safe to discard any error indications we've
3296 * received recently. This isn't quite right, but close enough
3297 * for now (a route might have failed after we sent a segment,
3298 * and the return path might not be symmetrical).
3299 */
3300 tp->t_softerror = 0;
3301 }
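
/*
 * A worked standalone sketch of the fixed-point smoothing above,
 * assuming the stock shift constants (TCP_RTT_SHIFT 5,
 * TCP_RTTVAR_SHIFT 4, TCP_DELTA_SHIFT 2): srtt is scaled by 32,
 * rttvar by 16. The sample values are hypothetical.
 */
#if 0
#include <stdio.h>

#define RTT_SHIFT	5
#define RTTVAR_SHIFT	4
#define DELTA_SHIFT	2

int
main(void)
{
	int srtt = 100 << RTT_SHIFT;	/* smoothed rtt: 100 ticks */
	int rttvar = 10 << RTTVAR_SHIFT;/* smoothed variance: 10 ticks */
	int rtt = 140;			/* new measurement */

	int delta = ((rtt - 1) << DELTA_SHIFT)
	    - (srtt >> (RTT_SHIFT - DELTA_SHIFT));
	srtt += delta;			/* srtt += (rtt - srtt)/8, scaled */
	if (delta < 0)
		delta = -delta;
	delta -= rttvar >> (RTTVAR_SHIFT - DELTA_SHIFT);
	rttvar += delta;		/* rttvar += (|err| - rttvar)/4 */
	printf("srtt %d/32 ticks, rttvar %d/16 ticks\n", srtt, rttvar);
	return (0);			/* 3356/32 = ~104.9, 276/16 = 17.25 */
}
#endif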
3302
3303 /*
3304 * Determine a reasonable value for maxseg size.
3305 * If the route is known, check route for mtu.
3306 * If none, use an mss that can be handled on the outgoing
3307 * interface without forcing IP to fragment; if bigger than
3308 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
3309 * to utilize large mbufs. If no route is found, route has no mtu,
3310 * or the destination isn't local, use a default, hopefully conservative
3311 * size (usually 512 or the default IP max size, but no more than the mtu
3312 * of the interface), as we can't discover anything about intervening
3313 * gateways or networks. We also initialize the congestion/slow start
3314 * window to be a single segment if the destination isn't local.
3315 * While looking at the routing entry, we also initialize other path-dependent
3316 * parameters from pre-set or cached values in the routing entry.
3317 *
3318 * Also take into account the space needed for options that we
3319 * send regularly. Make maxseg shorter by that amount to assure
3320 * that we can send maxseg amount of data even when the options
3321 * are present. Store the upper limit of the length of options plus
3322 * data in maxopd.
3323 *
3324 * In case of T/TCP, we call this routine during implicit connection
3325 * setup as well (offer = -1), to initialize maxseg from the cached
3326 * MSS of our peer.
3327 *
3328 * NOTE that this routine is only called when we process an incoming
3329 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
3330 */
3331 void
3332 tcp_mss_update(struct tcpcb *tp, int offer,
3333 struct hc_metrics_lite *metricptr, int *mtuflags)
3334 {
3335 int mss = 0;
3336 u_long maxmtu = 0;
3337 struct inpcb *inp = tp->t_inpcb;
3338 struct hc_metrics_lite metrics;
3339 int origoffer = offer;
3340 #ifdef INET6
3341 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3342 size_t min_protoh = isipv6 ?
3343 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3344 sizeof (struct tcpiphdr);
3345 #else
3346 const size_t min_protoh = sizeof(struct tcpiphdr);
3347 #endif
3348
3349 INP_WLOCK_ASSERT(tp->t_inpcb);
3350
3351 /* Initialize. */
3352 #ifdef INET6
3353 if (isipv6) {
3354 maxmtu = tcp_maxmtu6(&inp->inp_inc, mtuflags);
3355 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
3356 }
3357 #endif
3358 #if defined(INET) && defined(INET6)
3359 else
3360 #endif
3361 #ifdef INET
3362 {
3363 maxmtu = tcp_maxmtu(&inp->inp_inc, mtuflags);
3364 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
3365 }
3366 #endif
3367
3368 /*
3369 * No route to sender, stay with default mss and return.
3370 */
3371 if (maxmtu == 0) {
3372 /*
3373 * In case we return early we need to initialize metrics
3374 * to a defined state as tcp_hc_get() would do for us
3375 * if there was no cache hit.
3376 */
3377 if (metricptr != NULL)
3378 bzero(metricptr, sizeof(struct hc_metrics_lite));
3379 return;
3380 }
3381
3382 /* What have we got? */
3383 switch (offer) {
3384 case 0:
3385 /*
3386 * Offer == 0 means that there was no MSS on the SYN
3387 * segment, in this case we use tcp_mssdflt as
3388 * already assigned to t_maxopd above.
3389 */
3390 offer = tp->t_maxopd;
3391 break;
3392
3393 case -1:
3394 /*
3395 * Offer == -1 means that we didn't receive SYN yet.
3396 */
3397 /* FALLTHROUGH */
3398
3399 default:
3400 /*
3401 * Prevent DoS attack with too small MSS. Round up
3402 * to at least minmss.
3403 */
3404 offer = max(offer, V_tcp_minmss);
3405 }
3406
3407 /*
3408 * rmx information is now retrieved from tcp_hostcache.
3409 */
3410 tcp_hc_get(&inp->inp_inc, &metrics);
3411 if (metricptr != NULL)
3412 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3413
3414 /*
3415 * If there's a discovered mtu in the tcp hostcache, use it;
3416 * else, use the link mtu.
3417 */
3418 if (metrics.rmx_mtu)
3419 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3420 else {
3421 #ifdef INET6
3422 if (isipv6) {
3423 mss = maxmtu - min_protoh;
3424 if (!V_path_mtu_discovery &&
3425 !in6_localaddr(&inp->in6p_faddr))
3426 mss = min(mss, V_tcp_v6mssdflt);
3427 }
3428 #endif
3429 #if defined(INET) && defined(INET6)
3430 else
3431 #endif
3432 #ifdef INET
3433 {
3434 mss = maxmtu - min_protoh;
3435 if (!V_path_mtu_discovery &&
3436 !in_localaddr(inp->inp_faddr))
3437 mss = min(mss, V_tcp_mssdflt);
3438 }
3439 #endif
3440 /*
3441 * XXX - The above conditional (mss = maxmtu - min_protoh)
3442 * probably violates the TCP spec.
3443 * The problem is that, since we don't know the
3444 * other end's MSS, we are supposed to use a conservative
3445 * default. But, if we do that, then MTU discovery will
3446 * never actually take place, because the conservative
3447 * default is much less than the MTUs typically seen
3448 * on the Internet today. For the moment, we'll sweep
3449 * this under the carpet.
3450 *
3451 * The conservative default might not actually be a problem
3452 * if the only case this occurs is when sending an initial
3453 * SYN with options and data to a host we've never talked
3454 * to before. Then, they will reply with an MSS value which
3455 * will get recorded and the new parameters should get
3456 * recomputed. For Further Study.
3457 */
3458 }
3459 mss = min(mss, offer);
3460
3461 /*
3462 * Sanity check: make sure that maxopd will be large
3463 * enough to allow some data on segments even if all
3464 * the option space is used (40 bytes). Otherwise
3465 * funny things may happen in tcp_output.
3466 */
3467 mss = max(mss, 64);
3468
3469 /*
3470 * maxopd stores the maximum length of data AND options
3471 * in a segment; maxseg is the amount of data in a normal
3472 * segment. We need to store this value (maxopd) apart
3473 * from maxseg, because now every segment carries options
3474 * and thus we normally have somewhat less data in segments.
3475 */
3476 tp->t_maxopd = mss;
3477
3478 /*
3479 * origoffer==-1 indicates that no segments were received yet.
3480 * In this case we just guess.
3481 */
3482 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3483 (origoffer == -1 ||
3484 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3485 mss -= TCPOLEN_TSTAMP_APPA;
3486
3487 #if (MCLBYTES & (MCLBYTES - 1)) == 0
3488 if (mss > MCLBYTES)
3489 mss &= ~(MCLBYTES-1);
3490 #else
3491 if (mss > MCLBYTES)
3492 mss = mss / MCLBYTES * MCLBYTES;
3493 #endif
3494 tp->t_maxseg = mss;
3495 }
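
/*
 * A worked sketch of the derivation above for IPv4 over Ethernet;
 * constants are hypothetical stand-ins (min_protoh 40, timestamp
 * option 12 bytes) and the clamps are applied in the same spirit as
 * the function, not as a transliteration of its call chain.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int mtu = 1500, min_protoh = 40;	/* ip + tcp headers */
	int offer = 1460, mclbytes = 2048;
	int mss = mtu - min_protoh;		/* 1460 */

	if (mss > offer)			/* never exceed peer's MSS */
		mss = offer;
	if (mss < 64)				/* keep room for data */
		mss = 64;
	/* maxopd would be stored here; timestamps eat 12 bytes. */
	mss -= 12;
	if (mss > mclbytes)			/* round to cluster size */
		mss &= ~(mclbytes - 1);
	printf("maxseg %d\n", mss);		/* 1448 */
	return (0);
}
#endif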
3496
3497 void
3498 tcp_mss(struct tcpcb *tp, int offer)
3499 {
3500 int mss;
3501 u_long bufsize;
3502 struct inpcb *inp;
3503 struct socket *so;
3504 struct hc_metrics_lite metrics;
3505 int mtuflags = 0;
3506
3507 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3508
3509 tcp_mss_update(tp, offer, &metrics, &mtuflags);
3510
3511 mss = tp->t_maxseg;
3512 inp = tp->t_inpcb;
3513
3514 /*
3515 * If there's a pipesize, change the socket buffer to that size,
3516 * don't change if sb_hiwat is different than default (then it
3517 * has been changed on purpose with setsockopt).
3518 * Make the socket buffers an integral number of mss units;
3519 * if the mss is larger than the socket buffer, decrease the mss.
3520 */
3521 so = inp->inp_socket;
3522 SOCKBUF_LOCK(&so->so_snd);
3523 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
3524 bufsize = metrics.rmx_sendpipe;
3525 else
3526 bufsize = so->so_snd.sb_hiwat;
3527 if (bufsize < mss)
3528 mss = bufsize;
3529 else {
3530 bufsize = roundup(bufsize, mss);
3531 if (bufsize > sb_max)
3532 bufsize = sb_max;
3533 if (bufsize > so->so_snd.sb_hiwat)
3534 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3535 }
3536 SOCKBUF_UNLOCK(&so->so_snd);
3537 tp->t_maxseg = mss;
3538
3539 SOCKBUF_LOCK(&so->so_rcv);
3540 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
3541 bufsize = metrics.rmx_recvpipe;
3542 else
3543 bufsize = so->so_rcv.sb_hiwat;
3544 if (bufsize > mss) {
3545 bufsize = roundup(bufsize, mss);
3546 if (bufsize > sb_max)
3547 bufsize = sb_max;
3548 if (bufsize > so->so_rcv.sb_hiwat)
3549 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3550 }
3551 SOCKBUF_UNLOCK(&so->so_rcv);
3552
3553 /* Check the interface for TSO capabilities. */
3554 if (mtuflags & CSUM_TSO)
3555 tp->t_flags |= TF_TSO;
3556 }
3557
3558 /*
3559 * Determine the MSS option to send on an outgoing SYN.
3560 */
3561 int
3562 tcp_mssopt(struct in_conninfo *inc)
3563 {
3564 int mss = 0;
3565 u_long maxmtu = 0;
3566 u_long thcmtu = 0;
3567 size_t min_protoh;
3568
3569 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3570
3571 #ifdef INET6
3572 if (inc->inc_flags & INC_ISIPV6) {
3573 mss = V_tcp_v6mssdflt;
3574 maxmtu = tcp_maxmtu6(inc, NULL);
3575 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3576 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3577 }
3578 #endif
3579 #if defined(INET) && defined(INET6)
3580 else
3581 #endif
3582 #ifdef INET
3583 {
3584 mss = V_tcp_mssdflt;
3585 maxmtu = tcp_maxmtu(inc, NULL);
3586 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3587 min_protoh = sizeof(struct tcpiphdr);
3588 }
3589 #endif
3590 if (maxmtu && thcmtu)
3591 mss = min(maxmtu, thcmtu) - min_protoh;
3592 else if (maxmtu || thcmtu)
3593 mss = max(maxmtu, thcmtu) - min_protoh;
3594
3595 return (mss);
3596 }
3597
3598
3599 /*
3600 * When a partial ack arrives, force the retransmission of the
3601 * next unacknowledged segment. Do not clear tp->t_dupacks.
3602 * By setting snd_nxt to th_ack, this forces the retransmission timer
3603 * to be started again.
3604 */
3605 static void
3606 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3607 {
3608 tcp_seq onxt = tp->snd_nxt;
3609 u_long ocwnd = tp->snd_cwnd;
3610
3611 INP_WLOCK_ASSERT(tp->t_inpcb);
3612
3613 tcp_timer_activate(tp, TT_REXMT, 0);
3614 tp->t_rtttime = 0;
3615 tp->snd_nxt = th->th_ack;
3616 /*
3617 * Set snd_cwnd to one segment beyond acknowledged offset.
3618 * (tp->snd_una has not yet been updated when this function is called.)
3619 */
3620 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
3621 tp->t_flags |= TF_ACKNOW;
3622 (void) tcp_output(tp);
3623 tp->snd_cwnd = ocwnd;
3624 if (SEQ_GT(onxt, tp->snd_nxt))
3625 tp->snd_nxt = onxt;
3626 /*
3627 * Partial window deflation. Relies on fact that tp->snd_una
3628 * not updated yet.
3629 */
3630 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3631 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3632 else
3633 tp->snd_cwnd = 0;
3634 tp->snd_cwnd += tp->t_maxseg;
3635 }
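
/*
 * A worked sketch of the deflation above with hypothetical numbers;
 * BYTES_THIS_ACK is th_ack - snd_una, and snd_una has not yet been
 * advanced when the function runs.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long maxseg = 1460, ocwnd = 10 * 1460;
	unsigned long snd_una = 10000, th_ack = 12920;
	unsigned long acked = th_ack - snd_una;	/* 2920: two segments */
	unsigned long cwnd;

	cwnd = maxseg + acked;		/* window for the forced resend */
	/* ... tcp_output() retransmits the next unacked segment ... */
	cwnd = ocwnd;			/* restore the old window */
	cwnd = (cwnd > acked) ? cwnd - acked : 0;
	cwnd += maxseg;			/* deflate, then re-add one segment */
	printf("cwnd %lu\n", cwnd);	/* 13140 = 9 segments */
	return (0);
}
#endif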