/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static const int tcprexmtthresh = 3;

struct tcpstat tcpstat;
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcpstat,
    "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST on segments to closed ports");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

static int tcp_do_rfc3042 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_insecure_rst = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int tcp_autorcvbuf_max = 256*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");
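
/*
 * Illustrative arithmetic (assuming the common 64kB default for
 * net.inet.tcp.recvspace; not a statement from the original comments):
 * with the knobs above at their defaults, a bulk receiver that keeps
 * passing the auto-scaling test further below grows its buffer in 16kB
 * (recvbuf_inc) steps from 64kB up to the 256kB cap (recvbuf_max),
 * i.e. at most (256k - 64k) / 16k = 12 step-ups, and at most one step
 * per RTT.
 */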

struct inpcbhead tcb;
#define	tcb6	tcb	/* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack didn't advertise a 0-sized window.  We never want
 *	  to delay the ack that opens up a 0-sized window and
 *	- delayed acks are enabled or
 *	- this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)						\
	((!tcp_timer_active(tp, TT_DELACK) &&			\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&		\
	    (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
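
/*
 * Worked example of the macro (illustrative): with the default
 * tcp_delack_enabled = 1, a data segment that arrives while no
 * delayed-ACK timer is pending and whose previous ACK advertised a
 * non-zero window makes DELAY_ACK(tp) true; the fast path below then
 * sets TF_DELACK so the ACK can ride the delack timer or piggyback on
 * outbound data, instead of setting TF_ACKNOW and calling tcp_output()
 * immediately.
 */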

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * Is there a better place to put this?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif

void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
	const int isipv6 = 0;
#endif
	struct tcpopt to;	/* options in this segment */
	char *s = NULL;		/* address and port logging */

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	tcpstat.tcps_rcvtotal++;

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbound/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
#else
		th = NULL;		/* XXX: Avoid compiler warning. */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				tcpstat.tcps_rcvshort++;
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
				    ip->ip_len +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
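		/*
		 * Illustrative note (not from the original comments): in
		 * all the cases above a valid segment ends up with
		 * th->th_sum == 0.  With CSUM_PSEUDO_HDR the card already
		 * folded in the pseudo-header, so a good packet yields
		 * 0xffff and the XOR turns it into 0; otherwise in_pseudo()
		 * folds the pseudo-header into the card's partial sum
		 * first.  The software path overlays the IP header with
		 * struct ipovly and lets in_cksum() verify header plus
		 * data the classic way.
		 */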
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		tcpstat.tcps_rcvbadoff++;
		goto drop;
	}
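	/*
	 * Example (illustrative): th_off counts 32-bit words, so after the
	 * shift the valid range is 20 bytes (th_off = 5, no options) up to
	 * 60 bytes (th_off = 15, the 40-byte option maximum); any value
	 * below sizeof(struct tcphdr) or beyond the segment length is
	 * rejected above as bad.
	 */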
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					tcpstat.tcps_rcvshort++;
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;
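	/*
	 * Worked example (illustrative): for an option-less IPv4 segment
	 * drop_hdrlen is 20 + 20 = 40; with a 12-byte timestamp option
	 * block (th_off = 8) it is 20 + 32 = 52.  m_adj(m, drop_hdrlen)
	 * strips exactly this much later, when the payload is appended to
	 * the receive socket buffer.
	 */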

	/*
	 * Locate pcb for segment.
	 */
	INP_INFO_WLOCK(&tcbinfo);
findpcb:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 is not supported yet. */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Have we already got one like this?
		 */
		inp = in_pcblookup_hash(&tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
				ntohs(next_hop->sin_port) :
				th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	{
		if (isipv6) {
#ifdef INET6
			inp = in6_pcblookup_hash(&tcbinfo,
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
#endif
		} else
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
	}

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_addrs(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((blackhole == 1 && (thflags & TH_SYN)) ||
		    blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_LOCK(inp);

#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		ipsec6stat.in_polvio++;
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		ipsec4stat.in_polvio++;
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}
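	/*
	 * Illustrative note (an assumed typical use, e.g. for BGP
	 * sessions, not stated in the original comments): setting
	 * IP_MINTTL to 255 gives GTSM-style (RFC 3682) protection - a
	 * directly connected peer sending with TTL 255 always passes,
	 * while any segment that crossed a router arrives with a lower
	 * TTL and is silently dropped above.
	 */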

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch
	 * stray or duplicate segments arriving late.  If this segment
	 * was a legitimate new connection attempt the old INPCB gets
	 * removed and we can try again to find a listening socket.
	 */
	if (inp->inp_vflag & INP_TIMEWAIT) {
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef MAC
	INP_LOCK_ASSERT(inp);
	if (mac_check_inpcb_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6) {
#ifdef INET6
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
#endif
		} else
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));

		bzero(&inc, sizeof(inc));
		inc.inc_isipv6 = isipv6;
#ifdef INET6
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * the inp and tcbinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__, (tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again"));
				if (tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_UNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_LOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: new socket not in SYN_RECEIVED state",
			    __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
			INP_INFO_UNLOCK_ASSERT(&tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			tcpstat.tcps_badsyn++;
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC 1644.
		 */
		if ((thflags & TH_FIN) && drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden, we do not accept
		 * a SYN to a deprecated interface address, to prevent any
		 * new inbound connection from getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  This is a compromise: it is much better for the
		 * peer to get a RST, and the RST will be the final packet
		 * of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it reads as follows:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple description texts for deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to deprecated "
					    "IPv6 address rejected\n",
					    s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "Connection attempt from broad- or "
				    "multicast link layer address ignored\n",
				    s, __func__);
			goto dropunlock;
		}
		if (isipv6) {
#ifdef INET6
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to/from self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to "
					    "multicast address ignored\n",
					    s, __func__);
				goto dropunlock;
			}
#endif
		} else {
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to broad- "
					    "or multicast address ignored\n",
					    s, __func__);
				goto dropunlock;
			}
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&tcbinfo);
		return;
	}

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	return;

dropwithreset:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	tcp_dropwithreset(m, th, tp, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
dropunlock:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	if (inp != NULL)
		INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&tcbinfo);
drop:
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return;
}

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int headlocked = 1;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);

	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
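	/*
	 * Worked example (illustrative): with snd_scale = 6, a raw th_win
	 * of 0x8000 unscales to 0x8000 << 6 = 2MB; the 16-bit header field
	 * can thus advertise the large windows negotiated via the RFC 1323
	 * window scale option.
	 */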

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}
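	/*
	 * Example (illustrative): with syncookies we transmit
	 * tsval = ticks + ts_offset, so subtracting ts_offset from the
	 * peer's echo recovers plain ticks for the RTT math below.  An
	 * echo that would lie in our future is zeroed here, which makes
	 * the RTT code fall back to the non-timestamp estimate instead
	 * of computing a bogus sample from a broken reply.
	 */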

	/*
	 * Process options only when we get SYN/ACK back.  The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
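	/*
	 * Illustrative example: in a one-way bulk transfer the receiver
	 * sees seq == rcv_nxt, pure TH_ACK flags, an unchanged window and
	 * an empty reassembly queue on almost every segment, so nearly
	 * all of its input takes the fast path below (counted in
	 * tcps_preddat), while the sender of the same transfer takes the
	 * pure-ACK branch (tcps_predack).
	 */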
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent))) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    ((!tcp_do_newreno &&
			      !(tp->t_flags & TF_SACK_PERMIT) &&
			      tp->t_dupacks < tcprexmtthresh) ||
			     ((tcp_do_newreno ||
			       (tp->t_flags & TF_SACK_PERMIT)) &&
			      !IN_FASTRECOVERY(tp) &&
			      (to.to_flags & TOF_SACK) == 0 &&
			      TAILQ_EMPTY(&tp->snd_holes)))) {
				KASSERT(headlocked,
				    ("%s: head not locked", __func__));
				INP_INFO_WUNLOCK(&tcbinfo);
				headlocked = 0;
				/*
				 * This is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					++tcpstat.tcps_sndrexmitbad;
					tp->snd_cwnd = tp->snd_cwnd_prev;
					tp->snd_ssthresh =
					    tp->snd_ssthresh_prev;
					tp->snd_recover = tp->snd_recover_prev;
					if (tp->t_flags & TF_WASFRECOVERY)
						ENTER_FASTRECOVERY(tp);
					tp->snd_nxt = tp->snd_max;
					tp->t_badrxtwin = 0;
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
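				/*
				 * Worked example (illustrative): if the
				 * peer echoes a timestamp we emitted 50
				 * ticks ago, the sample fed to
				 * tcp_xmit_timer() is
				 * ticks - to.to_tsecr + 1 = 51.  Without
				 * timestamps the fallback sample is the
				 * age of t_rtttime, the tick at which the
				 * timed sequence number t_rtseq was sent.
				 */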
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap
				 * relative to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			KASSERT(headlocked, ("%s: head not locked", __func__));
			INP_INFO_WUNLOCK(&tcbinfo);
			headlocked = 0;
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often
			 * the send buffer size is not optimally adjusted to
			 * the actual network conditions at hand (delay
			 * bandwidth product).  Setting the buffer size too
			 * small limits throughput on links with high
			 * bandwidth and high delay (e.g.
			 * trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is
			 * only rarely used to any significant extent.  This
			 * allows us to be much more aggressive in scaling
			 * the receive socket buffer.  For the case that the
			 * buffer space is actually used to a large extent
			 * and we run out of kernel memory we can simply
			 * drop the new segments; TCP on the sender will
			 * just retransmit them later.  Setting the buffer
			 * size too big may only consume too much kernel
			 * memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes
			 * use of the reassembly queue.
			 *
			 * The criteria to step up the receive buffer one
			 * notch are:
			 *  1. the number of bytes received during the time
			 *     it takes one timestamp to be reflected back
			 *     to us (the RTT);
			 *  2. received bytes per RTT is within seven
			 *     eighths of the current socket buffer size;
			 *  3. receive buffer size has not hit maximal
			 *     automatic size;
			 *
			 * This algorithm does one step per RTT at most and
			 * only if we receive a bulk stream w/o packet
			 * losses or reorderings.  Shrinking the buffer
			 * during idle times is not necessary as it doesn't
			 * consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually
			 * serving the buffer to better manage the socket
			 * buffer resources.
			 */
			if (tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    tcp_autorcvbuf_inc,
						    tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}
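			/*
			 * Worked example (illustrative): with
			 * sb_hiwat = 64kB the 7/8 threshold is 57344
			 * bytes.  If more than that arrived within one
			 * reflected timestamp (roughly one RTT), the
			 * buffer is bumped to min(64k + 16k, 256k) = 80kB
			 * using the default tcp_autorcvbuf_inc and
			 * tcp_autorcvbuf_max above.
			 */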

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, curthread))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
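	/*
	 * Example (illustrative): if only 8kB of buffer space is free but
	 * we have already advertised up to rcv_adv = rcv_nxt + 16k, the
	 * imax() keeps rcv_wnd at 16k - we must not take back window
	 * space already offered to the peer (RFC 1122 discourages
	 * shrinking the window).
	 */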

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
			tp = tcp_drop(tp, ECONNREFUSED);
		if (thflags & TH_RST)
			goto drop;
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			tcpstat.tcps_connects++;
			soisconnected(so);
#ifdef MAC
			SOCK_LOCK(so);
			mac_set_socket_peer_from_mbuf(m, so);
			SOCK_UNLOCK(so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN,
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *	SYN-SENT  -> SYN-RECEIVED
			 *	SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
		    __func__));
		INP_LOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
		break;			/* continue normal processing */
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *	In all states except SYN-SENT, all reset (RST) segments
	 *	are validated by checking their SEQ-fields.  A reset is
	 *	valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *	we should test against last_ack_sent instead of rcv_nxt.
	 *	The sequence number in the reset segment is normally an
	 *	echo of our outgoing acknowledgement numbers, but some
	 *	hosts send a reset with the sequence number at the
	 *	rightmost edge of our receive window, and we have to
	 *	handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *	that brute force RST attacks are possible.  To combat
	 *	this, we use a much stricter check while in the
	 *	ESTABLISHED state, only accepting RSTs where the sequence
	 *	number is equal to last_ack_sent.  In all other states
	 *	(the states in which a RST is more likely), the more
	 *	permissive check is used.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *	A TCP SHOULD allow a received RST segment to include data.
	 *
	 *	DISCUSSION
	 *	     It has been suggested that a RST segment could contain
	 *	     ASCII text that encoded and explained the cause of the
	 *	     RST.  No standard has yet been established for such
	 *	     data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				      SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq,
				      tp->last_ack_sent - 1) &&
				      SEQ_LEQ(th->th_seq,
				      tp->last_ack_sent + 1))) {
					tcpstat.tcps_badrst++;
					goto drop;
				}
				/* FALLTHROUGH */
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				tcpstat.tcps_drops++;
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.2: head not locked", __func__));
				tp = tcp_close(tp);
				break;
			}
		}
		goto drop;
	}
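	/*
	 * Worked example (illustrative): for an ESTABLISHED connection
	 * with last_ack_sent == rcv_nxt == 1000, the strict test above
	 * accepts a RST only at sequence 999, 1000 or 1001.  A blind
	 * attacker who could previously land anywhere in the receive
	 * window now has to hit one of three values - the mitigation for
	 * Watson's "Slipping in the Window" attack.
	 */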

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old. */
		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and
			 * ts_recent will get a valid value.  If it does
			 * not, setting ts_recent to zero will at least
			 * satisfy the requirement that zero be placed in
			 * the timestamp echo reply when ts_recent isn't
			 * valid.  The age isn't reset until we get a
			 * valid ts_recent because we don't want
			 * out-of-order segments to be dropped when
			 * ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += tlen;
			tcpstat.tcps_pawsdrop++;
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}
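	/*
	 * Example (illustrative, assuming hz = 1000): TCP_PAWS_IDLE is 24
	 * days worth of ticks.  A segment with tsval = 5000 while
	 * ts_recent = 6000 fails PAWS: if ts_recent is fresh the segment
	 * is counted as a duplicate (and ACKed when it carried data);
	 * only when ts_recent itself is older than 24 days do we assume
	 * the peer's timestamp clock was reset and invalidate it instead.
	 */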

	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}

	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;

			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += todrop;
		} else {
			tcpstat.tcps_rcvpartduppack++;
			tcpstat.tcps_rcvpartdupbyte += todrop;
		}
		drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}

	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		char *s;

		KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head "
		    "not locked", __func__));
		if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
		    NULL))) {
			log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of "
			    "data after socket was closed, sending RST "
			    "and removing tcpcb\n",
			    s, __func__, tcpstates[tp->t_state], tlen);
			free(s, M_TCPLOG);
		}
		tp = tcp_close(tp);
		tcpstat.tcps_rcvafterclose++;
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH|TH_FIN);
	}

	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp.
	 * NOTE:
	 * 1) That the test incorporates suggestions from the latest
	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 * 2) That updating only on newer timestamps interferes with
	 *    our earlier PAWS tests, so this check should be solely
	 *    predicated on the sequence space of this segment.
	 * 3) That we modify the segment boundary check to be
	 *	Last.ACK.Sent <= SEG.SEQ + SEG.Len
	 *    instead of RFC1323's
	 *	Last.ACK.Sent < SEG.SEQ + SEG.Len,
	 *    This modified check allows us to overcome RFC1323's
	 *    limitations as described in Stevens TCP/IP Illustrated
	 *    Vol. 2 p.869.  In such cases, we can still calculate the
	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
	 */
	if ((to.to_flags & TOF_TS) != 0 &&
	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
	    ((thflags & (TH_SYN|TH_FIN)) != 0))) {
		tp->ts_recent_age = ticks;
		tp->ts_recent = to.to_tsval;
	}

	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (thflags & TH_SYN) {
		KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: "
		    "head not locked", __func__));
		tp = tcp_drop(tp, ECONNRESET);
		rstreason = BANDLIM_UNLIMITED;
		goto drop;
	}

	/*
	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
	 * flag is on (half-synchronized state), then queue data for
	 * later processing; else drop segment and return.
	 */
	if ((thflags & TH_ACK) == 0) {
		if (tp->t_state == TCPS_SYN_RECEIVED ||
		    (tp->t_flags & TF_NEEDSYN))
			goto step6;
		else if (tp->t_flags & TF_ACKNOW)
			goto dropafterack;
		else
			goto drop;
	}

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		tcpstat.tcps_connects++;
		soisconnected(so);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			tp->snd_wnd = tiwin;
		}
		/*
		 * Make transitions:
		 *	SYN-RECEIVED  -> ESTABLISHED
		 *	SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tp->t_state = TCPS_ESTABLISHED;
			tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && (thflags & TH_FIN) == 0)
			(void) tcp_reass(tp, (struct tcphdr *)0, 0,
			    (struct mbuf *)0);
		tp->snd_wl1 = th->th_seq - 1;
		/* FALLTHROUGH */

	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window
	 * information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    ((to.to_flags & TOF_SACK) ||
		     !TAILQ_EMPTY(&tp->snd_holes)))
			tcp_sack_doack(tp, &to, th->th_ack);
		if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
			if (tlen == 0 && tiwin == tp->snd_wnd) {
				tcpstat.tcps_rcvdupack++;
				/*
				 * If we have outstanding data (other than
				 * a window probe), this is a completely
				 * duplicate ack (ie, window info didn't
				 * change), the ack is the biggest we've
				 * seen and we've seen exactly our rexmt
				 * threshold of them, assume a packet
				 * has been dropped and retransmit it.
				 * Kludge snd_nxt & the congestion
				 * window so we send only this one
				 * packet.
				 *
				 * We know we're losing at the current
				 * window size so do congestion avoidance
				 * (set ssthresh to half the current window
				 * and pull our congestion window back to
				 * the new ssthresh).
				 *
				 * Dup acks mean that packets have left the
				 * network (they're now cached at the receiver)
				 * so bump cwnd by the amount in the receiver
				 * to keep a constant cwnd packets in the
				 * network.
				 */
				if (!tcp_timer_active(tp, TT_REXMT) ||
				    th->th_ack != tp->snd_una)
					tp->t_dupacks = 0;
				else if (++tp->t_dupacks > tcprexmtthresh ||
				    ((tcp_do_newreno ||
				      (tp->t_flags & TF_SACK_PERMIT)) &&
				     IN_FASTRECOVERY(tp))) {
					if ((tp->t_flags & TF_SACK_PERMIT) &&
					    IN_FASTRECOVERY(tp)) {
						int awnd;

						/*
						 * Compute the amount of data
						 * in flight first.  We can
						 * inject new data into the
						 * pipe iff we have less than
						 * 1/2 the original window's
						 * worth of data in flight.
						 */
						awnd = (tp->snd_nxt -
						    tp->snd_fack) +
						    tp->sackhint.sack_bytes_rexmit;
						if (awnd < tp->snd_ssthresh) {
							tp->snd_cwnd +=
							    tp->t_maxseg;
							if (tp->snd_cwnd >
							    tp->snd_ssthresh)
								tp->snd_cwnd =
								    tp->snd_ssthresh;
						}
					} else
						tp->snd_cwnd += tp->t_maxseg;
					(void) tcp_output(tp);
					goto drop;
				} else if (tp->t_dupacks == tcprexmtthresh) {
					tcp_seq onxt = tp->snd_nxt;
					u_int win;

					/*
					 * If we're doing sack, check to
					 * see if we're already in sack
					 * recovery.  If we're not doing
					 * sack, check to see if we're in
					 * newreno recovery.
					 */
					if (tp->t_flags & TF_SACK_PERMIT) {
						if (IN_FASTRECOVERY(tp)) {
							tp->t_dupacks = 0;
							break;
						}
					} else if (tcp_do_newreno) {
						if (SEQ_LEQ(th->th_ack,
						    tp->snd_recover)) {
							tp->t_dupacks = 0;
							break;
						}
					}
					win = min(tp->snd_wnd, tp->snd_cwnd) /
					    2 / tp->t_maxseg;
					if (win < 2)
						win = 2;
					tp->snd_ssthresh = win * tp->t_maxseg;
1818 ENTER_FASTRECOVERY(tp);
1819 tp->snd_recover = tp->snd_max;
1820 tcp_timer_activate(tp, TT_REXMT, 0);
1821 tp->t_rtttime = 0;
1822 if (tp->t_flags & TF_SACK_PERMIT) {
1823 tcpstat.tcps_sack_recovery_episode++;
1824 tp->sack_newdata = tp->snd_nxt;
1825 tp->snd_cwnd = tp->t_maxseg;
1826 (void) tcp_output(tp);
1827 goto drop;
1828 }
1829 tp->snd_nxt = th->th_ack;
1830 tp->snd_cwnd = tp->t_maxseg;
1831 (void) tcp_output(tp);
1832 KASSERT(tp->snd_limited <= 2,
1833 ("%s: tp->snd_limited too big",
1834 __func__));
1835 tp->snd_cwnd = tp->snd_ssthresh +
1836 tp->t_maxseg *
1837 (tp->t_dupacks - tp->snd_limited);
1838 if (SEQ_GT(onxt, tp->snd_nxt))
1839 tp->snd_nxt = onxt;
1840 goto drop;
1841 } else if (tcp_do_rfc3042) {
1842 u_long oldcwnd = tp->snd_cwnd;
1843 tcp_seq oldsndmax = tp->snd_max;
1844 u_int sent;
1845
1846 KASSERT(tp->t_dupacks == 1 ||
1847 tp->t_dupacks == 2,
1848 ("%s: dupacks not 1 or 2",
1849 __func__));
1850 if (tp->t_dupacks == 1)
1851 tp->snd_limited = 0;
1852 tp->snd_cwnd =
1853 (tp->snd_nxt - tp->snd_una) +
1854 (tp->t_dupacks - tp->snd_limited) *
1855 tp->t_maxseg;
1856 (void) tcp_output(tp);
1857 sent = tp->snd_max - oldsndmax;
1858 if (sent > tp->t_maxseg) {
1859 KASSERT((tp->t_dupacks == 2 &&
1860 tp->snd_limited == 0) ||
1861 (sent == tp->t_maxseg + 1 &&
1862 tp->t_flags & TF_SENTFIN),
1863 ("%s: sent too much",
1864 __func__));
1865 tp->snd_limited = 2;
1866 } else if (sent > 0)
1867 ++tp->snd_limited;
1868 tp->snd_cwnd = oldcwnd;
1869 goto drop;
1870 }
1871 } else
1872 tp->t_dupacks = 0;
1873 break;
1874 }
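
		/*
		 * Worked example for the third-dupack case above
		 * (illustrative values only, not from the source): with
		 * snd_wnd = snd_cwnd = 65536 and t_maxseg = 1460,
		 * win = 65536 / 2 / 1460 = 22, so snd_ssthresh becomes
		 * 22 * 1460 = 32120 bytes, roughly half the data in
		 * flight.  cwnd is collapsed to a single segment for the
		 * retransmit and, in the non-SACK case, re-inflated to
		 * ssthresh plus one maxseg for each dupack not already
		 * covered by limited transmit (snd_limited).
		 */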

		KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
		    ("%s: th_ack <= snd_una", __func__));

		/*
		 * If the congestion window was inflated to account
		 * for the other side's cached packets, retract it.
		 */
		if (tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) {
			if (IN_FASTRECOVERY(tp)) {
				if (SEQ_LT(th->th_ack, tp->snd_recover)) {
					if (tp->t_flags & TF_SACK_PERMIT)
						tcp_sack_partialack(tp, th);
					else
						tcp_newreno_partial_ack(tp, th);
				} else {
					/*
					 * Out of fast recovery.
					 * Window inflation should have left us
					 * with approximately snd_ssthresh
					 * outstanding data.
					 * But in case we would be inclined to
					 * send a burst, better to do it via
					 * the slow start mechanism.
					 */
					if (SEQ_GT(th->th_ack +
					    tp->snd_ssthresh,
					    tp->snd_max))
						tp->snd_cwnd = tp->snd_max -
						    th->th_ack + tp->t_maxseg;
					else
						tp->snd_cwnd = tp->snd_ssthresh;
				}
			}
		} else {
			if (tp->t_dupacks >= tcprexmtthresh &&
			    tp->snd_cwnd > tp->snd_ssthresh)
				tp->snd_cwnd = tp->snd_ssthresh;
		}
		tp->t_dupacks = 0;
		/*
		 * If we reach this point, ACK is not a duplicate,
		 * i.e., it ACKs something we sent.
		 */
		if (tp->t_flags & TF_NEEDSYN) {
			/*
			 * T/TCP: Connection was half-synchronized, and our
			 * SYN has been ACK'd (so connection is now fully
			 * synchronized).  Go to non-starred state,
			 * increment snd_una for ACK of SYN, and check if
			 * we can do window scaling.
			 */
			tp->t_flags &= ~TF_NEEDSYN;
			tp->snd_una++;
			/* Do window scaling? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
				/* Send window already scaled. */
			}
		}

process_ACK:
		KASSERT(headlocked, ("%s: process_ACK: head not locked",
		    __func__));
		INP_LOCK_ASSERT(tp->t_inpcb);

		acked = th->th_ack - tp->snd_una;
		tcpstat.tcps_rcvackpack++;
		tcpstat.tcps_rcvackbyte += acked;

		/*
		 * If we just performed our first retransmit, and the ACK
		 * arrives within our recovery window, then it was a mistake
		 * to do the retransmit in the first place.  Recover our
		 * original cwnd and ssthresh, and proceed to transmit where
		 * we left off.
		 */
		if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
			++tcpstat.tcps_sndrexmitbad;
			tp->snd_cwnd = tp->snd_cwnd_prev;
			tp->snd_ssthresh = tp->snd_ssthresh_prev;
			tp->snd_recover = tp->snd_recover_prev;
			if (tp->t_flags & TF_WASFRECOVERY)
				ENTER_FASTRECOVERY(tp);
			tp->snd_nxt = tp->snd_max;
			tp->t_badrxtwin = 0;	/* XXX probably not required */
		}

		/*
		 * If we have a timestamp reply, update smoothed
		 * round trip time.  If no timestamp is present but
		 * transmit timer is running and timed sequence
		 * number was acked, update smoothed round trip time.
		 * Since we now have an rtt measurement, cancel the
		 * timer backoff (cf., Phil Karn's retransmit alg.).
		 * Recompute the initial retransmit timer.
		 *
		 * Some boxes send broken timestamp replies
		 * during the SYN+ACK phase; ignore
		 * timestamps of 0 or we could calculate a
		 * huge RTT and blow up the retransmit timer.
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    to.to_tsecr) {
			if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
				tp->t_rttlow = ticks - to.to_tsecr;
			tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
		} else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
			if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
				tp->t_rttlow = ticks - tp->t_rtttime;
			tcp_xmit_timer(tp, ticks - tp->t_rtttime);
		}
		tcp_xmit_bandwidth_limit(tp, th->th_ack);

		/*
		 * If all outstanding data is acked, stop retransmit
		 * timer and remember to restart (more output or persist).
		 * If there is more data to be acked, restart retransmit
		 * timer, using current (possibly backed-off) value.
		 */
		if (th->th_ack == tp->snd_max) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			needoutput = 1;
		} else if (!tcp_timer_active(tp, TT_PERSIST))
			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);

		/*
		 * If no data (only SYN) was ACK'd,
		 * skip rest of ACK processing.
		 */
		if (acked == 0)
			goto step6;

		/*
		 * When new data is acked, open the congestion window.
		 * If the window gives us less than ssthresh packets
		 * in flight, open exponentially (maxseg per packet).
		 * Otherwise open linearly: maxseg per window
		 * (maxseg^2 / cwnd per packet).
		 */
		if ((!tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) ||
		    !IN_FASTRECOVERY(tp)) {
			u_int cw = tp->snd_cwnd;
			u_int incr = tp->t_maxseg;
			if (cw > tp->snd_ssthresh)
				incr = incr * incr / cw;
			tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale);
		}
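
#if 0
		/*
		 * Illustrative, non-compiled sketch of the window opening
		 * above, using assumed example values: below ssthresh each
		 * ACK grows cwnd by a full maxseg (slow start); above it,
		 * the increment is maxseg*maxseg/cwnd per ACK, i.e. roughly
		 * one maxseg per window's worth of ACKs (congestion
		 * avoidance).
		 */
		{
			u_int cw = 16 * 1460;		/* assumed cwnd */
			u_int ssthresh = 8 * 1460;	/* assumed ssthresh */
			u_int incr = 1460;		/* assumed maxseg */

			if (cw > ssthresh)
				incr = incr * incr / cw; /* 91 bytes */
			cw += incr;
		}
#endif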
		SOCKBUF_LOCK(&so->so_snd);
		if (acked > so->so_snd.sb_cc) {
			tp->snd_wnd -= so->so_snd.sb_cc;
			sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
			ourfinisacked = 1;
		} else {
			sbdrop_locked(&so->so_snd, acked);
			tp->snd_wnd -= acked;
			ourfinisacked = 0;
		}
		/* NB: sowwakeup_locked() does an implicit unlock. */
		sowwakeup_locked(so);
		/* Detect una wraparound. */
		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
		    !IN_FASTRECOVERY(tp) &&
		    SEQ_GT(tp->snd_una, tp->snd_recover) &&
		    SEQ_LEQ(th->th_ack, tp->snd_recover))
			tp->snd_recover = th->th_ack - 1;
		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
		    IN_FASTRECOVERY(tp) &&
		    SEQ_GEQ(th->th_ack, tp->snd_recover))
			EXIT_FASTRECOVERY(tp);
		tp->snd_una = th->th_ack;
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (SEQ_GT(tp->snd_una, tp->snd_recover))
				tp->snd_recover = tp->snd_una;
		}
		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
			tp->snd_nxt = tp->snd_una;

		switch (tp->t_state) {

		/*
		 * In FIN_WAIT_1 STATE, in addition to the processing
		 * for the ESTABLISHED state, if our FIN is now acknowledged
		 * then enter FIN_WAIT_2.
		 */
		case TCPS_FIN_WAIT_1:
			if (ourfinisacked) {
				/*
				 * If we can't receive any more
				 * data, then closing user can proceed.
				 * Starting the timer is contrary to the
				 * specification, but if we don't get a FIN
				 * we'll hang forever.
				 *
				 * XXXjl:
				 * we should release the tp also, and use a
				 * compressed state.
				 */
				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
					int timeout;

					soisdisconnected(so);
					timeout = (tcp_fast_finwait2_recycle) ?
					    tcp_finwait2_timeout : tcp_maxidle;
					tcp_timer_activate(tp, TT_2MSL, timeout);
				}
				tp->t_state = TCPS_FIN_WAIT_2;
			}
			break;

		/*
		 * In CLOSING STATE, in addition to the processing for
		 * the ESTABLISHED state, if the ACK acknowledges our FIN
		 * then enter the TIME-WAIT state, otherwise ignore
		 * the segment.
		 */
		case TCPS_CLOSING:
			if (ourfinisacked) {
				KASSERT(headlocked, ("%s: process_ACK: "
				    "head not locked", __func__));
				tcp_twstart(tp);
				INP_INFO_WUNLOCK(&tcbinfo);
				headlocked = 0;
				m_freem(m);
				return;
			}
			break;

		/*
		 * In LAST_ACK, we may still be waiting for data to drain
		 * and/or to be acked, as well as for the ack of our FIN.
		 * If our FIN is now acknowledged, delete the TCB,
		 * enter the closed state and return.
		 */
		case TCPS_LAST_ACK:
			if (ourfinisacked) {
				KASSERT(headlocked, ("%s: process_ACK: "
				    "tcp_close: head not locked", __func__));
				tp = tcp_close(tp);
				goto drop;
			}
			break;
		}
	}

step6:
	KASSERT(headlocked, ("%s: step6: head not locked", __func__));
	INP_LOCK_ASSERT(tp->t_inpcb);

	/*
	 * Update window information.
	 * Don't look at window if no ACK: TAC's send garbage on first SYN.
	 */
	if ((thflags & TH_ACK) &&
	    (SEQ_LT(tp->snd_wl1, th->th_seq) ||
	    (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
	     (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
		/* Keep track of pure window updates. */
		if (tlen == 0 &&
		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
			tcpstat.tcps_rcvwinupd++;
		tp->snd_wnd = tiwin;
		tp->snd_wl1 = th->th_seq;
		tp->snd_wl2 = th->th_ack;
		if (tp->snd_wnd > tp->max_sndwnd)
			tp->max_sndwnd = tp->snd_wnd;
		needoutput = 1;
	}
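
	/*
	 * Illustrative restatement of the tests above: the send window
	 * is updated only from a segment that carries a newer sequence
	 * number (snd_wl1 < th_seq), or the same sequence with a newer
	 * ACK (snd_wl2 < th_ack), or the same sequence and ACK but a
	 * larger advertised window.  This keeps old, reordered segments
	 * from shrinking our view of the peer's receive window.
	 */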

	/*
	 * Process segments with URG.
	 */
	if ((thflags & TH_URG) && th->th_urp &&
	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
		/*
		 * This is a kludge, but if we receive and accept
		 * random urgent pointers, we'll crash in
		 * soreceive.  It's hard to imagine someone
		 * actually wanting to send this much urgent data.
		 */
		SOCKBUF_LOCK(&so->so_rcv);
		if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
			th->th_urp = 0;			/* XXX */
			thflags &= ~TH_URG;		/* XXX */
			SOCKBUF_UNLOCK(&so->so_rcv);	/* XXX */
			goto dodata;			/* XXX */
		}
		/*
		 * If this segment advances the known urgent pointer,
		 * then mark the data stream.  This should not happen
		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
		 * a FIN has been received from the remote side.
		 * In these states we ignore the URG.
		 *
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section as the original
		 * spec states (in one of two places).
		 */
		if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
			tp->rcv_up = th->th_seq + th->th_urp;
			so->so_oobmark = so->so_rcv.sb_cc +
			    (tp->rcv_up - tp->rcv_nxt) - 1;
			if (so->so_oobmark == 0)
				so->so_rcv.sb_state |= SBS_RCVATMARK;
			sohasoutofband(so);
			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
		}
		SOCKBUF_UNLOCK(&so->so_rcv);
		/*
		 * Remove out of band data so it doesn't get presented to
		 * the user.  This can happen independent of advancing the
		 * URG pointer, but if two URG's are pending at once, some
		 * out-of-band data may creep in... ick.
		 */
		if (th->th_urp <= (u_long)tlen &&
		    !(so->so_options & SO_OOBINLINE)) {
			/* hdr drop is delayed */
			tcp_pulloutofband(so, th, m, drop_hdrlen);
		}
	} else {
		/*
		 * If no out of band data is expected,
		 * pull receive urgent pointer along
		 * with the receive window.
		 */
		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
			tp->rcv_up = tp->rcv_nxt;
	}
dodata:							/* XXX */
	KASSERT(headlocked, ("%s: dodata: head not locked", __func__));
	INP_LOCK_ASSERT(tp->t_inpcb);

	/*
	 * Process the segment text, merging it into the TCP sequencing queue,
	 * and arranging for acknowledgment of receipt if necessary.
	 * This process logically involves adjusting tp->rcv_wnd as data
	 * is presented to the user (this happens in tcp_usrreq.c,
	 * case PRU_RCVD).  If a FIN has already been received on this
	 * connection then we just ignore the text.
	 */
	if ((tlen || (thflags & TH_FIN)) &&
	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
		tcp_seq save_start = th->th_seq;
		m_adj(m, drop_hdrlen);	/* delayed header drop */
		/*
		 * Insert segment which includes th into TCP reassembly queue
		 * with control block tp.  Set thflags to whether reassembly now
		 * includes a segment with FIN.  This handles the common case
		 * inline (segment is the next to be received on an established
		 * connection, and the queue is empty), avoiding linkage into
		 * and removal from the queue and repetition of various
		 * conversions.
		 * Set DELACK for segments received in order, but ack
		 * immediately when segments are out of order (so
		 * fast retransmit can work).
		 */
		if (th->th_seq == tp->rcv_nxt &&
		    LIST_EMPTY(&tp->t_segq) &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			if (DELAY_ACK(tp))
				tp->t_flags |= TF_DELACK;
			else
				tp->t_flags |= TF_ACKNOW;
			tp->rcv_nxt += tlen;
			thflags = th->th_flags & TH_FIN;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
				m_freem(m);
			else
				sbappendstream_locked(&so->so_rcv, m);
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
		} else {
			/*
			 * XXX: Due to the header drop above "th" is
			 * theoretically invalid by now.  Fortunately
			 * m_adj() doesn't actually free any mbufs
			 * when trimming from the head.
			 */
			thflags = tcp_reass(tp, th, &tlen, m);
			tp->t_flags |= TF_ACKNOW;
		}
		if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
			tcp_update_sack_list(tp, save_start, save_start + tlen);
#if 0
		/*
		 * Note the amount of data that peer has sent into
		 * our window, in order to estimate the sender's
		 * buffer size.
		 * XXX: Unused.
		 */
		len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
#endif
	} else {
		m_freem(m);
		thflags &= ~TH_FIN;
	}

	/*
	 * If FIN is received ACK the FIN and let the user know
	 * that the connection is closing.
	 */
	if (thflags & TH_FIN) {
		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
			socantrcvmore(so);
			/*
			 * If connection is half-synchronized
			 * (i.e. NEEDSYN flag on) then delay ACK,
			 * so it may be piggybacked when SYN is sent.
			 * Otherwise, since we received a FIN then no
			 * more input can be expected, send ACK now.
			 */
			if (tp->t_flags & TF_NEEDSYN)
				tp->t_flags |= TF_DELACK;
			else
				tp->t_flags |= TF_ACKNOW;
			tp->rcv_nxt++;
		}
		switch (tp->t_state) {

		/*
		 * In SYN_RECEIVED and ESTABLISHED STATES
		 * enter the CLOSE_WAIT state.
		 */
		case TCPS_SYN_RECEIVED:
			tp->t_starttime = ticks;
			/* FALLTHROUGH */
		case TCPS_ESTABLISHED:
			tp->t_state = TCPS_CLOSE_WAIT;
			break;

		/*
		 * If still in FIN_WAIT_1 STATE, our FIN has not been acked,
		 * so enter the CLOSING state.
		 */
		case TCPS_FIN_WAIT_1:
			tp->t_state = TCPS_CLOSING;
			break;

		/*
		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
		 * starting the time-wait timer, turning off the other
		 * standard timers.
		 */
		case TCPS_FIN_WAIT_2:
			KASSERT(headlocked == 1, ("%s: dodata: "
			    "TCP_FIN_WAIT_2: head not locked", __func__));
			tcp_twstart(tp);
			INP_INFO_WUNLOCK(&tcbinfo);
			return;
		}
	}
	INP_INFO_WUNLOCK(&tcbinfo);
	headlocked = 0;
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
		    &tcp_savetcp, 0);
#endif

	/*
	 * Return any desired output.
	 */
	if (needoutput || (tp->t_flags & TF_ACKNOW))
		(void) tcp_output(tp);

check_delack:
	KASSERT(headlocked == 0, ("%s: check_delack: head locked",
	    __func__));
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(tp->t_inpcb);
	if (tp->t_flags & TF_DELACK) {
		tp->t_flags &= ~TF_DELACK;
		tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
	}
	INP_UNLOCK(tp->t_inpcb);
	return;

dropafterack:
	KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__));
	/*
	 * Generate an ACK dropping incoming segment if it occupies
	 * sequence space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all
	 * paths to this code happen after packets containing
	 * RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the
	 * segment we received passes the SYN-RECEIVED ACK test.
	 * If it fails send a RST.  This breaks the loop in the
	 * "LAND" DoS attack, and also prevents an ACK storm
	 * between two listening ports that have been sent forged
	 * SYN segments, each with the source address of the other.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	     SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
		    &tcp_savetcp, 0);
#endif
	KASSERT(headlocked, ("%s: headlocked should be 1", __func__));
	INP_INFO_WUNLOCK(&tcbinfo);
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	INP_UNLOCK(tp->t_inpcb);
	m_freem(m);
	return;

dropwithreset:
	KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__));

	tcp_dropwithreset(m, th, tp, tlen, rstreason);

	if (tp != NULL)
		INP_UNLOCK(tp->t_inpcb);
	if (headlocked)
		INP_INFO_WUNLOCK(&tcbinfo);
	return;

drop:
	/*
	 * Drop space held by incoming segment and return.
	 */
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
		    &tcp_savetcp, 0);
#endif
	if (tp != NULL)
		INP_UNLOCK(tp->t_inpcb);
	if (headlocked)
		INP_INFO_WUNLOCK(&tcbinfo);
	m_freem(m);
	return;
}

/*
 * Issue RST and make ACK acceptable to originator of segment.
 * The mbuf must still include the original packet header.
 * tp may be NULL.
 */
static void
tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
    int tlen, int rstreason)
{
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	/* Don't bother if destination was broadcast/multicast. */
	if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
		goto drop;
#ifdef INET6
	if (mtod(m, struct ip *)->ip_v == 6) {
		ip6 = mtod(m, struct ip6_hdr *);
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
			goto drop;
		/* IPv6 anycast check is done at tcp6_input() */
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			goto drop;
	}

	/* Perform bandwidth limiting. */
	if (badport_bandlim(rstreason) < 0)
		goto drop;

	/* tcp_respond consumes the mbuf chain. */
	if (th->th_flags & TH_ACK) {
		tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
		    th->th_ack, TH_RST);
	} else {
		if (th->th_flags & TH_SYN)
			tlen++;
		tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
		    (tcp_seq)0, TH_RST|TH_ACK);
	}
	return;
drop:
	m_freem(m);
	return;
}

/*
 * Parse TCP options and place in tcpopt.
 */
static void
tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
{
	int opt, optlen;

	to->to_flags = 0;
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			if (optlen != TCPOLEN_MAXSEG)
				continue;
			if (!(flags & TO_SYN))
				continue;
			to->to_flags |= TOF_MSS;
			bcopy((char *)cp + 2,
			    (char *)&to->to_mss, sizeof(to->to_mss));
			to->to_mss = ntohs(to->to_mss);
			break;
		case TCPOPT_WINDOW:
			if (optlen != TCPOLEN_WINDOW)
				continue;
			if (!(flags & TO_SYN))
				continue;
			to->to_flags |= TOF_SCALE;
			to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
			break;
		case TCPOPT_TIMESTAMP:
			if (optlen != TCPOLEN_TIMESTAMP)
				continue;
			to->to_flags |= TOF_TS;
			bcopy((char *)cp + 2,
			    (char *)&to->to_tsval, sizeof(to->to_tsval));
			to->to_tsval = ntohl(to->to_tsval);
			bcopy((char *)cp + 6,
			    (char *)&to->to_tsecr, sizeof(to->to_tsecr));
			to->to_tsecr = ntohl(to->to_tsecr);
			break;
#ifdef TCP_SIGNATURE
		/*
		 * XXX In order to reply to a host which has set the
		 * TCP_SIGNATURE option in its initial SYN, we have to
		 * record the fact that the option was observed here
		 * for the syncache code to perform the correct response.
		 */
		case TCPOPT_SIGNATURE:
			if (optlen != TCPOLEN_SIGNATURE)
				continue;
			to->to_flags |= TOF_SIGNATURE;
			to->to_signature = cp + 2;
			break;
#endif
		case TCPOPT_SACK_PERMITTED:
			if (optlen != TCPOLEN_SACK_PERMITTED)
				continue;
			if (!(flags & TO_SYN))
				continue;
			if (!tcp_do_sack)
				continue;
			to->to_flags |= TOF_SACKPERM;
			break;
		case TCPOPT_SACK:
			if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
				continue;
			if (flags & TO_SYN)
				continue;
			to->to_flags |= TOF_SACK;
			to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
			to->to_sacks = cp + 2;
			tcpstat.tcps_sack_rcv_blocks++;
			break;
		default:
			continue;
		}
	}
}
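
#if 0
/*
 * Illustrative, non-compiled sketch of how tcp_dooptions() parses the
 * option bytes of a SYN carrying an MSS and a window scale option.
 * The byte values and the helper name are assumptions for the example,
 * not part of the kernel.
 */
static void
tcp_dooptions_example(void)
{
	/* MSS 1460 (0x05b4), NOP, window scale 3. */
	u_char opts[] = { TCPOPT_MAXSEG, TCPOLEN_MAXSEG, 0x05, 0xb4,
	    TCPOPT_NOP, TCPOPT_WINDOW, TCPOLEN_WINDOW, 3 };
	struct tcpopt to;

	tcp_dooptions(&to, opts, sizeof(opts), TO_SYN);
	/*
	 * Afterwards TOF_MSS and TOF_SCALE are set in to.to_flags,
	 * to.to_mss is 1460 and to.to_wscale is 3.
	 */
}
#endif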

/*
 * Pull the out-of-band byte out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
 */
static void
tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
    int off)
{
	int cnt = off + th->th_urp - 1;

	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
			m->m_len--;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len--;
			return;
		}
		cnt -= m->m_len;
		m = m->m_next;
		if (m == NULL)
			break;
	}
	panic("tcp_pulloutofband");
}
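
/*
 * Worked example (illustrative values): with drop_hdrlen = 40 and
 * th_urp = 3, cnt starts at 42, the chain offset of the last octet of
 * urgent data.  The loop walks the mbuf chain to the mbuf holding that
 * octet, saves the byte in t_iobc, closes the one-byte gap with bcopy()
 * and shrinks the mbuf (and packet header) length by one, so the byte
 * never reaches the socket buffer.
 */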

/*
 * Collect new round-trip time estimate
 * and update averages and current timeout.
 */
static void
tcp_xmit_timer(struct tcpcb *tp, int rtt)
{
	int delta;

	INP_LOCK_ASSERT(tp->t_inpcb);

	tcpstat.tcps_rttupdated++;
	tp->t_rttupdated++;
	if (tp->t_srtt != 0) {
		/*
		 * srtt is stored as fixed point with 5 bits after the
		 * binary point (i.e., scaled by 32).  The following magic
		 * is equivalent to the smoothing algorithm in rfc793 with
		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
		 * point).  Adjust rtt to origin 0.
		 */
		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
			- (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));

		if ((tp->t_srtt += delta) <= 0)
			tp->t_srtt = 1;

		/*
		 * We accumulate a smoothed rtt variance (actually, a
		 * smoothed mean difference), then set the retransmit
		 * timer to smoothed rtt + 4 times the smoothed variance.
		 * rttvar is stored as fixed point with 4 bits after the
		 * binary point (scaled by 16).  The following is
		 * equivalent to rfc793 smoothing with an alpha of .75
		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
		 * rfc793's wired-in beta.
		 */
		if (delta < 0)
			delta = -delta;
		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
		if ((tp->t_rttvar += delta) <= 0)
			tp->t_rttvar = 1;
		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
			tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
	} else {
		/*
		 * No rtt measurement yet - use the unsmoothed rtt.
		 * Set the variance to half the rtt (so our first
		 * retransmit happens at 3*rtt).
		 */
		tp->t_srtt = rtt << TCP_RTT_SHIFT;
		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
		tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
	}
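
	/*
	 * Worked example (illustrative values): with t_srtt = 320
	 * (10 ticks << TCP_RTT_SHIFT) and a new sample rtt = 14,
	 * delta = (13 << TCP_DELTA_SHIFT) - (320 >> (TCP_RTT_SHIFT -
	 * TCP_DELTA_SHIFT)) = 52 - 40 = 12, so t_srtt becomes 332,
	 * about 10.4 ticks: the estimate moves 1/8 of the way toward
	 * the origin-adjusted sample.
	 */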
	tp->t_rtttime = 0;
	tp->t_rxtshift = 0;

	/*
	 * The retransmit should happen at rtt + 4 * rttvar.
	 * Because of the way we do the smoothing, srtt and rttvar
	 * will each average +1/2 tick of bias.  When we compute
	 * the retransmit timer, we want 1/2 tick of rounding and
	 * 1 extra tick because of +-1/2 tick uncertainty in the
	 * firing of the timer.  The bias will give us exactly the
	 * 1.5 tick we need.  But, because the bias is
	 * statistical, we have to test that we don't drop below
	 * the minimum feasible timer (which is 2 ticks).
	 */
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
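
	/*
	 * Continuing the example above: TCP_REXMTVAL() is roughly
	 * srtt + 4 * rttvar, and TCPT_RANGESET() clamps the result
	 * between max(t_rttmin, rtt + 2) and TCPTV_REXMTMAX, so the
	 * retransmit timer can never fire sooner than two ticks past
	 * the most recent measured rtt.
	 */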

	/*
	 * We received an ack for a packet that wasn't retransmitted;
	 * it is probably safe to discard any error indications we've
	 * received recently.  This isn't quite right, but close enough
	 * for now (a route might have failed after we sent a segment,
	 * and the return path might not be symmetrical).
	 */
	tp->t_softerror = 0;
}

/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, check route for mtu.
 * If none, use an mss that can be handled on the outgoing
 * interface without forcing IP to fragment; if bigger than
 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
 * to utilize large mbufs.  If no route is found, route has no mtu,
 * or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.  We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * Also take into account the space needed for options that we
 * send regularly.  Make maxseg shorter by that amount to assure
 * that we can send maxseg amount of data even when the options
 * are present.  Store the upper limit of the length of options plus
 * data in maxopd.
 *
 * In case of T/TCP, we call this routine during implicit connection
 * setup as well (offer = -1), to initialize maxseg from the cached
 * MSS of our peer.
 *
 * NOTE that this routine is only called when we process an incoming
 * segment.  Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
 */
void
tcp_mss(struct tcpcb *tp, int offer)
{
	int rtt, mss;
	u_long bufsize;
	u_long maxmtu;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;
	struct hc_metrics_lite metrics;
	int origoffer = offer;
	int mtuflags = 0;
#ifdef INET6
	int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
	size_t min_protoh = isipv6 ?
	    sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
	    sizeof (struct tcpiphdr);
#else
	const size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	/* Initialize. */
#ifdef INET6
	if (isipv6) {
		maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags);
		tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
	} else
#endif
	{
		maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags);
		tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
	}
	so = inp->inp_socket;

	/*
	 * No route to sender, stay with default mss and return.
	 */
	if (maxmtu == 0)
		return;

	/* What have we got? */
	switch (offer) {
	case 0:
		/*
		 * Offer == 0 means that there was no MSS on the SYN
		 * segment, in this case we use tcp_mssdflt.
		 */
		offer =
#ifdef INET6
			isipv6 ? tcp_v6mssdflt :
#endif
			tcp_mssdflt;
		break;

	case -1:
		/*
		 * Offer == -1 means that we didn't receive a SYN yet.
		 */
		/* FALLTHROUGH */

	default:
		/*
		 * Prevent DoS attack with too small MSS.  Round up
		 * to at least minmss.
		 */
		offer = max(offer, tcp_minmss);
		/*
		 * Sanity check: make sure that maxopd will be large
		 * enough to allow some data on segments even if all
		 * the option space is used (40 bytes).  Otherwise
		 * funny things may happen in tcp_output.
		 */
		offer = max(offer, 64);
	}

	/*
	 * rmx information is now retrieved from tcp_hostcache.
	 */
	tcp_hc_get(&inp->inp_inc, &metrics);

	/*
	 * If there's a discovered MTU in the tcp hostcache, use it;
	 * else, use the link MTU.
	 */
	if (metrics.rmx_mtu)
		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
	else {
#ifdef INET6
		if (isipv6) {
			mss = maxmtu - min_protoh;
			if (!path_mtu_discovery &&
			    !in6_localaddr(&inp->in6p_faddr))
				mss = min(mss, tcp_v6mssdflt);
		} else
#endif
		{
			mss = maxmtu - min_protoh;
			if (!path_mtu_discovery &&
			    !in_localaddr(inp->inp_faddr))
				mss = min(mss, tcp_mssdflt);
		}
	}
	mss = min(mss, offer);

	/*
	 * maxopd stores the maximum length of data AND options
	 * in a segment; maxseg is the amount of data in a normal
	 * segment.  We need to store this value (maxopd) apart
	 * from maxseg, because now every segment carries options
	 * and thus we normally have somewhat less data in segments.
	 */
	tp->t_maxopd = mss;

	/*
	 * origoffer==-1 indicates that no segments were received yet.
	 * In this case we just guess.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (origoffer == -1 ||
	     (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;
	tp->t_maxseg = mss;

#if	(MCLBYTES & (MCLBYTES - 1)) == 0
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES-1);
#else
	if (mss > MCLBYTES)
		mss = mss / MCLBYTES * MCLBYTES;
#endif
	tp->t_maxseg = mss;
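
	/*
	 * Worked example (illustrative values): for IPv4 over a path
	 * with a 1500-byte MTU, min_protoh is 40, so mss starts at
	 * 1460.  If timestamps are in use, TCPOLEN_TSTAMP_APPA (12)
	 * is subtracted, leaving 1448 bytes of data per segment while
	 * t_maxopd stays at 1460.
	 */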

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change if sb_hiwat is different from the default (then it
	 * has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	tp->t_maxseg = mss;

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);
	/*
	 * While we're here, check the others too.
	 */
	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		tcpstat.tcps_usedrtt++;
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			tcpstat.tcps_usedrttvar++;
		} else {
			/* Default variation is +- 1 rtt. */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
		tcpstat.tcps_usedssthresh++;
	}
	if (metrics.rmx_bandwidth)
		tp->snd_bandwidth = metrics.rmx_bandwidth;

	/*
	 * Set the slow-start flight size depending on whether this
	 * is a local network or not.
	 *
	 * Extend this so we cache the cwnd too and retrieve it here.
	 * Make cwnd even bigger than RFC3390 suggests but only if we
	 * have previous experience with the remote host.  Be careful
	 * not to make cwnd bigger than the remote receive window or our
	 * own send socket buffer.  Maybe put some additional upper bound
	 * on the retrieved cwnd.  Should do incremental updates to
	 * hostcache when cwnd collapses so the next connection doesn't
	 * overload the path again.
	 *
	 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost.
	 * We currently check only in syncache_socket for that.
	 */
#define TCP_METRICS_CWND
#ifdef TCP_METRICS_CWND
	if (metrics.rmx_cwnd)
		tp->snd_cwnd = max(mss,
		    min(metrics.rmx_cwnd / 2,
		    min(tp->snd_wnd, so->so_snd.sb_hiwat)));
	else
#endif
	if (tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
#ifdef INET6
	else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
	    (!isipv6 && in_localaddr(inp->inp_faddr)))
#else
	else if (in_localaddr(inp->inp_faddr))
#endif
		tp->snd_cwnd = mss * ss_fltsz_local;
	else
		tp->snd_cwnd = mss * ss_fltsz;
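
	/*
	 * Example (illustrative values): with mss = 1460, RFC3390
	 * yields min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes,
	 * an initial window of three segments; otherwise the initial
	 * window is ss_fltsz (or ss_fltsz_local) segments.
	 */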

	/* Check the interface for TSO capabilities. */
	if (mtuflags & CSUM_TSO)
		tp->t_flags |= TF_TSO;
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	u_long maxmtu = 0;
	u_long thcmtu = 0;
	size_t min_protoh;
#ifdef INET6
	int isipv6 = inc->inc_isipv6 ? 1 : 0;
#endif

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (isipv6) {
		mss = tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else
#endif
	{
		mss = tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
		min_protoh = sizeof(struct tcpiphdr);
	}
	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}
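
/*
 * Example (illustrative values): for an IPv4 peer reached over a
 * 1500-byte MTU interface with no cached hostcache MTU, the function
 * returns 1500 - sizeof(struct tcpiphdr) = 1460, the MSS value to
 * advertise in our outgoing SYN.
 */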

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
static void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on fact that tp->snd_una
	 * not updated yet.
	 */
	if (tp->snd_cwnd > th->th_ack - tp->snd_una)
		tp->snd_cwnd -= th->th_ack - tp->snd_una;
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += tp->t_maxseg;
}
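
/*
 * Worked example (illustrative values): with snd_una = 1000,
 * th_ack = 3000 and t_maxseg = 1460, the partial ack retransmits the
 * segment at 3000 under a temporary cwnd of 1460 + 2000 bytes; the
 * saved cwnd is then deflated by the 2000 newly acked bytes and one
 * maxseg is added back, keeping the amount of data in flight roughly
 * constant through recovery.
 */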