1 /*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
34 * $FreeBSD: releng/5.2/sys/netinet/tcp_subr.c 126496 2004-03-02 17:24:46Z nectar $
35 */
36
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
40 #include "opt_mac.h"
41 #include "opt_tcpdebug.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47 #include <sys/sysctl.h>
48 #include <sys/mac.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #ifdef INET6
52 #include <sys/domain.h>
53 #endif
54 #include <sys/proc.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/protosw.h>
58 #include <sys/random.h>
59
60 #include <vm/uma.h>
61
62 #include <net/route.h>
63 #include <net/if.h>
64
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/ip.h>
68 #ifdef INET6
69 #include <netinet/ip6.h>
70 #endif
71 #include <netinet/in_pcb.h>
72 #ifdef INET6
73 #include <netinet6/in6_pcb.h>
74 #endif
75 #include <netinet/in_var.h>
76 #include <netinet/ip_var.h>
77 #ifdef INET6
78 #include <netinet6/ip6_var.h>
79 #include <netinet6/nd6.h>
80 #endif
81 #include <netinet/tcp.h>
82 #include <netinet/tcp_fsm.h>
83 #include <netinet/tcp_seq.h>
84 #include <netinet/tcp_timer.h>
85 #include <netinet/tcp_var.h>
86 #ifdef INET6
87 #include <netinet6/tcp6_var.h>
88 #endif
89 #include <netinet/tcpip.h>
90 #ifdef TCPDEBUG
91 #include <netinet/tcp_debug.h>
92 #endif
93 #include <netinet6/ip6protosw.h>
94
95 #ifdef IPSEC
96 #include <netinet6/ipsec.h>
97 #ifdef INET6
98 #include <netinet6/ipsec6.h>
99 #endif
100 #endif /*IPSEC*/
101
102 #ifdef FAST_IPSEC
103 #include <netipsec/ipsec.h>
104 #ifdef INET6
105 #include <netipsec/ipsec6.h>
106 #endif
107 #define IPSEC
108 #endif /*FAST_IPSEC*/
109
110 #include <machine/in_cksum.h>
111 #include <sys/md5.h>
112
113 int tcp_mssdflt = TCP_MSS;
114 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
115 &tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
116
117 #ifdef INET6
118 int tcp_v6mssdflt = TCP6_MSS;
119 SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
120 CTLFLAG_RW, &tcp_v6mssdflt , 0,
121 "Default TCP Maximum Segment Size for IPv6");
122 #endif
123
124 /*
125 * Minimum MSS we accept and use. This prevents DoS attacks where
126  * we are forced to a ridiculously low MSS like 20 and send hundreds
127 * of packets instead of one. The effect scales with the available
128 * bandwidth and quickly saturates the CPU and network interface
129 * with packet generation and sending. Set to zero to disable MINMSS
130 * checking. This setting prevents us from sending too small packets.
131 */
132 int tcp_minmss = TCP_MINMSS;
133 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
134     &tcp_minmss , 0, "Minimum TCP Maximum Segment Size");
135 /*
136 * Number of TCP segments per second we accept from remote host
137 * before we start to calculate average segment size. If average
138 * segment size drops below the minimum TCP MSS we assume a DoS
139 * attack and reset+drop the connection. Care has to be taken not to
140  * set this value too low, or interactive-type connections
141  * (telnet, SSH), which send many small packets, will be killed.
142 */
143 int tcp_minmssoverload = TCP_MINMSSOVERLOAD;
144 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
145     &tcp_minmssoverload , 0, "Number of TCP Segments per Second allowed to "
146 "be under the MINMSS Size");
147
148 #if 0
149 static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
150 SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
151 &tcp_rttdflt , 0, "Default maximum TCP Round Trip Time");
152 #endif
153
154 int tcp_do_rfc1323 = 1;
155 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
156 &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
157
158 int tcp_do_rfc1644 = 0;
159 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
160 &tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");
161
162 static int tcp_tcbhashsize = 0;
163 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
164 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
165
166 static int do_tcpdrain = 1;
167 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
168 "Enable tcp_drain routine for extra help when low on mbufs");
169
170 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
171 &tcbinfo.ipi_count, 0, "Number of active PCBs");
172
173 static int icmp_may_rst = 1;
174 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
175 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
176
177 static int tcp_isn_reseed_interval = 0;
178 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
179 &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
180
181 /*
182 * TCP bandwidth limiting sysctls. Note that the default lower bound of
183 * 1024 exists only for debugging. A good production default would be
184 * something like 6100.
185 */
186 static int tcp_inflight_enable = 0;
187 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
188 &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");
189
190 static int tcp_inflight_debug = 0;
191 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
192 &tcp_inflight_debug, 0, "Debug TCP inflight calculations");
193
194 static int tcp_inflight_min = 6144;
195 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
196 &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");
197
198 static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
199 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
200 &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");
201 static int tcp_inflight_stab = 20;
202 SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
203 &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");
204
205 static struct inpcb *tcp_notify(struct inpcb *, int);
206 static void tcp_discardcb(struct tcpcb *);
207
208 /*
209 * Target size of TCP PCB hash tables. Must be a power of two.
210 *
211 * Note that this can be overridden by the kernel environment
212 * variable net.inet.tcp.tcbhashsize
213 */
214 #ifndef TCBHASHSIZE
215 #define TCBHASHSIZE 512
216 #endif
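/*
 * For example, the tunable can be set from the boot loader (typically
 * in /boot/loader.conf):
 *
 *	net.inet.tcp.tcbhashsize="1024"
 *
 * tcp_init() fetches it below with TUNABLE_INT_FETCH() and falls back
 * to 512 if the requested size is not a power of 2.
 */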
217
218 /*
219 * XXX
220 * Callouts should be moved into struct tcp directly. They are currently
221  * separate because the tcpcb structure is exported to userland for sysctl
222  * parsing purposes, and userland does not know about callouts.
223 */
224 struct tcpcb_mem {
225 struct tcpcb tcb;
226 struct callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
227 struct callout tcpcb_mem_2msl, tcpcb_mem_delack;
228 };
229
230 static uma_zone_t tcpcb_zone;
231 static uma_zone_t tcptw_zone;
232
233 /*
234  * TCP initialization
235 */
236 void
237 tcp_init()
238 {
239 int hashsize = TCBHASHSIZE;
240
241 tcp_ccgen = 1;
242
243 tcp_delacktime = TCPTV_DELACK;
244 tcp_keepinit = TCPTV_KEEP_INIT;
245 tcp_keepidle = TCPTV_KEEP_IDLE;
246 tcp_keepintvl = TCPTV_KEEPINTVL;
247 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
248 tcp_msl = TCPTV_MSL;
249 tcp_rexmit_min = TCPTV_MIN;
250 tcp_rexmit_slop = TCPTV_CPU_VAR;
251
252 INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
253 LIST_INIT(&tcb);
254 tcbinfo.listhead = &tcb;
255 TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
256 if (!powerof2(hashsize)) {
257 printf("WARNING: TCB hash size not a power of 2\n");
258 hashsize = 512; /* safe default */
259 }
260 tcp_tcbhashsize = hashsize;
261 tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
262 tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
263 &tcbinfo.porthashmask);
264 tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
265 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
266 uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
267 #ifdef INET6
268 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
269 #else /* INET6 */
270 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
271 #endif /* INET6 */
272 if (max_protohdr < TCP_MINPROTOHDR)
273 max_protohdr = TCP_MINPROTOHDR;
274 if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
275 panic("tcp_init");
276 #undef TCP_MINPROTOHDR
277 /*
278 * These have to be type stable for the benefit of the timers.
279 */
280 tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
281 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
282 uma_zone_set_max(tcpcb_zone, maxsockets);
283 tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
284 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
285 uma_zone_set_max(tcptw_zone, maxsockets / 5);
286 tcp_timer_init();
287 syncache_init();
288 tcp_hc_init();
289 tcp_reass_init();
290 }
291
292 /*
293 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
294 * tcp_template used to store this data in mbufs, but we now recopy it out
295 * of the tcpcb each time to conserve mbufs.
296 */
297 void
298 tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
299 struct inpcb *inp;
300 void *ip_ptr;
301 void *tcp_ptr;
302 {
303 struct tcphdr *th = (struct tcphdr *)tcp_ptr;
304
305 #ifdef INET6
306 if ((inp->inp_vflag & INP_IPV6) != 0) {
307 struct ip6_hdr *ip6;
308
309 ip6 = (struct ip6_hdr *)ip_ptr;
310 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
311 (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
312 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
313 (IPV6_VERSION & IPV6_VERSION_MASK);
314 ip6->ip6_nxt = IPPROTO_TCP;
315 ip6->ip6_plen = sizeof(struct tcphdr);
316 ip6->ip6_src = inp->in6p_laddr;
317 ip6->ip6_dst = inp->in6p_faddr;
318 } else
319 #endif
320 {
321 struct ip *ip;
322
323 ip = (struct ip *)ip_ptr;
324 ip->ip_v = IPVERSION;
325 ip->ip_hl = 5;
326 ip->ip_tos = inp->inp_ip_tos;
327 ip->ip_len = 0;
328 ip->ip_id = 0;
329 ip->ip_off = 0;
330 ip->ip_ttl = inp->inp_ip_ttl;
331 ip->ip_sum = 0;
332 ip->ip_p = IPPROTO_TCP;
333 ip->ip_src = inp->inp_laddr;
334 ip->ip_dst = inp->inp_faddr;
335 }
336 th->th_sport = inp->inp_lport;
337 th->th_dport = inp->inp_fport;
338 th->th_seq = 0;
339 th->th_ack = 0;
340 th->th_x2 = 0;
341 th->th_off = 5;
342 th->th_flags = 0;
343 th->th_win = 0;
344 th->th_urp = 0;
345 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */
346 }
347
348 /*
349 * Create template to be used to send tcp packets on a connection.
350 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
351 * use for this function is in keepalives, which use tcp_respond.
352 */
353 struct tcptemp *
354 tcpip_maketemplate(inp)
355 struct inpcb *inp;
356 {
357 struct mbuf *m;
358 struct tcptemp *n;
359
360 m = m_get(M_DONTWAIT, MT_HEADER);
361 if (m == NULL)
362 return (0);
363 m->m_len = sizeof(struct tcptemp);
364 n = mtod(m, struct tcptemp *);
365
366 tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
367 return (n);
368 }
369
370 /*
371 * Send a single message to the TCP at address specified by
372 * the given TCP/IP header. If m == 0, then we make a copy
373 * of the tcpiphdr at ti and send directly to the addressed host.
374 * This is used to force keep alive messages out using the TCP
375 * template for a connection. If flags are given then we send
376  * a message back to the TCP which originated the segment ti,
377 * and discard the mbuf containing it and any other attached mbufs.
378 *
379 * In any case the ack and sequence number of the transmitted
380 * segment are as specified by the parameters.
381 *
382 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
383 */
384 void
385 tcp_respond(tp, ipgen, th, m, ack, seq, flags)
386 struct tcpcb *tp;
387 void *ipgen;
388 register struct tcphdr *th;
389 register struct mbuf *m;
390 tcp_seq ack, seq;
391 int flags;
392 {
393 register int tlen;
394 int win = 0;
395 struct ip *ip;
396 struct tcphdr *nth;
397 #ifdef INET6
398 struct ip6_hdr *ip6;
399 int isipv6;
400 #endif /* INET6 */
401 int ipflags = 0;
402 struct inpcb *inp = NULL;
403
404 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
405
406 #ifdef INET6
407 isipv6 = ((struct ip *)ipgen)->ip_v == 6;
408 ip6 = ipgen;
409 #endif /* INET6 */
410 ip = ipgen;
411
412 if (tp) {
413 inp = tp->t_inpcb;
414 KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
415 INP_INFO_WLOCK_ASSERT(&tcbinfo);
416 INP_LOCK_ASSERT(inp);
417 if (!(flags & TH_RST)) {
418 win = sbspace(&inp->inp_socket->so_rcv);
419 if (win > (long)TCP_MAXWIN << tp->rcv_scale)
420 win = (long)TCP_MAXWIN << tp->rcv_scale;
421 }
422 }
423 if (m == 0) {
424 m = m_gethdr(M_DONTWAIT, MT_HEADER);
425 if (m == NULL)
426 return;
427 tlen = 0;
428 m->m_data += max_linkhdr;
429 #ifdef INET6
430 if (isipv6) {
431 bcopy((caddr_t)ip6, mtod(m, caddr_t),
432 sizeof(struct ip6_hdr));
433 ip6 = mtod(m, struct ip6_hdr *);
434 nth = (struct tcphdr *)(ip6 + 1);
435 } else
436 #endif /* INET6 */
437 {
438 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
439 ip = mtod(m, struct ip *);
440 nth = (struct tcphdr *)(ip + 1);
441 }
442 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
443 flags = TH_ACK;
444 } else {
445 m_freem(m->m_next);
446 m->m_next = 0;
447 m->m_data = (caddr_t)ipgen;
448 /* m_len is set later */
449 tlen = 0;
450 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
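/*
 * Reuse the passed-in mbuf: the source and destination addresses and
 * ports are swapped in place below so that the reply is sent back to
 * the originator of the segment.
 */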
451 #ifdef INET6
452 if (isipv6) {
453 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
454 nth = (struct tcphdr *)(ip6 + 1);
455 } else
456 #endif /* INET6 */
457 {
458 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
459 nth = (struct tcphdr *)(ip + 1);
460 }
461 if (th != nth) {
462 /*
463			 * this usually happens when an extension header
464 * exists between the IPv6 header and the
465 * TCP header.
466 */
467 nth->th_sport = th->th_sport;
468 nth->th_dport = th->th_dport;
469 }
470 xchg(nth->th_dport, nth->th_sport, n_short);
471 #undef xchg
472 }
473 #ifdef INET6
474 if (isipv6) {
475 ip6->ip6_flow = 0;
476 ip6->ip6_vfc = IPV6_VERSION;
477 ip6->ip6_nxt = IPPROTO_TCP;
478 ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
479 tlen));
480 tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
481 } else
482 #endif
483 {
484 tlen += sizeof (struct tcpiphdr);
485 ip->ip_len = tlen;
486 ip->ip_ttl = ip_defttl;
487 if (path_mtu_discovery)
488 ip->ip_off |= IP_DF;
489 }
490 m->m_len = tlen;
491 m->m_pkthdr.len = tlen;
492 m->m_pkthdr.rcvif = (struct ifnet *) 0;
493 #ifdef MAC
494 if (inp != NULL) {
495 /*
496 * Packet is associated with a socket, so allow the
497 * label of the response to reflect the socket label.
498 */
499 mac_create_mbuf_from_socket(inp->inp_socket, m);
500 } else {
501 /*
502 * Packet is not associated with a socket, so possibly
503 * update the label in place.
504 */
505 mac_reflect_mbuf_tcp(m);
506 }
507 #endif
508 nth->th_seq = htonl(seq);
509 nth->th_ack = htonl(ack);
510 nth->th_x2 = 0;
511 nth->th_off = sizeof (struct tcphdr) >> 2;
512 nth->th_flags = flags;
513 if (tp)
514 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
515 else
516 nth->th_win = htons((u_short)win);
517 nth->th_urp = 0;
518 #ifdef INET6
519 if (isipv6) {
520 nth->th_sum = 0;
521 nth->th_sum = in6_cksum(m, IPPROTO_TCP,
522 sizeof(struct ip6_hdr),
523 tlen - sizeof(struct ip6_hdr));
524 ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL, NULL);
525 } else
526 #endif /* INET6 */
527 {
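		/*
		 * Only the pseudo-header sum is computed here; setting
		 * CSUM_TCP and pointing csum_data at th_sum asks the lower
		 * layers (or the NIC) to finish the TCP checksum on output.
		 */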
528 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
529 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
530 m->m_pkthdr.csum_flags = CSUM_TCP;
531 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
532 }
533 #ifdef TCPDEBUG
534 if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
535 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
536 #endif
537 #ifdef INET6
538 if (isipv6)
539 (void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
540 else
541 #endif /* INET6 */
542 (void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
543 }
544
545 /*
546 * Create a new TCP control block, making an
547 * empty reassembly queue and hooking it to the argument
548 * protocol control block. The `inp' parameter must have
549 * come from the zone allocator set up in tcp_init().
550 */
551 struct tcpcb *
552 tcp_newtcpcb(inp)
553 struct inpcb *inp;
554 {
555 struct tcpcb_mem *tm;
556 struct tcpcb *tp;
557 #ifdef INET6
558 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
559 #endif /* INET6 */
560
561 tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
562 if (tm == NULL)
563 return (NULL);
564 tp = &tm->tcb;
565 /* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
566 tp->t_maxseg = tp->t_maxopd =
567 #ifdef INET6
568 isipv6 ? tcp_v6mssdflt :
569 #endif /* INET6 */
570 tcp_mssdflt;
571
572 /* Set up our timeouts. */
573 callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, 0);
574 callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, 0);
575 callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, 0);
576 callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, 0);
577 callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, 0);
578
579 if (tcp_do_rfc1323)
580 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
581 if (tcp_do_rfc1644)
582 tp->t_flags |= TF_REQ_CC;
583 tp->t_inpcb = inp; /* XXX */
584 /*
585 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
586 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
587 * reasonable initial retransmit time.
588 */
589 tp->t_srtt = TCPTV_SRTTBASE;
590 tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
591 tp->t_rttmin = tcp_rexmit_min;
592 tp->t_rxtcur = TCPTV_RTOBASE;
593 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
594 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
595 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
596 tp->t_rcvtime = ticks;
597 tp->t_bw_rtttime = ticks;
598 /*
599 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
600 * because the socket may be bound to an IPv6 wildcard address,
601 * which may match an IPv4-mapped IPv6 address.
602 */
603 inp->inp_ip_ttl = ip_defttl;
604 inp->inp_ppcb = (caddr_t)tp;
605 return (tp); /* XXX */
606 }
607
608 /*
609 * Drop a TCP connection, reporting
610 * the specified error. If connection is synchronized,
611 * then send a RST to peer.
612 */
613 struct tcpcb *
614 tcp_drop(tp, errno)
615 register struct tcpcb *tp;
616 int errno;
617 {
618 struct socket *so = tp->t_inpcb->inp_socket;
619
620 if (TCPS_HAVERCVDSYN(tp->t_state)) {
621 tp->t_state = TCPS_CLOSED;
622 (void) tcp_output(tp);
623 tcpstat.tcps_drops++;
624 } else
625 tcpstat.tcps_conndrops++;
626 if (errno == ETIMEDOUT && tp->t_softerror)
627 errno = tp->t_softerror;
628 so->so_error = errno;
629 return (tcp_close(tp));
630 }
631
632 static void
633 tcp_discardcb(tp)
634 struct tcpcb *tp;
635 {
636 struct tseg_qent *q;
637 struct inpcb *inp = tp->t_inpcb;
638 struct socket *so = inp->inp_socket;
639 #ifdef INET6
640 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
641 #endif /* INET6 */
642
643 /*
644 * Make sure that all of our timers are stopped before we
645 * delete the PCB.
646 */
647 callout_stop(tp->tt_rexmt);
648 callout_stop(tp->tt_persist);
649 callout_stop(tp->tt_keep);
650 callout_stop(tp->tt_2msl);
651 callout_stop(tp->tt_delack);
652
653 /*
654 * If we got enough samples through the srtt filter,
655 * save the rtt and rttvar in the routing entry.
656 * 'Enough' is arbitrarily defined as 4 rtt samples.
657 * 4 samples is enough for the srtt filter to converge
658	 * close enough to the correct value; with fewer samples
659	 * we could save a bogus rtt. The danger is not high
660 * as tcp quickly recovers from everything.
661 * XXX: Works very well but needs some more statistics!
662 */
663 if (tp->t_rttupdated >= 4) {
664 struct hc_metrics_lite metrics;
665 u_long ssthresh;
666
667 bzero(&metrics, sizeof(metrics));
668 /*
669		 * Always update the ssthresh when the conditions below
670		 * are satisfied. This gives us a better starting value
671		 * for congestion avoidance on new connections.
672		 * ssthresh is only set if packet loss occurred on a session.
673 */
674 ssthresh = tp->snd_ssthresh;
675 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
676 /*
677 * convert the limit from user data bytes to
678 * packets then to packet data bytes.
679 */
680 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
681 if (ssthresh < 2)
682 ssthresh = 2;
683 ssthresh *= (u_long)(tp->t_maxseg +
684 #ifdef INET6
685 (isipv6 ? sizeof (struct ip6_hdr) +
686 sizeof (struct tcphdr) :
687 #endif
688 sizeof (struct tcpiphdr)
689 #ifdef INET6
690 )
691 #endif
692 );
693 } else
694 ssthresh = 0;
695 metrics.rmx_ssthresh = ssthresh;
696
697 metrics.rmx_rtt = tp->t_srtt;
698 metrics.rmx_rttvar = tp->t_rttvar;
699 /* XXX: This wraps if the pipe is more than 4 Gbit per second */
700 metrics.rmx_bandwidth = tp->snd_bandwidth;
701 metrics.rmx_cwnd = tp->snd_cwnd;
702 metrics.rmx_sendpipe = 0;
703 metrics.rmx_recvpipe = 0;
704
705 tcp_hc_update(&inp->inp_inc, &metrics);
706 }
707
708 /* free the reassembly queue, if any */
709 while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
710 LIST_REMOVE(q, tqe_q);
711 m_freem(q->tqe_m);
712 uma_zfree(tcp_reass_zone, q);
713 tcp_reass_qsize--;
714 }
715 inp->inp_ppcb = NULL;
716 tp->t_inpcb = NULL;
717 uma_zfree(tcpcb_zone, tp);
718 soisdisconnected(so);
719 }
720
721 /*
722 * Close a TCP control block:
723 * discard all space held by the tcp
724 * discard internet protocol block
725 * wake up any sleepers
726 */
727 struct tcpcb *
728 tcp_close(tp)
729 struct tcpcb *tp;
730 {
731 struct inpcb *inp = tp->t_inpcb;
732 #ifdef INET6
733 struct socket *so = inp->inp_socket;
734 #endif
735
736 tcp_discardcb(tp);
737 #ifdef INET6
738 if (INP_CHECK_SOCKAF(so, AF_INET6))
739 in6_pcbdetach(inp);
740 else
741 #endif
742 in_pcbdetach(inp);
743 tcpstat.tcps_closed++;
744 return ((struct tcpcb *)0);
745 }
746
747 void
748 tcp_drain()
749 {
750 if (do_tcpdrain)
751 {
752 struct inpcb *inpb;
753 struct tcpcb *tcpb;
754 struct tseg_qent *te;
755
756 /*
757		 * Walk the tcpcbs, if any exist, and flush the reassembly queue,
758 * if there is one...
759 * XXX: The "Net/3" implementation doesn't imply that the TCP
760 * reassembly queue should be flushed, but in a situation
761 * where we're really low on mbufs, this is potentially
762		 *	useful.
763 */
764 INP_INFO_RLOCK(&tcbinfo);
765 LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
766 if (inpb->inp_vflag & INP_TIMEWAIT)
767 continue;
768 INP_LOCK(inpb);
769 if ((tcpb = intotcpcb(inpb))) {
770 while ((te = LIST_FIRST(&tcpb->t_segq))
771 != NULL) {
772 LIST_REMOVE(te, tqe_q);
773 m_freem(te->tqe_m);
774 uma_zfree(tcp_reass_zone, te);
775 tcp_reass_qsize--;
776 }
777 }
778 INP_UNLOCK(inpb);
779 }
780 INP_INFO_RUNLOCK(&tcbinfo);
781 }
782 }
783
784 /*
785 * Notify a tcp user of an asynchronous error;
786 * store error as soft error, but wake up user
787 * (for now, won't do anything until can select for soft error).
788 *
789 * Do not wake up user since there currently is no mechanism for
790 * reporting soft errors (yet - a kqueue filter may be added).
791 */
792 static struct inpcb *
793 tcp_notify(inp, error)
794 struct inpcb *inp;
795 int error;
796 {
797 struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
798
799 /*
800 * Ignore some errors if we are hooked up.
801 * If connection hasn't completed, has retransmitted several times,
802 * and receives a second error, give up now. This is better
803 * than waiting a long time to establish a connection that
804 * can never complete.
805 */
806 if (tp->t_state == TCPS_ESTABLISHED &&
807 (error == EHOSTUNREACH || error == ENETUNREACH ||
808 error == EHOSTDOWN)) {
809 return inp;
810 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
811 tp->t_softerror) {
812 tcp_drop(tp, error);
813 return (struct inpcb *)0;
814 } else {
815 tp->t_softerror = error;
816 return inp;
817 }
818 #if 0
819 wakeup( &so->so_timeo);
820 sorwakeup(so);
821 sowwakeup(so);
822 #endif
823 }
824
825 static int
826 tcp_pcblist(SYSCTL_HANDLER_ARGS)
827 {
828 int error, i, n, s;
829 struct inpcb *inp, **inp_list;
830 inp_gen_t gencnt;
831 struct xinpgen xig;
832
833 /*
834 * The process of preparing the TCB list is too time-consuming and
835 * resource-intensive to repeat twice on every request.
836 */
837 if (req->oldptr == 0) {
838 n = tcbinfo.ipi_count;
839 req->oldidx = 2 * (sizeof xig)
840 + (n + n/8) * sizeof(struct xtcpcb);
841 return 0;
842 }
843
844 if (req->newptr != 0)
845 return EPERM;
846
847 /*
848 * OK, now we're committed to doing something.
849 */
850 s = splnet();
851 INP_INFO_RLOCK(&tcbinfo);
852 gencnt = tcbinfo.ipi_gencnt;
853 n = tcbinfo.ipi_count;
854 INP_INFO_RUNLOCK(&tcbinfo);
855 splx(s);
856
857 sysctl_wire_old_buffer(req, 2 * (sizeof xig)
858 + n * sizeof(struct xtcpcb));
859
860 xig.xig_len = sizeof xig;
861 xig.xig_count = n;
862 xig.xig_gen = gencnt;
863 xig.xig_sogen = so_gencnt;
864 error = SYSCTL_OUT(req, &xig, sizeof xig);
865 if (error)
866 return error;
867
868 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
869 if (inp_list == 0)
870 return ENOMEM;
871
872 s = splnet();
873 INP_INFO_RLOCK(&tcbinfo);
874 for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
875 inp = LIST_NEXT(inp, inp_list)) {
876 INP_LOCK(inp);
877 if (inp->inp_gencnt <= gencnt) {
878 /*
879 * XXX: This use of cr_cansee(), introduced with
880 * TCP state changes, is not quite right, but for
881 * now, better than nothing.
882 */
883 if (inp->inp_vflag & INP_TIMEWAIT)
884 error = cr_cansee(req->td->td_ucred,
885 intotw(inp)->tw_cred);
886 else
887 error = cr_canseesocket(req->td->td_ucred,
888 inp->inp_socket);
889 if (error == 0)
890 inp_list[i++] = inp;
891 }
892 INP_UNLOCK(inp);
893 }
894 INP_INFO_RUNLOCK(&tcbinfo);
895 splx(s);
896 n = i;
897
898 error = 0;
899 for (i = 0; i < n; i++) {
900 inp = inp_list[i];
901 if (inp->inp_gencnt <= gencnt) {
902 struct xtcpcb xt;
903 caddr_t inp_ppcb;
904 xt.xt_len = sizeof xt;
905 /* XXX should avoid extra copy */
906 bcopy(inp, &xt.xt_inp, sizeof *inp);
907 inp_ppcb = inp->inp_ppcb;
908 if (inp_ppcb == NULL)
909 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
910 else if (inp->inp_vflag & INP_TIMEWAIT) {
911 bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
912 xt.xt_tp.t_state = TCPS_TIME_WAIT;
913 } else
914 bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
915 if (inp->inp_socket)
916 sotoxsocket(inp->inp_socket, &xt.xt_socket);
917 else {
918 bzero(&xt.xt_socket, sizeof xt.xt_socket);
919 xt.xt_socket.xso_protocol = IPPROTO_TCP;
920 }
921 xt.xt_inp.inp_gencnt = inp->inp_gencnt;
922 error = SYSCTL_OUT(req, &xt, sizeof xt);
923 }
924 }
925 if (!error) {
926 /*
927 * Give the user an updated idea of our state.
928 * If the generation differs from what we told
929 * her before, she knows that something happened
930 * while we were processing this request, and it
931 * might be necessary to retry.
932 */
933 s = splnet();
934 INP_INFO_RLOCK(&tcbinfo);
935 xig.xig_gen = tcbinfo.ipi_gencnt;
936 xig.xig_sogen = so_gencnt;
937 xig.xig_count = tcbinfo.ipi_count;
938 INP_INFO_RUNLOCK(&tcbinfo);
939 splx(s);
940 error = SYSCTL_OUT(req, &xig, sizeof xig);
941 }
942 free(inp_list, M_TEMP);
943 return error;
944 }
945
946 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
947 tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
948
949 static int
950 tcp_getcred(SYSCTL_HANDLER_ARGS)
951 {
952 struct xucred xuc;
953 struct sockaddr_in addrs[2];
954 struct inpcb *inp;
955 int error, s;
956
957 error = suser_cred(req->td->td_ucred, PRISON_ROOT);
958 if (error)
959 return (error);
960 error = SYSCTL_IN(req, addrs, sizeof(addrs));
961 if (error)
962 return (error);
963 s = splnet();
964 INP_INFO_RLOCK(&tcbinfo);
965 inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
966 addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
967 if (inp == NULL) {
968 error = ENOENT;
969 goto outunlocked;
970 }
971 INP_LOCK(inp);
972 if (inp->inp_socket == NULL) {
973 error = ENOENT;
974 goto out;
975 }
976 error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
977 if (error)
978 goto out;
979 cru2x(inp->inp_socket->so_cred, &xuc);
980 out:
981 INP_UNLOCK(inp);
982 outunlocked:
983 INP_INFO_RUNLOCK(&tcbinfo);
984 splx(s);
985 if (error == 0)
986 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
987 return (error);
988 }
989
990 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
991 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
992 tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
993
994 #ifdef INET6
995 static int
996 tcp6_getcred(SYSCTL_HANDLER_ARGS)
997 {
998 struct xucred xuc;
999 struct sockaddr_in6 addrs[2];
1000 struct inpcb *inp;
1001 int error, s, mapped = 0;
1002
1003 error = suser_cred(req->td->td_ucred, PRISON_ROOT);
1004 if (error)
1005 return (error);
1006 error = SYSCTL_IN(req, addrs, sizeof(addrs));
1007 if (error)
1008 return (error);
1009 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
1010 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
1011 mapped = 1;
1012 else
1013 return (EINVAL);
1014 }
1015 s = splnet();
1016 INP_INFO_RLOCK(&tcbinfo);
1017 if (mapped == 1)
1018 inp = in_pcblookup_hash(&tcbinfo,
1019 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
1020 addrs[1].sin6_port,
1021 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
1022 addrs[0].sin6_port,
1023 0, NULL);
1024 else
1025 inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
1026 addrs[1].sin6_port,
1027 &addrs[0].sin6_addr, addrs[0].sin6_port,
1028 0, NULL);
1029 if (inp == NULL) {
1030 error = ENOENT;
1031 goto outunlocked;
1032 }
1033 INP_LOCK(inp);
1034 if (inp->inp_socket == NULL) {
1035 error = ENOENT;
1036 goto out;
1037 }
1038 error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
1039 if (error)
1040 goto out;
1041 cru2x(inp->inp_socket->so_cred, &xuc);
1042 out:
1043 INP_UNLOCK(inp);
1044 outunlocked:
1045 INP_INFO_RUNLOCK(&tcbinfo);
1046 splx(s);
1047 if (error == 0)
1048 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
1049 return (error);
1050 }
1051
1052 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
1053 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
1054 tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
1055 #endif
1056
1057
1058 void
1059 tcp_ctlinput(cmd, sa, vip)
1060 int cmd;
1061 struct sockaddr *sa;
1062 void *vip;
1063 {
1064 struct ip *ip = vip;
1065 struct tcphdr *th;
1066 struct in_addr faddr;
1067 struct inpcb *inp;
1068 struct tcpcb *tp;
1069 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1070 tcp_seq icmp_seq;
1071 int s;
1072
1073 faddr = ((struct sockaddr_in *)sa)->sin_addr;
1074 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
1075 return;
1076
1077 if (cmd == PRC_QUENCH)
1078 notify = tcp_quench;
1079 else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
1080 cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
1081 notify = tcp_drop_syn_sent;
1082 else if (cmd == PRC_MSGSIZE)
1083 notify = tcp_mtudisc;
1084 /*
1085 * Redirects don't need to be handled up here.
1086 */
1087 else if (PRC_IS_REDIRECT(cmd))
1088 return;
1089 /*
1090 * Hostdead is ugly because it goes linearly through all PCBs.
1091	 * XXX: We never get this from ICMP, otherwise it would make an
1092 * excellent DoS attack on machines with many connections.
1093 */
1094 else if (cmd == PRC_HOSTDEAD)
1095 ip = 0;
1096 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
1097 return;
1098 if (ip) {
1099 s = splnet();
1100 th = (struct tcphdr *)((caddr_t)ip
1101 + (ip->ip_hl << 2));
1102 INP_INFO_WLOCK(&tcbinfo);
1103 inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
1104 ip->ip_src, th->th_sport, 0, NULL);
1105 if (inp != NULL) {
1106 INP_LOCK(inp);
1107 if (inp->inp_socket != NULL) {
1108 icmp_seq = htonl(th->th_seq);
1109 tp = intotcpcb(inp);
1110 if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
1111 SEQ_LT(icmp_seq, tp->snd_max))
1112 inp = (*notify)(inp, inetctlerrmap[cmd]);
1113 }
1114 if (inp)
1115 INP_UNLOCK(inp);
1116 } else {
1117 struct in_conninfo inc;
1118
1119 inc.inc_fport = th->th_dport;
1120 inc.inc_lport = th->th_sport;
1121 inc.inc_faddr = faddr;
1122 inc.inc_laddr = ip->ip_src;
1123 #ifdef INET6
1124 inc.inc_isipv6 = 0;
1125 #endif
1126 syncache_unreach(&inc, th);
1127 }
1128 INP_INFO_WUNLOCK(&tcbinfo);
1129 splx(s);
1130 } else
1131 in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
1132 }
1133
1134 #ifdef INET6
1135 void
1136 tcp6_ctlinput(cmd, sa, d)
1137 int cmd;
1138 struct sockaddr *sa;
1139 void *d;
1140 {
1141 struct tcphdr th;
1142 struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1143 struct ip6_hdr *ip6;
1144 struct mbuf *m;
1145 struct ip6ctlparam *ip6cp = NULL;
1146 const struct sockaddr_in6 *sa6_src = NULL;
1147 int off;
1148 struct tcp_portonly {
1149 u_int16_t th_sport;
1150 u_int16_t th_dport;
1151 } *thp;
1152
1153 if (sa->sa_family != AF_INET6 ||
1154 sa->sa_len != sizeof(struct sockaddr_in6))
1155 return;
1156
1157 if (cmd == PRC_QUENCH)
1158 notify = tcp_quench;
1159 else if (cmd == PRC_MSGSIZE)
1160 notify = tcp_mtudisc;
1161 else if (!PRC_IS_REDIRECT(cmd) &&
1162 ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
1163 return;
1164
1165 /* if the parameter is from icmp6, decode it. */
1166 if (d != NULL) {
1167 ip6cp = (struct ip6ctlparam *)d;
1168 m = ip6cp->ip6c_m;
1169 ip6 = ip6cp->ip6c_ip6;
1170 off = ip6cp->ip6c_off;
1171 sa6_src = ip6cp->ip6c_src;
1172 } else {
1173 m = NULL;
1174 ip6 = NULL;
1175 off = 0; /* fool gcc */
1176 sa6_src = &sa6_any;
1177 }
1178
1179 if (ip6) {
1180 struct in_conninfo inc;
1181 /*
1182 * XXX: We assume that when IPV6 is non NULL,
1183 * M and OFF are valid.
1184 */
1185
1186 /* check if we can safely examine src and dst ports */
1187 if (m->m_pkthdr.len < off + sizeof(*thp))
1188 return;
1189
1190 bzero(&th, sizeof(th));
1191 m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
1192
1193 in6_pcbnotify(&tcb, sa, th.th_dport,
1194 (struct sockaddr *)ip6cp->ip6c_src,
1195 th.th_sport, cmd, notify);
1196
1197 inc.inc_fport = th.th_dport;
1198 inc.inc_lport = th.th_sport;
1199 inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
1200 inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
1201 inc.inc_isipv6 = 1;
1202 syncache_unreach(&inc, &th);
1203 } else
1204 in6_pcbnotify(&tcb, sa, 0, (const struct sockaddr *)sa6_src,
1205 0, cmd, notify);
1206 }
1207 #endif /* INET6 */
1208
1209
1210 /*
1211 * Following is where TCP initial sequence number generation occurs.
1212 *
1213 * There are two places where we must use initial sequence numbers:
1214 * 1. In SYN-ACK packets.
1215 * 2. In SYN packets.
1216 *
1217 * All ISNs for SYN-ACK packets are generated by the syncache. See
1218 * tcp_syncache.c for details.
1219 *
1220 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
1221 * depends on this property. In addition, these ISNs should be
1222 * unguessable so as to prevent connection hijacking. To satisfy
1223 * the requirements of this situation, the algorithm outlined in
1224 * RFC 1948 is used to generate sequence numbers.
1225 *
1226 * Implementation details:
1227 *
1228 * Time is based off the system timer, and is corrected so that it
1229 * increases by one megabyte per second. This allows for proper
1230 * recycling on high speed LANs while still leaving over an hour
1231 * before rollover.
1232 *
1233 * net.inet.tcp.isn_reseed_interval controls the number of seconds
1234 * between seeding of isn_secret. This is normally set to zero,
1235 * as reseeding should not be necessary.
1236 *
1237 */
1238
1239 #define ISN_BYTES_PER_SECOND 1048576
1240
1241 u_char isn_secret[32];
1242 int isn_last_reseed;
1243 MD5_CTX isn_ctx;
1244
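/*
 * In outline, the ISN computed below is
 *
 *	MD5(ports, addresses, isn_secret) + ticks * (ISN_BYTES_PER_SECOND / hz)
 *
 * i.e. a connection-specific pseudo-random offset plus a clock that
 * advances at ISN_BYTES_PER_SECOND, as suggested by RFC 1948.
 */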
1245 tcp_seq
1246 tcp_new_isn(tp)
1247 struct tcpcb *tp;
1248 {
1249 u_int32_t md5_buffer[4];
1250 tcp_seq new_isn;
1251
1252 /* Seed if this is the first use, reseed if requested. */
1253 if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
1254 (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
1255 < (u_int)ticks))) {
1256 read_random(&isn_secret, sizeof(isn_secret));
1257 isn_last_reseed = ticks;
1258 }
1259
1260 /* Compute the md5 hash and return the ISN. */
1261 MD5Init(&isn_ctx);
1262 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
1263 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
1264 #ifdef INET6
1265 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
1266 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
1267 sizeof(struct in6_addr));
1268 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
1269 sizeof(struct in6_addr));
1270 } else
1271 #endif
1272 {
1273 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
1274 sizeof(struct in_addr));
1275 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
1276 sizeof(struct in_addr));
1277 }
1278 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
1279 MD5Final((u_char *) &md5_buffer, &isn_ctx);
1280 new_isn = (tcp_seq) md5_buffer[0];
1281 new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
1282 return new_isn;
1283 }
1284
1285 /*
1286 * When a source quench is received, close congestion window
1287 * to one segment. We will gradually open it again as we proceed.
1288 */
1289 struct inpcb *
1290 tcp_quench(inp, errno)
1291 struct inpcb *inp;
1292 int errno;
1293 {
1294 struct tcpcb *tp = intotcpcb(inp);
1295
1296 if (tp)
1297 tp->snd_cwnd = tp->t_maxseg;
1298 return (inp);
1299 }
1300
1301 /*
1302 * When a specific ICMP unreachable message is received and the
1303 * connection state is SYN-SENT, drop the connection. This behavior
1304 * is controlled by the icmp_may_rst sysctl.
1305 */
1306 struct inpcb *
1307 tcp_drop_syn_sent(inp, errno)
1308 struct inpcb *inp;
1309 int errno;
1310 {
1311 struct tcpcb *tp = intotcpcb(inp);
1312
1313 if (tp && tp->t_state == TCPS_SYN_SENT) {
1314 tcp_drop(tp, errno);
1315 return (struct inpcb *)0;
1316 }
1317 return inp;
1318 }
1319
1320 /*
1321 * When `need fragmentation' ICMP is received, update our idea of the MSS
1322 * based on the new value in the route. Also nudge TCP to send something,
1323 * since we know the packet we just sent was dropped.
1324 * This duplicates some code in the tcp_mss() function in tcp_input.c.
1325 */
1326 struct inpcb *
1327 tcp_mtudisc(inp, errno)
1328 struct inpcb *inp;
1329 int errno;
1330 {
1331 struct tcpcb *tp = intotcpcb(inp);
1332 struct rmxp_tao tao;
1333 struct socket *so = inp->inp_socket;
1334 u_int maxmtu;
1335 u_int romtu;
1336 int mss;
1337 #ifdef INET6
1338 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
1339 #endif /* INET6 */
1340 bzero(&tao, sizeof(tao));
1341
1342 if (tp) {
1343 maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
1344 romtu =
1345 #ifdef INET6
1346 isipv6 ? tcp_maxmtu6(&inp->inp_inc) :
1347 #endif /* INET6 */
1348 tcp_maxmtu(&inp->inp_inc);
1349 if (!maxmtu)
1350 maxmtu = romtu;
1351 else
1352 maxmtu = min(maxmtu, romtu);
1353 if (!maxmtu) {
1354 tp->t_maxopd = tp->t_maxseg =
1355 #ifdef INET6
1356 isipv6 ? tcp_v6mssdflt :
1357 #endif /* INET6 */
1358 tcp_mssdflt;
1359 return inp;
1360 }
1361 mss = maxmtu -
1362 #ifdef INET6
1363 (isipv6 ?
1364 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1365 #endif /* INET6 */
1366 sizeof(struct tcpiphdr)
1367 #ifdef INET6
1368 )
1369 #endif /* INET6 */
1370 ;
1371
1372 if (tcp_do_rfc1644) {
1373 tcp_hc_gettao(&inp->inp_inc, &tao);
1374 if (tao.tao_mssopt)
1375 mss = min(mss, tao.tao_mssopt);
1376 }
1377 /*
1378 * XXX - The above conditional probably violates the TCP
1379 * spec. The problem is that, since we don't know the
1380 * other end's MSS, we are supposed to use a conservative
1381 * default. But, if we do that, then MTU discovery will
1382 * never actually take place, because the conservative
1383 * default is much less than the MTUs typically seen
1384 * on the Internet today. For the moment, we'll sweep
1385 * this under the carpet.
1386 *
1387 * The conservative default might not actually be a problem
1388 * if the only case this occurs is when sending an initial
1389 * SYN with options and data to a host we've never talked
1390 * to before. Then, they will reply with an MSS value which
1391 * will get recorded and the new parameters should get
1392 * recomputed. For Further Study.
1393 */
1394 if (tp->t_maxopd <= mss)
1395 return inp;
1396 tp->t_maxopd = mss;
1397
1398 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
1399 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
1400 mss -= TCPOLEN_TSTAMP_APPA;
1401 if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
1402 (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
1403 mss -= TCPOLEN_CC_APPA;
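		/*
		 * Round the MSS down to a multiple of the mbuf cluster size
		 * so that full-sized segments make efficient use of mbuf
		 * clusters.
		 */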
1404 #if (MCLBYTES & (MCLBYTES - 1)) == 0
1405 if (mss > MCLBYTES)
1406 mss &= ~(MCLBYTES-1);
1407 #else
1408 if (mss > MCLBYTES)
1409 mss = mss / MCLBYTES * MCLBYTES;
1410 #endif
1411 if (so->so_snd.sb_hiwat < mss)
1412 mss = so->so_snd.sb_hiwat;
1413
1414 tp->t_maxseg = mss;
1415
1416 tcpstat.tcps_mturesent++;
1417 tp->t_rtttime = 0;
1418 tp->snd_nxt = tp->snd_una;
1419 tcp_output(tp);
1420 }
1421 return inp;
1422 }
1423
1424 /*
1425 * Look-up the routing entry to the peer of this inpcb. If no route
1426 * is found and it cannot be allocated, then return NULL. This routine
1427 * is called by TCP routines that access the rmx structure and by tcp_mss
1428 * to get the interface MTU.
1429 */
1430 u_long
1431 tcp_maxmtu(inc)
1432 struct in_conninfo *inc;
1433 {
1434 struct route sro;
1435 struct sockaddr_in *dst;
1436 struct ifnet *ifp;
1437 u_long maxmtu = 0;
1438
1439 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
1440
1441 bzero(&sro, sizeof(sro));
1442 if (inc->inc_faddr.s_addr != INADDR_ANY) {
1443 dst = (struct sockaddr_in *)&sro.ro_dst;
1444 dst->sin_family = AF_INET;
1445 dst->sin_len = sizeof(*dst);
1446 dst->sin_addr = inc->inc_faddr;
1447 rtalloc_ign(&sro, RTF_CLONING);
1448 }
1449 if (sro.ro_rt != NULL) {
1450 ifp = sro.ro_rt->rt_ifp;
1451 if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
1452 maxmtu = ifp->if_mtu;
1453 else
1454 maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
1455 RTFREE(sro.ro_rt);
1456 }
1457 return (maxmtu);
1458 }
1459
1460 #ifdef INET6
1461 u_long
1462 tcp_maxmtu6(inc)
1463 struct in_conninfo *inc;
1464 {
1465 struct route_in6 sro6;
1466 struct ifnet *ifp;
1467 u_long maxmtu = 0;
1468
1469 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
1470
1471 bzero(&sro6, sizeof(sro6));
1472 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
1473 sro6.ro_dst.sin6_family = AF_INET6;
1474 sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
1475 sro6.ro_dst.sin6_addr = inc->inc6_faddr;
1476 rtalloc_ign((struct route *)&sro6, RTF_CLONING);
1477 }
1478 if (sro6.ro_rt != NULL) {
1479 ifp = sro6.ro_rt->rt_ifp;
1480 if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
1481 maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
1482 else
1483 maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
1484 IN6_LINKMTU(sro6.ro_rt->rt_ifp));
1485 RTFREE(sro6.ro_rt);
1486 }
1487
1488 return (maxmtu);
1489 }
1490 #endif /* INET6 */
1491
1492 #ifdef IPSEC
1493 /* compute ESP/AH header size for TCP, including outer IP header. */
1494 size_t
1495 ipsec_hdrsiz_tcp(tp)
1496 struct tcpcb *tp;
1497 {
1498 struct inpcb *inp;
1499 struct mbuf *m;
1500 size_t hdrsiz;
1501 struct ip *ip;
1502 #ifdef INET6
1503 struct ip6_hdr *ip6;
1504 #endif
1505 struct tcphdr *th;
1506
1507 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
1508 return 0;
1509 MGETHDR(m, M_DONTWAIT, MT_DATA);
1510 if (!m)
1511 return 0;
1512
1513 #ifdef INET6
1514 if ((inp->inp_vflag & INP_IPV6) != 0) {
1515 ip6 = mtod(m, struct ip6_hdr *);
1516 th = (struct tcphdr *)(ip6 + 1);
1517 m->m_pkthdr.len = m->m_len =
1518 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1519 tcpip_fillheaders(inp, ip6, th);
1520 hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1521 } else
1522 #endif /* INET6 */
1523 {
1524 ip = mtod(m, struct ip *);
1525 th = (struct tcphdr *)(ip + 1);
1526 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
1527 tcpip_fillheaders(inp, ip, th);
1528 hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1529 }
1530
1531 m_free(m);
1532 return hdrsiz;
1533 }
1534 #endif /*IPSEC*/
1535
1536 /*
1537 * Move a TCP connection into TIME_WAIT state.
1538 * tcbinfo is unlocked.
1539 * inp is locked, and is unlocked before returning.
1540 */
1541 void
1542 tcp_twstart(tp)
1543 struct tcpcb *tp;
1544 {
1545 struct tcptw *tw;
1546 struct inpcb *inp;
1547 int tw_time, acknow;
1548 struct socket *so;
1549
1550 tw = uma_zalloc(tcptw_zone, M_NOWAIT);
1551 if (tw == NULL) {
1552 tw = tcp_timer_2msl_tw(1);
1553 if (tw == NULL) {
1554 tcp_close(tp);
1555 return;
1556 }
1557 }
1558 inp = tp->t_inpcb;
1559 tw->tw_inpcb = inp;
1560
1561 /*
1562 * Recover last window size sent.
1563 */
1564 tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
1565
1566 /*
1567 * Set t_recent if timestamps are used on the connection.
1568 */
1569 if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
1570 (TF_REQ_TSTMP|TF_RCVD_TSTMP))
1571 tw->t_recent = tp->ts_recent;
1572 else
1573 tw->t_recent = 0;
1574
1575 tw->snd_nxt = tp->snd_nxt;
1576 tw->rcv_nxt = tp->rcv_nxt;
1577 tw->iss = tp->iss;
1578 tw->irs = tp->irs;
1579 tw->cc_recv = tp->cc_recv;
1580 tw->cc_send = tp->cc_send;
1581 tw->t_starttime = tp->t_starttime;
1582 tw->tw_time = 0;
1583
1584 /* XXX
1585 * If this code will
1586 * be used for fin-wait-2 state also, then we may need
1587 * a ts_recent from the last segment.
1588 */
1589 /* Shorten TIME_WAIT [RFC-1644, p.28] */
1590 if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) {
1591 tw_time = tp->t_rxtcur * TCPTV_TWTRUNC;
1592 /* For T/TCP client, force ACK now. */
1593 acknow = 1;
1594 } else {
1595 tw_time = 2 * tcp_msl;
1596 acknow = tp->t_flags & TF_ACKNOW;
1597 }
1598 tcp_discardcb(tp);
1599 so = inp->inp_socket;
1600 so->so_pcb = NULL;
1601 tw->tw_cred = crhold(so->so_cred);
1602 tw->tw_so_options = so->so_options;
1603 if (acknow)
1604 tcp_twrespond(tw, so, NULL, TH_ACK);
1605 sotryfree(so);
1606 inp->inp_socket = NULL;
1607 inp->inp_ppcb = (caddr_t)tw;
1608 inp->inp_vflag |= INP_TIMEWAIT;
1609 tcp_timer_2msl_reset(tw, tw_time);
1610 INP_UNLOCK(inp);
1611 }
1612
1613 /*
1614  * The approximate rate of ISN increase of Microsoft TCP stacks;
1615 * the actual rate is slightly higher due to the addition of
1616 * random positive increments.
1617 *
1618 * Most other new OSes use semi-randomized ISN values, so we
1619 * do not need to worry about them.
1620 */
1621 #define MS_ISN_BYTES_PER_SECOND 250000
1622
1623 /*
1624 * Determine if the ISN we will generate has advanced beyond the last
1625 * sequence number used by the previous connection. If so, indicate
1626 * that it is safe to recycle this tw socket by returning 1.
1627 */
1628 int
1629 tcp_twrecycleable(struct tcptw *tw)
1630 {
1631 tcp_seq new_iss = tw->iss;
1632 tcp_seq new_irs = tw->irs;
1633
1634 new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
1635 new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);
1636
1637 if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
1638 return 1;
1639 else
1640 return 0;
1641 }
1642
1643 struct tcptw *
1644 tcp_twclose(struct tcptw *tw, int reuse)
1645 {
1646 struct inpcb *inp;
1647
1648 inp = tw->tw_inpcb;
1649 tw->tw_inpcb = NULL;
1650 tcp_timer_2msl_stop(tw);
1651 inp->inp_ppcb = NULL;
1652 #ifdef INET6
1653 if (inp->inp_vflag & INP_IPV6PROTO)
1654 in6_pcbdetach(inp);
1655 else
1656 #endif
1657 in_pcbdetach(inp);
1658 tcpstat.tcps_closed++;
1659 crfree(tw->tw_cred);
1660 tw->tw_cred = NULL;
1661 if (reuse)
1662 return (tw);
1663 uma_zfree(tcptw_zone, tw);
1664 return (NULL);
1665 }
1666
1667 /*
1668 * One of so and msrc must be non-NULL for use by the MAC Framework to
1669  * construct a label for any resulting packet.
1670 */
1671 int
1672 tcp_twrespond(struct tcptw *tw, struct socket *so, struct mbuf *msrc,
1673 int flags)
1674 {
1675 struct inpcb *inp = tw->tw_inpcb;
1676 struct tcphdr *th;
1677 struct mbuf *m;
1678 struct ip *ip = NULL;
1679 u_int8_t *optp;
1680 u_int hdrlen, optlen;
1681 int error;
1682 #ifdef INET6
1683 struct ip6_hdr *ip6 = NULL;
1684 int isipv6 = inp->inp_inc.inc_isipv6;
1685 #endif
1686
1687 KASSERT(so != NULL || msrc != NULL,
1688 ("tcp_twrespond: so and msrc NULL"));
1689
1690 m = m_gethdr(M_DONTWAIT, MT_HEADER);
1691 if (m == NULL)
1692 return (ENOBUFS);
1693 m->m_data += max_linkhdr;
1694
1695 #ifdef MAC
1696 if (so != NULL)
1697 mac_create_mbuf_from_socket(so, m);
1698 else
1699 mac_create_mbuf_netlayer(msrc, m);
1700 #endif
1701
1702 #ifdef INET6
1703 if (isipv6) {
1704 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1705 ip6 = mtod(m, struct ip6_hdr *);
1706 th = (struct tcphdr *)(ip6 + 1);
1707 tcpip_fillheaders(inp, ip6, th);
1708 } else
1709 #endif
1710 {
1711 hdrlen = sizeof(struct tcpiphdr);
1712 ip = mtod(m, struct ip *);
1713 th = (struct tcphdr *)(ip + 1);
1714 tcpip_fillheaders(inp, ip, th);
1715 }
1716 optp = (u_int8_t *)(th + 1);
1717
1718 /*
1719 * Send a timestamp and echo-reply if both our side and our peer
1720 * have sent timestamps in our SYN's and this is not a RST.
1721 */
1722 if (tw->t_recent && flags == TH_ACK) {
1723 u_int32_t *lp = (u_int32_t *)optp;
1724
1725 /* Form timestamp option as shown in appendix A of RFC 1323. */
1726 *lp++ = htonl(TCPOPT_TSTAMP_HDR);
1727 *lp++ = htonl(ticks);
1728 *lp = htonl(tw->t_recent);
1729 optp += TCPOLEN_TSTAMP_APPA;
1730 }
1731
1732 /*
1733 * Send `CC-family' options if needed, and it's not a RST.
1734 */
1735 if (tw->cc_recv != 0 && flags == TH_ACK) {
1736 u_int32_t *lp = (u_int32_t *)optp;
1737
1738 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
1739 *lp = htonl(tw->cc_send);
1740 optp += TCPOLEN_CC_APPA;
1741 }
1742 optlen = optp - (u_int8_t *)(th + 1);
1743
1744 m->m_len = hdrlen + optlen;
1745 m->m_pkthdr.len = m->m_len;
1746
1747 KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));
1748
1749 th->th_seq = htonl(tw->snd_nxt);
1750 th->th_ack = htonl(tw->rcv_nxt);
1751 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1752 th->th_flags = flags;
1753 th->th_win = htons(tw->last_win);
1754
1755 #ifdef INET6
1756 if (isipv6) {
1757 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
1758 sizeof(struct tcphdr) + optlen);
1759 ip6->ip6_hlim = in6_selecthlim(inp, NULL);
1760 error = ip6_output(m, inp->in6p_outputopts, NULL,
1761 (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
1762 } else
1763 #endif
1764 {
1765 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1766 htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
1767 m->m_pkthdr.csum_flags = CSUM_TCP;
1768 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1769 ip->ip_len = m->m_pkthdr.len;
1770 if (path_mtu_discovery)
1771 ip->ip_off |= IP_DF;
1772 error = ip_output(m, inp->inp_options, NULL,
1773 (tw->tw_so_options & SO_DONTROUTE), NULL, inp);
1774 }
1775 if (flags & TH_ACK)
1776 tcpstat.tcps_sndacks++;
1777 else
1778 tcpstat.tcps_sndctrl++;
1779 tcpstat.tcps_sndtotal++;
1780 return (error);
1781 }
1782
1783 /*
1784 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
1785 *
1786 * This code attempts to calculate the bandwidth-delay product as a
1787 * means of determining the optimal window size to maximize bandwidth,
1788 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
1789 * routers. This code also does a fairly good job keeping RTTs in check
1790  * across slow links like modems. We implement an algorithm which is very
1791  * similar to (but not meant to be) TCP/Vegas. The code operates on the
1792  * transmitter side of a TCP connection and so only affects the transmit
1793  * side of the connection.
1794 *
1795 * BACKGROUND: TCP makes no provision for the management of buffer space
1796 * at the end points or at the intermediate routers and switches. A TCP
1797 * stream, whether using NewReno or not, will eventually buffer as
1798 * many packets as it is able and the only reason this typically works is
1799 * due to the fairly small default buffers made available for a connection
1800  * (typically 16K or 32K). As machines use larger windows and/or window
1801 * scaling it is now fairly easy for even a single TCP connection to blow-out
1802 * all available buffer space not only on the local interface, but on
1803 * intermediate routers and switches as well. NewReno makes a misguided
1804 * attempt to 'solve' this problem by waiting for an actual failure to occur,
1805 * then backing off, then steadily increasing the window again until another
1806 * failure occurs, ad-infinitum. This results in terrible oscillation that
1807 * is only made worse as network loads increase and the idea of intentionally
1808 * blowing out network buffers is, frankly, a terrible way to manage network
1809 * resources.
1810 *
1811 * It is far better to limit the transmit window prior to the failure
1812 * condition being achieved. There are two general ways to do this: First
1813 * you can 'scan' through different transmit window sizes and locate the
1814 * point where the RTT stops increasing, indicating that you have filled the
1815 * pipe, then scan backwards until you note that RTT stops decreasing, then
1816 * repeat ad-infinitum. This method works in principle but has severe
1817 * implementation issues due to RTT variances, timer granularity, and
1818 * instability in the algorithm which can lead to many false positives and
1819 * create oscillations as well as interact badly with other TCP streams
1820 * implementing the same algorithm.
1821 *
1822 * The second method is to limit the window to the bandwidth delay product
1823 * of the link. This is the method we implement. RTT variances and our
1824 * own manipulation of the congestion window, bwnd, can potentially
1825 * destabilize the algorithm. For this reason we have to stabilize the
1826 * elements used to calculate the window. We do this by using the minimum
1827 * observed RTT, the long term average of the observed bandwidth, and
1828 * by adding two segments worth of slop. It isn't perfect but it is able
1829 * to react to changing conditions and gives us a very stable basis on
1830 * which to extend the algorithm.
1831 */
1832 void
1833 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
1834 {
1835 u_long bw;
1836 u_long bwnd;
1837 int save_ticks;
1838
1839 /*
1840 * If inflight_enable is disabled in the middle of a tcp connection,
1841 * make sure snd_bwnd is effectively disabled.
1842 */
1843 if (tcp_inflight_enable == 0) {
1844 tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1845 tp->snd_bandwidth = 0;
1846 return;
1847 }
1848
1849 /*
1850 * Figure out the bandwidth. Due to the tick granularity this
1851 * is a very rough number and it MUST be averaged over a fairly
1852 * long period of time. XXX we need to take into account a link
1853 * that is not using all available bandwidth, but for now our
1854 * slop will ramp us up if this case occurs and the bandwidth later
1855 * increases.
1856 *
1857 * Note: if ticks rollover 'bw' may wind up negative. We must
1858 * effectively reset t_bw_rtttime for this case.
1859 */
1860 save_ticks = ticks;
1861 if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
1862 return;
1863
1864 bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
1865 (save_ticks - tp->t_bw_rtttime);
1866 tp->t_bw_rtttime = save_ticks;
1867 tp->t_bw_rtseq = ack_seq;
1868 if (tp->t_bw_rtttime == 0 || (int)bw < 0)
1869 return;
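	/*
	 * Fold the new sample into a long-term exponentially weighted
	 * moving average with a gain of 1/16.
	 */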
1870 bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
1871
1872 tp->snd_bandwidth = bw;
1873
1874 /*
1875 * Calculate the semi-static bandwidth delay product, plus two maximal
1876 * segments. The additional slop puts us squarely in the sweet
1877 * spot and also handles the bandwidth run-up case and stabilization.
1878 * Without the slop we could be locking ourselves into a lower
1879 * bandwidth.
1880 *
1881 * Situations Handled:
1882 * (1) Prevents over-queueing of packets on LANs, especially on
1883 * high speed LANs, allowing larger TCP buffers to be
1884 * specified, and also does a good job preventing
1885 * over-queueing of packets over choke points like modems
1886 * (at least for the transmit side).
1887 *
1888 * (2) Is able to handle changing network loads (bandwidth
1889 * drops so bwnd drops, bandwidth increases so bwnd
1890 * increases).
1891 *
1892 * (3) Theoretically should stabilize in the face of multiple
1893 * connections implementing the same algorithm (this may need
1894 * a little work).
1895 *
1896 * (4) Stability value (defaults to 20 = 2 maximal packets) can
1897 * be adjusted with a sysctl but typically only needs to be
1898	 *	    changed on very slow connections. A value no smaller than 5
1899 * should be used, but only reduce this default if you have
1900 * no other choice.
1901 */
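/*
 * The computation below is, in bytes,
 *
 *	bwnd = bw * (t_srtt + t_rttbest)/2 / hz  +  stab * t_maxseg / 10
 *
 * where t_srtt and t_rttbest are kept in ticks scaled up by
 * 2^TCP_RTT_SHIFT, so the first term is the long-term bandwidth times
 * the smoothed RTT and the second is tcp_inflight_stab tenths of a
 * segment of slop.
 */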
1902 #define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
1903 bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + tcp_inflight_stab * tp->t_maxseg / 10;
1904 #undef USERTT
1905
1906 if (tcp_inflight_debug > 0) {
1907 static int ltime;
1908 if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
1909 ltime = ticks;
1910 printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
1911 tp,
1912 bw,
1913 tp->t_rttbest,
1914 tp->t_srtt,
1915 bwnd
1916 );
1917 }
1918 }
1919 if ((long)bwnd < tcp_inflight_min)
1920 bwnd = tcp_inflight_min;
1921 if (bwnd > tcp_inflight_max)
1922 bwnd = tcp_inflight_max;
1923 if ((long)bwnd < tp->t_maxseg * 2)
1924 bwnd = tp->t_maxseg * 2;
1925 tp->snd_bwnd = bwnd;
1926 }
1927