/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: releng/5.3/sys/netinet/tcp_subr.c 146003 2005-05-08 10:23:51Z cperciva $
 */

#include "opt_compat.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /* IPSEC */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#define IPSEC
#endif /* FAST_IPSEC */

#include <machine/in_cksum.h>
#include <sys/md5.h>

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculously low MSS, such as 20, and have to
 * send hundreds of packets instead of one. The effect scales with
 * the available bandwidth and quickly saturates the CPU and network
 * interface with packet generation and sending. Set to zero to
 * disable MINMSS checking. This setting keeps us from sending
 * needlessly small packets.
 */
int	tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
/*
 * Number of TCP segments per second we accept from a remote host
 * before we start to compute the average segment size. If the
 * average segment size drops below the minimum TCP MSS, we assume a
 * DoS attack and reset+drop the connection. Care has to be taken not
 * to set this value too low, lest we kill interactive-type
 * connections (telnet, SSH) which send many small packets.
 */
int	tcp_minmssoverload = TCP_MINMSSOVERLOAD;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
    &tcp_minmssoverload, 0, "Number of TCP Segments per Second allowed to"
    " be under the MINMSS Size");

#if 0
static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  Note that the default lower bound of
 * 1024 exists only for debugging.  A good production default would be
 * something like 6100.
 */
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
    "TCP inflight data limiting");

static int	tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0,
    "Inflight Algorithm Stabilization, in units of maxseg/10 (20 = 2 packets)");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 0, "TCP SACK");
int	tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_RW,
    &tcp_do_sack, 0, "Enable/Disable TCP SACK support");

uma_zone_t sack_hole_zone;

static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_discardcb(struct tcpcb *);
static void	tcp_isn_tick(void *);

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize.
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif
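
/*
 * Example: because the tcbhashsize sysctl above is CTLFLAG_RDTUN, the
 * value can only be set at boot, e.g. via /boot/loader.conf:
 *
 *	net.inet.tcp.tcbhashsize="1024"
 *
 * tcp_init() below rejects values that are not a power of 2 and falls
 * back to the safe default of 512.
 */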

/*
 * XXX
 * Callouts should be moved into struct tcp directly.  They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing, and userland consumers do not know about callouts.
 */
struct tcpcb_mem {
	struct	tcpcb tcb;
	struct	callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
	struct	callout tcpcb_mem_2msl, tcpcb_mem_delack;
};

static uma_zone_t tcpcb_zone;
static uma_zone_t tcptw_zone;
struct callout isn_callout;

/*
 * TCP initialization.
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;

	tcp_ccgen = 1;

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.porthashmask);
	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcptw_zone, maxsockets / 5);
	tcp_timer_init();
	syncache_init();
	tcp_hc_init();
	tcp_reass_init();
	callout_init(&isn_callout, CALLOUT_MPSAFE);
	tcp_isn_tick(NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

void
tcp_fini(xtp)
	void *xtp;
{
	callout_stop(&isn_callout);
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
	struct inpcb *inp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else
#endif
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(inp)
	struct inpcb *inp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keepalive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
	struct tcpcb *tp;
	void *ipgen;
	register struct tcphdr *th;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct ip *ip;
	struct tcphdr *nth;
#ifdef INET6
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;
	struct inpcb *inp;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp != NULL) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_INFO_WLOCK_ASSERT(&tcbinfo);
		INP_LOCK_ASSERT(inp);
	} else
		inp = NULL;

	if (tp != NULL) {
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
	}
	if (m == NULL) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
#ifdef MAC
	if (inp != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		INP_LOCK_ASSERT(inp);
		mac_create_mbuf_from_inpcb(inp, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_reflect_mbuf_tcp(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#ifdef INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
		    NULL, NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
	if (isipv6)
		(void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
	else
#endif /* INET6 */
	(void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
}
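
/*
 * Usage sketch (illustrative; the callers live elsewhere): the input
 * path typically answers an unacceptable segment with a RST along the
 * lines of
 *
 *	tcp_respond(tp, ipgen, th, m, th->th_seq + tlen,
 *	    (tcp_seq)0, TH_RST|TH_ACK);
 *
 * while the keepalive timer passes m == NULL so that a fresh mbuf is
 * built from the connection's template headers.
 */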

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */
	int callout_flag;

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;
	/* LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
	    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
	    tcp_mssdflt;

	/* Set up our timeouts. */
	/*
	 * XXXRW: Are these actually MPSAFE?  I think so, but need to
	 * review the timed wait code, as it has some list variables,
	 * etc, that are global.
	 */
	callout_flag = debug_mpsafenet ? CALLOUT_MPSAFE : 0;
	callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, callout_flag);
	callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, callout_flag);
	callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, callout_flag);
	callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, callout_flag);
	callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, callout_flag);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->sack_enable = tcp_do_sack;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

static void
tcp_discardcb(tp)
	struct tcpcb *tp;
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 4 rtt samples.
	 * 4 samples is enough for the srtt filter to converge
	 * to within a small percentage of the correct value; fewer
	 * samples and we could save a bogus rtt.  The danger is not
	 * high, as tcp quickly recovers from almost everything.
	 * XXX: Works very well but needs some more statistics!
	 */
	if (tp->t_rttupdated >= 4) {
		struct hc_metrics_lite metrics;
		u_long ssthresh;

		bzero(&metrics, sizeof(metrics));
		/*
		 * Always update the ssthresh when the conditions below
		 * are satisfied.  This gives us a better starting value
		 * for congestion avoidance on new connections.
		 * ssthresh is only set if packet loss occurred on the
		 * session.
		 */
		ssthresh = tp->snd_ssthresh;
		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
			if (ssthresh < 2)
				ssthresh = 2;
			ssthresh *= (u_long)(tp->t_maxseg +
#ifdef INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
				sizeof (struct tcphdr) :
#endif
				sizeof (struct tcpiphdr)
#ifdef INET6
			    )
#endif
			    );
		} else
			ssthresh = 0;
		metrics.rmx_ssthresh = ssthresh;

		metrics.rmx_rtt = tp->t_srtt;
		metrics.rmx_rttvar = tp->t_rttvar;
		/* XXX: This wraps if the pipe is more than 4 Gbit per second */
		metrics.rmx_bandwidth = tp->snd_bandwidth;
		metrics.rmx_cwnd = tp->snd_cwnd;
		metrics.rmx_sendpipe = 0;
		metrics.rmx_recvpipe = 0;

		tcp_hc_update(&inp->inp_inc, &metrics);
	}

	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
	}
	tcp_free_sackholes(tp);
	inp->inp_ppcb = NULL;
	tp->t_inpcb = NULL;
	uma_zfree(tcpcb_zone, tp);
	soisdisconnected(so);
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	struct socket *so = inp->inp_socket;
#endif

	tcp_discardcb(tp);
#ifdef INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return (NULL);
}

void
tcp_drain()
{
	if (do_tcpdrain) {
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if they exist, and flush the reassembly
		 * queue, if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
			if (inpb->inp_vflag & INP_TIMEWAIT)
				continue;
			INP_LOCK(inpb);
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					uma_zfree(tcp_reass_zone, te);
					tcpb->t_segqlen--;
					tcp_reass_qsize--;
				}
			}
			INP_UNLOCK(inpb);
		}
		INP_INFO_RUNLOCK(&tcbinfo);
	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return inp;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tcp_drop(tp, error);
		return (struct inpcb *)0;
	} else {
		tp->t_softerror = error;
		return inp;
	}
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != NULL)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
	    + n * sizeof(struct xtcpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == NULL)
		return ENOMEM;

	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_vflag & INP_TIMEWAIT)
				error = cr_cansee(req->td->td_ucred,
				    intotw(inp)->tw_cred);
			else
				error = cr_canseesocket(req->td->td_ucred,
				    inp->inp_socket);
			if (error == 0)
				inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;

			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;
			} else
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket != NULL)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			else {
				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;
			}
			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error, s;

	error = suser_cred(req->td->td_ucred, SUSER_ALLOWJAIL);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
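
/*
 * Example (illustrative): userland, e.g. an ident daemon, can resolve a
 * connection to its owning credentials by passing the two endpoints in
 * the order expected by the lookup above (addrs[0] local, addrs[1]
 * foreign):
 *
 *	struct sockaddr_in addrs[2];
 *	struct xucred xuc;
 *	size_t len = sizeof(xuc);
 *	sysctlbyname("net.inet.tcp.getcred", &xuc, &len,
 *	    addrs, sizeof(addrs));
 */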

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s, mapped = 0;

	error = suser_cred(req->td->td_ucred, SUSER_ALLOWJAIL);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
		    addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif

void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;
	int s;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	/*
	 * Redirects don't need to be handled up here.
	 */
	else if (PRC_IS_REDIRECT(cmd))
		return;
	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		s = splnet();
		th = (struct tcphdr *)((caddr_t)ip
		    + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&tcbinfo);
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL) {
			INP_LOCK(inp);
			if (inp->inp_socket != NULL) {
				icmp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_seq, tp->snd_max))
					inp = (*notify)(inp, inetctlerrmap[cmd]);
			}
			if (inp != NULL)
				INP_UNLOCK(inp);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		INP_INFO_WUNLOCK(&tcbinfo);
		splx(s);
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when ip6 is non-NULL,
		 * m and off are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcbinfo, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, NULL, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		INP_INFO_WLOCK(&tcbinfo);
		syncache_unreach(&inc, &th);
		INP_INFO_WUNLOCK(&tcbinfo);
	} else
		in6_pcbnotify(&tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
		    0, cmd, NULL, notify);
}
#endif /* INET6 */

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1. In SYN-ACK packets.
 * 2. In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used, with only small modifications.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * As reading the *exact* system time is too expensive to be done
 * whenever setting up a TCP connection, we increment the time
 * offset in two ways.  First, a small random positive increment
 * is added to isn_offset for each connection that is set up.
 * Second, the function tcp_isn_tick fires once per clock tick
 * and increments isn_offset as necessary so that sequence numbers
 * are incremented at approximately ISN_BYTES_PER_SECOND.  The
 * random positive increments serve only to ensure that the same
 * exact sequence number is never sent out twice (as could otherwise
 * happen when a port is recycled in less than the system tick
 * interval).
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define ISN_BYTES_PER_SECOND 1048576
#define ISN_STATIC_INCREMENT 4096
#define ISN_RANDOM_INCREMENT (4096 - 1)
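
/*
 * Worked example: with hz = 100, tcp_isn_tick() advances isn_offset by
 * ISN_BYTES_PER_SECOND / hz = 10485 per tick, while each new connection
 * adds between ISN_STATIC_INCREMENT and nearly twice that (4096..8191).
 * At 2^32 sequence numbers per wrap, the base offset alone takes
 * 2^32 / ISN_BYTES_PER_SECOND = 4096 seconds, a bit over an hour, to
 * roll over, matching the commentary above.
 */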

u_char isn_secret[32];
int isn_last_reseed;
u_int32_t isn_offset, isn_offset_old;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	isn_offset += ISN_STATIC_INCREMENT +
	    (arc4random() & ISN_RANDOM_INCREMENT);
	new_isn += isn_offset;
	return new_isn;
}

/*
 * Increment the offset to the next ISN_BYTES_PER_SECOND / hz boundary
 * to keep time flowing at a relatively constant rate.  If the random
 * increments have already pushed us past the projected offset, do nothing.
 */
static void
tcp_isn_tick(xtp)
	void *xtp;
{
	u_int32_t projected_offset;

	projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / hz;

	if (projected_offset > isn_offset)
		isn_offset = projected_offset;

	isn_offset_old = isn_offset;
	callout_reset(&isn_callout, 1, tcp_isn_tick, NULL);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
struct inpcb *
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL)
		tp->snd_cwnd = tp->t_maxseg;
	return (inp);
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL && tp->t_state == TCPS_SYN_SENT) {
		tcp_drop(tp, errno);
		return (struct inpcb *)0;
	}
	return inp;
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rmxp_tao tao;
	struct socket *so = inp->inp_socket;
	u_int maxmtu;
	u_int romtu;
	int mss;
#ifdef INET6
	int isipv6;
#endif /* INET6 */
	bzero(&tao, sizeof(tao));

	if (tp != NULL) {
#ifdef INET6
		isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif
		maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
		romtu =
#ifdef INET6
		    isipv6 ? tcp_maxmtu6(&inp->inp_inc) :
#endif /* INET6 */
		    tcp_maxmtu(&inp->inp_inc);
		if (!maxmtu)
			maxmtu = romtu;
		else
			maxmtu = min(maxmtu, romtu);
		if (!maxmtu) {
			tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
			    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
			    tcp_mssdflt;
			return inp;
		}
		mss = maxmtu -
#ifdef INET6
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
		     sizeof(struct tcpiphdr)
#ifdef INET6
		    )
#endif /* INET6 */
		    ;

		if (tcp_do_rfc1644) {
			tcp_hc_gettao(&inp->inp_inc, &tao);
			if (tao.tao_mssopt)
				mss = min(mss, tao.tao_mssopt);
		}
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return inp;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if (MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
	return inp;
}

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
u_long
tcp_maxmtu(inc)
	struct in_conninfo *inc;
{
	struct route sro;
	struct sockaddr_in *dst;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

	bzero(&sro, sizeof(sro));
	if (inc->inc_faddr.s_addr != INADDR_ANY) {
		dst = (struct sockaddr_in *)&sro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = inc->inc_faddr;
		rtalloc_ign(&sro, RTF_CLONING);
	}
	if (sro.ro_rt != NULL) {
		ifp = sro.ro_rt->rt_ifp;
		if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = ifp->if_mtu;
		else
			maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
		RTFREE(sro.ro_rt);
	}
	return (maxmtu);
}

#ifdef INET6
u_long
tcp_maxmtu6(inc)
	struct in_conninfo *inc;
{
	struct route_in6 sro6;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

	bzero(&sro6, sizeof(sro6));
	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
		sro6.ro_dst.sin6_family = AF_INET6;
		sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
		sro6.ro_dst.sin6_addr = inc->inc6_faddr;
		rtalloc_ign((struct route *)&sro6, RTF_CLONING);
	}
	if (sro6.ro_rt != NULL) {
		ifp = sro6.ro_rt->rt_ifp;
		if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
		else
			maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
			    IN6_LINKMTU(sro6.ro_rt->rt_ifp));
		RTFREE(sro6.ro_rt);
	}

	return (maxmtu);
}
#endif /* INET6 */

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return 0;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return hdrsiz;
}
#endif /* IPSEC */

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is unlocked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(tp)
	struct tcpcb *tp;
{
	struct tcptw *tw;
	struct inpcb *inp;
	int tw_time, acknow;
	struct socket *so;

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_timer_2msl_tw(1);
		if (tw == NULL) {
			tcp_close(tp);
			return;
		}
	}
	inp = tp->t_inpcb;
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
		tw->t_recent = tp->ts_recent;
	else
		tw->t_recent = 0;

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->cc_recv = tp->cc_recv;
	tw->cc_send = tp->cc_send;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX
	 * If this code will be used for fin-wait-2 state also, then we
	 * may need a ts_recent from the last segment.
	 */
	/* Shorten TIME_WAIT [RFC-1644, p.28] */
	if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) {
		tw_time = tp->t_rxtcur * TCPTV_TWTRUNC;
		/* For T/TCP client, force ACK now. */
		acknow = 1;
	} else {
		tw_time = 2 * tcp_msl;
		acknow = tp->t_flags & TF_ACKNOW;
	}
	tcp_discardcb(tp);
	so = inp->inp_socket;
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	so->so_pcb = NULL;
	tw->tw_cred = crhold(so->so_cred);
	tw->tw_so_options = so->so_options;
	sotryfree(so);
	inp->inp_socket = NULL;
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = (caddr_t)tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_timer_2msl_reset(tw, tw_time);
	INP_UNLOCK(inp);
}

/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND 250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return 1;
	else
		return 0;
}
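
/*
 * Worked example: for a TIME_WAIT pair that is two seconds old, the
 * local projection above has advanced by about 2 MB (2 *
 * ISN_BYTES_PER_SECOND) and the peer projection by about 500 KB (2 *
 * MS_ISN_BYTES_PER_SECOND); recycling is permitted only once both
 * projections have passed snd_nxt and rcv_nxt, respectively.
 */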

struct tcptw *
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct inpcb *inp;

	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;
	tcp_timer_2msl_stop(tw);
	inp->inp_ppcb = NULL;
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6PROTO)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return (tw);
	uma_zfree(tcptw_zone, tw);
	return (NULL);
}

int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int8_t *optp;
	u_int hdrlen, optlen;
	int error;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	optp = (u_int8_t *)(th + 1);

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(tw->t_recent);
		optp += TCPOLEN_TSTAMP_APPA;
	}

	/*
	 * Send `CC-family' options if needed, and it's not a RST.
	 */
	if (tw->cc_recv != 0 && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
		*lp   = htonl(tw->cc_send);
		optp += TCPOLEN_CC_APPA;
	}
	optlen = optp - (u_int8_t *)(th + 1);

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
		error = ip_output(m, inp->inp_options, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow out
 * all available buffer space, not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase, and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being reached.  There are two general ways to do this:  First,
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
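
/*
 * Illustrative numbers (assumed, not measured): with a long-term
 * bandwidth estimate of 1,000,000 bytes/sec and a smoothed RTT of
 * 50 ms, the base product is 50,000 bytes; with t_maxseg = 1460 and
 * the default stab of 20 (i.e. two segments of slop) the resulting
 * bwnd would be roughly 50,000 + 2,920 = 52,920 bytes.
 */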
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	/*
	 * If inflight_enable is turned off in the middle of a TCP
	 * connection, make sure snd_bwnd is effectively disabled.
	 */
	if (tcp_inflight_enable == 0) {
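		/*
		 * TCP_MAXWIN << TCP_MAX_WINSHIFT is the largest window
		 * we can represent, i.e. effectively no inflight limit.
		 */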
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rolls over, 'bw' may wind up negative.  We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	     (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
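	/*
	 * Fold the new sample into the long-term estimate with an
	 * exponential moving average: 15/16 old estimate, 1/16 new
	 * sample.
	 */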
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    adjusted on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
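	/*
	 * Note on units: t_srtt and t_rttbest carry TCP_RTT_SHIFT bits
	 * of fraction, so USERTT below is in scaled ticks; dividing by
	 * (hz << TCP_RTT_SHIFT) makes bw * USERTT bandwidth (bytes/sec)
	 * times RTT (seconds), i.e. bytes.  tcp_inflight_stab is in
	 * tenths of a segment, so the default of 20 adds the
	 * two-segment slop described above.
	 */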
#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp,
			    bw,
			    tp->t_rttbest,
			    tp->t_srtt,
			    bwnd
			);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
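	/* Regardless of the clamps above, never drop below two segments. */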
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
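
/*
 * Illustrative call-site sketch (hypothetical and simplified, not the
 * exact tcp_input() logic): the function is intended to be invoked
 * while processing an ACK, with ack_seq taken from the segment, so
 * that consecutive calls measure the data acknowledged per tick.
 */
#if 0
	if (tcp_inflight_enable)
		tcp_xmit_bandwidth_limit(tp, th->th_ack);
#endif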

#ifdef TCP_SIGNATURE
/*
 * Callback function invoked by m_apply() to digest TCP segment data
 * contained within an mbuf chain.
 */
static int
tcp_signature_apply(void *fstate, void *data, u_int len)
{

	MD5Update(fstate, (u_char *)data, len);
	return (0);
}

/*
 * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385)
 *
 * Parameters:
 * m		pointer to head of mbuf chain
 * off0		offset to TCP header within the mbuf chain
 * len		length of TCP segment data, excluding options
 * optlen	length of TCP segment options
 * buf		pointer to storage for computed MD5 digest
 * direction	direction of flow (IPSEC_DIR_INBOUND or IPSEC_DIR_OUTBOUND)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * This function is for IPv4 use only.  Calling this function with an
 * IPv6 packet in the mbuf chain will yield undefined results.
 *
 * Return 0 if successful, otherwise return EINVAL (e.g. when no
 * matching SADB entry can be found).
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows, but it is unstable.
 */
int
tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
    u_char *buf, u_int direction)
{
	union sockaddr_union dst;
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
	u_short savecsum;

	KASSERT(m != NULL, ("NULL mbuf chain"));
	KASSERT(buf != NULL, ("NULL signature pointer"));

	/* Extract the destination from the IP header in the mbuf. */
	ip = mtod(m, struct ip *);
	bzero(&dst, sizeof(union sockaddr_union));
	dst.sa.sa_len = sizeof(struct sockaddr_in);
	dst.sa.sa_family = AF_INET;
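	/*
	 * Key the SA lookup on the peer's address: the IP source for
	 * inbound segments, the IP destination for outbound ones.
	 */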
	dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
	    ip->ip_src : ip->ip_dst;

	/* Look up an SADB entry which matches the address of the peer. */
	sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
	if (sav == NULL) {
		printf("%s: SADB lookup failed for %s\n", __func__,
		    inet_ntoa(dst.sin.sin_addr));
		return (EINVAL);
	}

	MD5Init(&ctx);
	ipovly = (struct ipovly *)ip;
	th = (struct tcphdr *)((u_char *)ip + off0);
	doff = off0 + sizeof(struct tcphdr) + optlen;

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test.  Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here.  When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	ippseudo.ippseudo_src = ipovly->ih_src;
	ippseudo.ippseudo_dst = ipovly->ih_dst;
	ippseudo.ippseudo_pad = 0;
	ippseudo.ippseudo_p = IPPROTO_TCP;
	ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
	MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 * Use m_apply() to avoid an early m_pullup().
	 */
	if (len > 0)
		m_apply(m, doff, len, tcp_signature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);

	key_sa_recordxfer(sav, m);
	KEY_FREESAV(&sav);
	return (0);
}
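
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * verifying an inbound IPv4 segment that carries a TCP-MD5 option.
 * The local names sigp (digest taken from the option) and calc, and
 * the assumption that tlen is the segment data length excluding
 * options, are all local to this example.
 */
#if 0
	u_char calc[TCP_SIGLEN];	/* TCP_SIGLEN == 16, an MD5 digest */

	if (tcp_signature_compute(m, off0, tlen, optlen, calc,
	    IPSEC_DIR_INBOUND) != 0 ||
	    bcmp(calc, sigp, TCP_SIGLEN) != 0)
		goto drop;		/* digest mismatch or no SA: discard */
#endif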
#endif /* TCP_SIGNATURE */