1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
40 #include "opt_kern_tls.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/arb.h>
45 #include <sys/callout.h>
46 #include <sys/eventhandler.h>
47 #ifdef TCP_HHOOK
48 #include <sys/hhook.h>
49 #endif
50 #include <sys/kernel.h>
51 #ifdef TCP_HHOOK
52 #include <sys/khelp.h>
53 #endif
54 #ifdef KERN_TLS
55 #include <sys/ktls.h>
56 #endif
57 #include <sys/qmath.h>
58 #include <sys/stats.h>
59 #include <sys/sysctl.h>
60 #include <sys/jail.h>
61 #include <sys/malloc.h>
62 #include <sys/refcount.h>
63 #include <sys/mbuf.h>
64 #include <sys/priv.h>
65 #include <sys/proc.h>
66 #include <sys/sdt.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/protosw.h>
70 #include <sys/random.h>
71
72 #include <vm/uma.h>
73
74 #include <net/route.h>
75 #include <net/route/nhop.h>
76 #include <net/if.h>
77 #include <net/if_var.h>
78 #include <net/vnet.h>
79
80 #include <netinet/in.h>
81 #include <netinet/in_fib.h>
82 #include <netinet/in_kdtrace.h>
83 #include <netinet/in_pcb.h>
84 #include <netinet/in_systm.h>
85 #include <netinet/in_var.h>
86 #include <netinet/ip.h>
87 #include <netinet/ip_icmp.h>
88 #include <netinet/ip_var.h>
89 #ifdef INET6
90 #include <netinet/icmp6.h>
91 #include <netinet/ip6.h>
92 #include <netinet6/in6_fib.h>
93 #include <netinet6/in6_pcb.h>
94 #include <netinet6/ip6_var.h>
95 #include <netinet6/scope6_var.h>
96 #include <netinet6/nd6.h>
97 #endif
98
99 #include <netinet/tcp.h>
100 #ifdef INVARIANTS
101 #define TCPSTATES
102 #endif
103 #include <netinet/tcp_fsm.h>
104 #include <netinet/tcp_seq.h>
105 #include <netinet/tcp_timer.h>
106 #include <netinet/tcp_var.h>
107 #include <netinet/tcp_ecn.h>
108 #include <netinet/tcp_log_buf.h>
109 #include <netinet/tcp_syncache.h>
110 #include <netinet/tcp_hpts.h>
111 #include <netinet/cc/cc.h>
112 #include <netinet/tcpip.h>
113 #include <netinet/tcp_fastopen.h>
114 #ifdef TCPPCAP
115 #include <netinet/tcp_pcap.h>
116 #endif
117 #ifdef TCP_OFFLOAD
118 #include <netinet/tcp_offload.h>
119 #endif
120 #include <netinet/udp.h>
121 #include <netinet/udp_var.h>
122 #ifdef INET6
123 #include <netinet6/tcp6_var.h>
124 #endif
125
126 #include <netipsec/ipsec_support.h>
127
128 #include <machine/in_cksum.h>
129 #include <crypto/siphash/siphash.h>
130
131 #include <security/mac/mac_framework.h>
132
133 #ifdef INET6
134 static ip6proto_ctlinput_t tcp6_ctlinput;
135 static udp_tun_icmp_t tcp6_ctlinput_viaudp;
136 #endif
137
138 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
139 #ifdef INET6
140 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
141 #endif
142
143 #ifdef NETFLIX_EXP_DETECTION
144 /* Sack attack detection thresholds and such */
145 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack,
146 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
147 "Sack Attack detection thresholds");
148 int32_t tcp_force_detection = 0;
149 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection,
150 CTLFLAG_RW,
151 &tcp_force_detection, 0,
152 "Do we force detection even if the INP has it off?");
153 int32_t tcp_sack_to_ack_thresh = 700; /* 70 % */
154 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh,
155 CTLFLAG_RW,
156 &tcp_sack_to_ack_thresh, 700,
157 "Percentage of sacks to acks we must see above (10.1 percent is 101)?");
158 int32_t tcp_sack_to_move_thresh = 600; /* 60 % */
159 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh,
160 CTLFLAG_RW,
161 &tcp_sack_to_move_thresh, 600,
162 "Percentage of sack moves we must see above (10.1 percent is 101)");
163 int32_t tcp_restoral_thresh = 650; /* 65 % (sack:2:ack -5%) */
164 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh,
165 CTLFLAG_RW,
166 &tcp_restoral_thresh, 550,
167 "Percentage of sack to ack percentage we must see below to restore(10.1 percent is 101)");
168 int32_t tcp_sad_decay_val = 800;
169 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per,
170 CTLFLAG_RW,
171 &tcp_sad_decay_val, 800,
172 "The decay percentage (10.1 percent equals 101 )");
173 int32_t tcp_map_minimum = 500;
174 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps,
175 CTLFLAG_RW,
176 &tcp_map_minimum, 500,
177 "Number of Map enteries before we start detection");
178 int32_t tcp_attack_on_turns_on_logging = 0;
179 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, attacks_logged,
180 CTLFLAG_RW,
181 &tcp_attack_on_turns_on_logging, 0,
182 "When we have a positive hit on attack, do we turn on logging?");
183 int32_t tcp_sad_pacing_interval = 2000;
184 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int,
185 CTLFLAG_RW,
186 &tcp_sad_pacing_interval, 2000,
187 "What is the minimum pacing interval for a classified attacker?");
188
189 int32_t tcp_sad_low_pps = 100;
190 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps,
191 CTLFLAG_RW,
192 &tcp_sad_low_pps, 100,
193 "What is the input pps that below which we do not decay?");
194 #endif
195 uint32_t tcp_ack_war_time_window = 1000;
196 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_timewindow,
197 CTLFLAG_RW,
198 &tcp_ack_war_time_window, 1000,
199 "If the tcp_stack does ack-war prevention how many milliseconds are in its time window?");
200 uint32_t tcp_ack_war_cnt = 5;
201 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_cnt,
202 CTLFLAG_RW,
203 &tcp_ack_war_cnt, 5,
204 "If the tcp_stack does ack-war prevention how many acks can be sent in its time window?");
205
206 struct rwlock tcp_function_lock;
207
208 static int
209 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
210 {
211 int error, new;
212
213 new = V_tcp_mssdflt;
214 error = sysctl_handle_int(oidp, &new, 0, req);
215 if (error == 0 && req->newptr) {
216 if (new < TCP_MINMSS)
217 error = EINVAL;
218 else
219 V_tcp_mssdflt = new;
220 }
221 return (error);
222 }
223
224 SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
225 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
226 &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I",
227 "Default TCP Maximum Segment Size");
228
229 #ifdef INET6
230 static int
231 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
232 {
233 int error, new;
234
235 new = V_tcp_v6mssdflt;
236 error = sysctl_handle_int(oidp, &new, 0, req);
237 if (error == 0 && req->newptr) {
238 if (new < TCP_MINMSS)
239 error = EINVAL;
240 else
241 V_tcp_v6mssdflt = new;
242 }
243 return (error);
244 }
245
246 SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
247 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
248 &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
249 "Default TCP Maximum Segment Size for IPv6");
250 #endif /* INET6 */
251
252 /*
253 * Minimum MSS we accept and use. This prevents DoS attacks where
254 * we are forced to a ridiculous low MSS like 20 and send hundreds
255 * of packets instead of one. The effect scales with the available
256 * bandwidth and quickly saturates the CPU and network interface
257 * with packet generation and sending. Set to zero to disable MINMSS
258 * checking. This setting prevents us from sending too small packets.
259 */
260 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
261 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
262 &VNET_NAME(tcp_minmss), 0,
263 "Minimum TCP Maximum Segment Size");
264
265 VNET_DEFINE(int, tcp_do_rfc1323) = 1;
266 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
267 &VNET_NAME(tcp_do_rfc1323), 0,
268 "Enable rfc1323 (high performance TCP) extensions");
269
270 /*
271 * As of June 2021, several TCP stacks violate RFC 7323 from September 2014.
272 * Some stacks negotiate TS, but never send them after connection setup. Some
273 * stacks negotiate TS, but don't send them when sending keep-alive segments.
274 * These include modern widely deployed TCP stacks.
275 * Therefore tolerating violations for now...
276 */
277 VNET_DEFINE(int, tcp_tolerate_missing_ts) = 1;
278 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tolerate_missing_ts, CTLFLAG_VNET | CTLFLAG_RW,
279 &VNET_NAME(tcp_tolerate_missing_ts), 0,
280 "Tolerate missing TCP timestamps");
281
282 VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1;
283 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW,
284 &VNET_NAME(tcp_ts_offset_per_conn), 0,
285 "Initialize TCP timestamps per connection instead of per host pair");
286
287 /* How many connections are pacing */
288 static volatile uint32_t number_of_tcp_connections_pacing = 0;
289 static uint32_t shadow_num_connections = 0;
290
291 static int tcp_pacing_limit = 10000;
292 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pacing_limit, CTLFLAG_RW,
293 &tcp_pacing_limit, 1000,
294 "If the TCP stack does pacing, is there a limit (-1 = no, 0 = no pacing N = number of connections)");
295
296 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pacing_count, CTLFLAG_RD,
297 &shadow_num_connections, 0, "Number of TCP connections being paced");
298
299 static int tcp_log_debug = 0;
300 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
301 &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
302
303 static int tcp_tcbhashsize;
304 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
305 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
306
307 static int do_tcpdrain = 1;
308 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
309 "Enable tcp_drain routine for extra help when low on mbufs");
310
311 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
312 &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
313
314 VNET_DEFINE_STATIC(int, icmp_may_rst) = 1;
315 #define V_icmp_may_rst VNET(icmp_may_rst)
316 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
317 &VNET_NAME(icmp_may_rst), 0,
318 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
319
320 VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0;
321 #define V_tcp_isn_reseed_interval VNET(tcp_isn_reseed_interval)
322 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
323 &VNET_NAME(tcp_isn_reseed_interval), 0,
324 "Seconds between reseeding of ISN secret");
325
326 static int tcp_soreceive_stream;
327 SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
328 &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");
329
330 VNET_DEFINE(uma_zone_t, sack_hole_zone);
331 #define V_sack_hole_zone VNET(sack_hole_zone)
332 VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0; /* unlimited */
333 static int
334 sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS)
335 {
336 int error;
337 uint32_t new;
338
339 new = V_tcp_map_entries_limit;
340 error = sysctl_handle_int(oidp, &new, 0, req);
341 if (error == 0 && req->newptr) {
342 /* only allow "" and value > minimum */
343 if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT)
344 error = EINVAL;
345 else
346 V_tcp_map_entries_limit = new;
347 }
348 return (error);
349 }
350 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit,
351 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
352 &VNET_NAME(tcp_map_entries_limit), 0,
353 &sysctl_net_inet_tcp_map_limit_check, "IU",
354 "Total sendmap entries limit");
355
356 VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0; /* unlimited */
357 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW,
358 &VNET_NAME(tcp_map_split_limit), 0,
359 "Total sendmap split entries limit");
360
361 #ifdef TCP_HHOOK
362 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
363 #endif
364
365 #define TS_OFFSET_SECRET_LENGTH SIPHASH_KEY_LENGTH
366 VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]);
367 #define V_ts_offset_secret VNET(ts_offset_secret)
368
369 static int tcp_default_fb_init(struct tcpcb *tp);
370 static void tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged);
371 static int tcp_default_handoff_ok(struct tcpcb *tp);
372 static struct inpcb *tcp_notify(struct inpcb *, int);
373 static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
374 static struct inpcb *tcp_mtudisc(struct inpcb *, int);
375 static struct inpcb *tcp_drop_syn_sent(struct inpcb *, int);
376 static char * tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
377 const void *ip4hdr, const void *ip6hdr);
378 static ipproto_ctlinput_t tcp_ctlinput;
379 static udp_tun_icmp_t tcp_ctlinput_viaudp;
380
381 static struct tcp_function_block tcp_def_funcblk = {
382 .tfb_tcp_block_name = "freebsd",
383 .tfb_tcp_output = tcp_default_output,
384 .tfb_tcp_do_segment = tcp_do_segment,
385 .tfb_tcp_ctloutput = tcp_default_ctloutput,
386 .tfb_tcp_handoff_ok = tcp_default_handoff_ok,
387 .tfb_tcp_fb_init = tcp_default_fb_init,
388 .tfb_tcp_fb_fini = tcp_default_fb_fini,
389 };
390
391 static int tcp_fb_cnt = 0;
392 struct tcp_funchead t_functions;
393 static struct tcp_function_block *tcp_func_set_ptr = &tcp_def_funcblk;
394
395 void
396 tcp_record_dsack(struct tcpcb *tp, tcp_seq start, tcp_seq end, int tlp)
397 {
398 TCPSTAT_INC(tcps_dsack_count);
399 tp->t_dsack_pack++;
400 if (tlp == 0) {
401 if (SEQ_GT(end, start)) {
402 tp->t_dsack_bytes += (end - start);
403 TCPSTAT_ADD(tcps_dsack_bytes, (end - start));
404 } else {
405 tp->t_dsack_tlp_bytes += (start - end);
406 TCPSTAT_ADD(tcps_dsack_bytes, (start - end));
407 }
408 } else {
409 if (SEQ_GT(end, start)) {
410 tp->t_dsack_bytes += (end - start);
411 TCPSTAT_ADD(tcps_dsack_tlp_bytes, (end - start));
412 } else {
413 tp->t_dsack_tlp_bytes += (start - end);
414 TCPSTAT_ADD(tcps_dsack_tlp_bytes, (start - end));
415 }
416 }
417 }
418
419 static struct tcp_function_block *
420 find_tcp_functions_locked(struct tcp_function_set *fs)
421 {
422 struct tcp_function *f;
423 struct tcp_function_block *blk=NULL;
424
425 TAILQ_FOREACH(f, &t_functions, tf_next) {
426 if (strcmp(f->tf_name, fs->function_set_name) == 0) {
427 blk = f->tf_fb;
428 break;
429 }
430 }
431 return(blk);
432 }
433
434 static struct tcp_function_block *
435 find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
436 {
437 struct tcp_function_block *rblk=NULL;
438 struct tcp_function *f;
439
440 TAILQ_FOREACH(f, &t_functions, tf_next) {
441 if (f->tf_fb == blk) {
442 rblk = blk;
443 if (s) {
444 *s = f;
445 }
446 break;
447 }
448 }
449 return (rblk);
450 }
451
452 struct tcp_function_block *
453 find_and_ref_tcp_functions(struct tcp_function_set *fs)
454 {
455 struct tcp_function_block *blk;
456
457 rw_rlock(&tcp_function_lock);
458 blk = find_tcp_functions_locked(fs);
459 if (blk)
460 refcount_acquire(&blk->tfb_refcnt);
461 rw_runlock(&tcp_function_lock);
462 return(blk);
463 }
464
465 struct tcp_function_block *
466 find_and_ref_tcp_fb(struct tcp_function_block *blk)
467 {
468 struct tcp_function_block *rblk;
469
470 rw_rlock(&tcp_function_lock);
471 rblk = find_tcp_fb_locked(blk, NULL);
472 if (rblk)
473 refcount_acquire(&rblk->tfb_refcnt);
474 rw_runlock(&tcp_function_lock);
475 return(rblk);
476 }
477
478 /* Find a matching alias for the given tcp_function_block. */
479 int
480 find_tcp_function_alias(struct tcp_function_block *blk,
481 struct tcp_function_set *fs)
482 {
483 struct tcp_function *f;
484 int found;
485
486 found = 0;
487 rw_rlock(&tcp_function_lock);
488 TAILQ_FOREACH(f, &t_functions, tf_next) {
489 if ((f->tf_fb == blk) &&
490 (strncmp(f->tf_name, blk->tfb_tcp_block_name,
491 TCP_FUNCTION_NAME_LEN_MAX) != 0)) {
492 /* Matching function block with different name. */
493 strncpy(fs->function_set_name, f->tf_name,
494 TCP_FUNCTION_NAME_LEN_MAX);
495 found = 1;
496 break;
497 }
498 }
499 /* Null terminate the string appropriately. */
500 if (found) {
501 fs->function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
502 } else {
503 fs->function_set_name[0] = '\0';
504 }
505 rw_runlock(&tcp_function_lock);
506 return (found);
507 }
508
509 static struct tcp_function_block *
510 find_and_ref_tcp_default_fb(void)
511 {
512 struct tcp_function_block *rblk;
513
514 rw_rlock(&tcp_function_lock);
515 rblk = tcp_func_set_ptr;
516 refcount_acquire(&rblk->tfb_refcnt);
517 rw_runlock(&tcp_function_lock);
518 return (rblk);
519 }
520
521 void
522 tcp_switch_back_to_default(struct tcpcb *tp)
523 {
524 struct tcp_function_block *tfb;
525
526 KASSERT(tp->t_fb != &tcp_def_funcblk,
527 ("%s: called by the built-in default stack", __func__));
528
529 /*
530 * Release the old stack. This function will either find a new one
531 * or panic.
532 */
533 if (tp->t_fb->tfb_tcp_fb_fini != NULL)
534 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
535 refcount_release(&tp->t_fb->tfb_refcnt);
536
537 /*
538 * Now, we'll find a new function block to use.
539 * Start by trying the current user-selected
540 * default, unless this stack is the user-selected
541 * default.
542 */
543 tfb = find_and_ref_tcp_default_fb();
544 if (tfb == tp->t_fb) {
545 refcount_release(&tfb->tfb_refcnt);
546 tfb = NULL;
547 }
548 /* Does the stack accept this connection? */
549 if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL &&
550 (*tfb->tfb_tcp_handoff_ok)(tp)) {
551 refcount_release(&tfb->tfb_refcnt);
552 tfb = NULL;
553 }
554 /* Try to use that stack. */
555 if (tfb != NULL) {
556 /* Initialize the new stack. If it succeeds, we are done. */
557 tp->t_fb = tfb;
558 if (tp->t_fb->tfb_tcp_fb_init == NULL ||
559 (*tp->t_fb->tfb_tcp_fb_init)(tp) == 0)
560 return;
561
562 /*
563 * Initialization failed. Release the reference count on
564 * the stack.
565 */
566 refcount_release(&tfb->tfb_refcnt);
567 }
568
569 /*
570 * If that wasn't feasible, use the built-in default
571 * stack which is not allowed to reject anyone.
572 */
573 tfb = find_and_ref_tcp_fb(&tcp_def_funcblk);
574 if (tfb == NULL) {
575 /* there always should be a default */
576 panic("Can't refer to tcp_def_funcblk");
577 }
578 if (tfb->tfb_tcp_handoff_ok != NULL) {
579 if ((*tfb->tfb_tcp_handoff_ok) (tp)) {
580 /* The default stack cannot say no */
581 panic("Default stack rejects a new session?");
582 }
583 }
584 tp->t_fb = tfb;
585 if (tp->t_fb->tfb_tcp_fb_init != NULL &&
586 (*tp->t_fb->tfb_tcp_fb_init)(tp)) {
587 /* The default stack cannot fail */
588 panic("Default stack initialization failed");
589 }
590 }
591
592 static bool
593 tcp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
594 const struct sockaddr *sa, void *ctx)
595 {
596 struct ip *iph;
597 #ifdef INET6
598 struct ip6_hdr *ip6;
599 #endif
600 struct udphdr *uh;
601 struct tcphdr *th;
602 int thlen;
603 uint16_t port;
604
605 TCPSTAT_INC(tcps_tunneled_pkts);
606 if ((m->m_flags & M_PKTHDR) == 0) {
607 /* Can't handle one that is not a pkt hdr */
608 TCPSTAT_INC(tcps_tunneled_errs);
609 goto out;
610 }
611 thlen = sizeof(struct tcphdr);
612 if (m->m_len < off + sizeof(struct udphdr) + thlen &&
613 (m = m_pullup(m, off + sizeof(struct udphdr) + thlen)) == NULL) {
614 TCPSTAT_INC(tcps_tunneled_errs);
615 goto out;
616 }
617 iph = mtod(m, struct ip *);
618 uh = (struct udphdr *)((caddr_t)iph + off);
619 th = (struct tcphdr *)(uh + 1);
620 thlen = th->th_off << 2;
621 if (m->m_len < off + sizeof(struct udphdr) + thlen) {
622 m = m_pullup(m, off + sizeof(struct udphdr) + thlen);
623 if (m == NULL) {
624 TCPSTAT_INC(tcps_tunneled_errs);
625 goto out;
626 } else {
627 iph = mtod(m, struct ip *);
628 uh = (struct udphdr *)((caddr_t)iph + off);
629 th = (struct tcphdr *)(uh + 1);
630 }
631 }
632 m->m_pkthdr.tcp_tun_port = port = uh->uh_sport;
633 bcopy(th, uh, m->m_len - off);
634 m->m_len -= sizeof(struct udphdr);
635 m->m_pkthdr.len -= sizeof(struct udphdr);
636 /*
637 * We use the same algorithm for
638 * both UDP and TCP for c-sum. So
639 * the code in tcp_input will skip
640 * the checksum. So we do nothing
641 * with the flag (m->m_pkthdr.csum_flags).
642 */
643 switch (iph->ip_v) {
644 #ifdef INET
645 case IPVERSION:
646 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
647 tcp_input_with_port(&m, &off, IPPROTO_TCP, port);
648 break;
649 #endif
650 #ifdef INET6
651 case IPV6_VERSION >> 4:
652 ip6 = mtod(m, struct ip6_hdr *);
653 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
654 tcp6_input_with_port(&m, &off, IPPROTO_TCP, port);
655 break;
656 #endif
657 default:
658 goto out;
659 break;
660 }
661 return (true);
662 out:
663 m_freem(m);
664
665 return (true);
666 }
667
668 static int
669 sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
670 {
671 int error=ENOENT;
672 struct tcp_function_set fs;
673 struct tcp_function_block *blk;
674
675 memset(&fs, 0, sizeof(fs));
676 rw_rlock(&tcp_function_lock);
677 blk = find_tcp_fb_locked(tcp_func_set_ptr, NULL);
678 if (blk) {
679 /* Found him */
680 strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
681 fs.pcbcnt = blk->tfb_refcnt;
682 }
683 rw_runlock(&tcp_function_lock);
684 error = sysctl_handle_string(oidp, fs.function_set_name,
685 sizeof(fs.function_set_name), req);
686
687 /* Check for error or no change */
688 if (error != 0 || req->newptr == NULL)
689 return(error);
690
691 rw_wlock(&tcp_function_lock);
692 blk = find_tcp_functions_locked(&fs);
693 if ((blk == NULL) ||
694 (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
695 error = ENOENT;
696 goto done;
697 }
698 tcp_func_set_ptr = blk;
699 done:
700 rw_wunlock(&tcp_function_lock);
701 return (error);
702 }
703
704 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
705 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
706 NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
707 "Set/get the default TCP functions");
708
709 static int
710 sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
711 {
712 int error, cnt, linesz;
713 struct tcp_function *f;
714 char *buffer, *cp;
715 size_t bufsz, outsz;
716 bool alias;
717
718 cnt = 0;
719 rw_rlock(&tcp_function_lock);
720 TAILQ_FOREACH(f, &t_functions, tf_next) {
721 cnt++;
722 }
723 rw_runlock(&tcp_function_lock);
724
725 bufsz = (cnt+2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
726 buffer = malloc(bufsz, M_TEMP, M_WAITOK);
727
728 error = 0;
729 cp = buffer;
730
731 linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
732 "Alias", "PCB count");
733 cp += linesz;
734 bufsz -= linesz;
735 outsz = linesz;
736
737 rw_rlock(&tcp_function_lock);
738 TAILQ_FOREACH(f, &t_functions, tf_next) {
739 alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
740 linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
741 f->tf_fb->tfb_tcp_block_name,
742 (f->tf_fb == tcp_func_set_ptr) ? '*' : ' ',
743 alias ? f->tf_name : "-",
744 f->tf_fb->tfb_refcnt);
745 if (linesz >= bufsz) {
746 error = EOVERFLOW;
747 break;
748 }
749 cp += linesz;
750 bufsz -= linesz;
751 outsz += linesz;
752 }
753 rw_runlock(&tcp_function_lock);
754 if (error == 0)
755 error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
756 free(buffer, M_TEMP);
757 return (error);
758 }
759
760 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
761 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
762 NULL, 0, sysctl_net_inet_list_available, "A",
763 "list available TCP Function sets");
764
765 VNET_DEFINE(int, tcp_udp_tunneling_port) = TCP_TUNNELING_PORT_DEFAULT;
766
767 #ifdef INET
768 VNET_DEFINE(struct socket *, udp4_tun_socket) = NULL;
769 #define V_udp4_tun_socket VNET(udp4_tun_socket)
770 #endif
771 #ifdef INET6
772 VNET_DEFINE(struct socket *, udp6_tun_socket) = NULL;
773 #define V_udp6_tun_socket VNET(udp6_tun_socket)
774 #endif
775
776 static struct sx tcpoudp_lock;
777
778 static void
779 tcp_over_udp_stop(void)
780 {
781
782 sx_assert(&tcpoudp_lock, SA_XLOCKED);
783
784 #ifdef INET
785 if (V_udp4_tun_socket != NULL) {
786 soclose(V_udp4_tun_socket);
787 V_udp4_tun_socket = NULL;
788 }
789 #endif
790 #ifdef INET6
791 if (V_udp6_tun_socket != NULL) {
792 soclose(V_udp6_tun_socket);
793 V_udp6_tun_socket = NULL;
794 }
795 #endif
796 }
797
798 static int
799 tcp_over_udp_start(void)
800 {
801 uint16_t port;
802 int ret;
803 #ifdef INET
804 struct sockaddr_in sin;
805 #endif
806 #ifdef INET6
807 struct sockaddr_in6 sin6;
808 #endif
809
810 sx_assert(&tcpoudp_lock, SA_XLOCKED);
811
812 port = V_tcp_udp_tunneling_port;
813 if (ntohs(port) == 0) {
814 /* Must have a port set */
815 return (EINVAL);
816 }
817 #ifdef INET
818 if (V_udp4_tun_socket != NULL) {
819 /* Already running -- must stop first */
820 return (EALREADY);
821 }
822 #endif
823 #ifdef INET6
824 if (V_udp6_tun_socket != NULL) {
825 /* Already running -- must stop first */
826 return (EALREADY);
827 }
828 #endif
829 #ifdef INET
830 if ((ret = socreate(PF_INET, &V_udp4_tun_socket,
831 SOCK_DGRAM, IPPROTO_UDP,
832 curthread->td_ucred, curthread))) {
833 tcp_over_udp_stop();
834 return (ret);
835 }
836 /* Call the special UDP hook. */
837 if ((ret = udp_set_kernel_tunneling(V_udp4_tun_socket,
838 tcp_recv_udp_tunneled_packet,
839 tcp_ctlinput_viaudp,
840 NULL))) {
841 tcp_over_udp_stop();
842 return (ret);
843 }
844 /* Ok, we have a socket, bind it to the port. */
845 memset(&sin, 0, sizeof(struct sockaddr_in));
846 sin.sin_len = sizeof(struct sockaddr_in);
847 sin.sin_family = AF_INET;
848 sin.sin_port = htons(port);
849 if ((ret = sobind(V_udp4_tun_socket,
850 (struct sockaddr *)&sin, curthread))) {
851 tcp_over_udp_stop();
852 return (ret);
853 }
854 #endif
855 #ifdef INET6
856 if ((ret = socreate(PF_INET6, &V_udp6_tun_socket,
857 SOCK_DGRAM, IPPROTO_UDP,
858 curthread->td_ucred, curthread))) {
859 tcp_over_udp_stop();
860 return (ret);
861 }
862 /* Call the special UDP hook. */
863 if ((ret = udp_set_kernel_tunneling(V_udp6_tun_socket,
864 tcp_recv_udp_tunneled_packet,
865 tcp6_ctlinput_viaudp,
866 NULL))) {
867 tcp_over_udp_stop();
868 return (ret);
869 }
870 /* Ok, we have a socket, bind it to the port. */
871 memset(&sin6, 0, sizeof(struct sockaddr_in6));
872 sin6.sin6_len = sizeof(struct sockaddr_in6);
873 sin6.sin6_family = AF_INET6;
874 sin6.sin6_port = htons(port);
875 if ((ret = sobind(V_udp6_tun_socket,
876 (struct sockaddr *)&sin6, curthread))) {
877 tcp_over_udp_stop();
878 return (ret);
879 }
880 #endif
881 return (0);
882 }
883
884 static int
885 sysctl_net_inet_tcp_udp_tunneling_port_check(SYSCTL_HANDLER_ARGS)
886 {
887 int error;
888 uint32_t old, new;
889
890 old = V_tcp_udp_tunneling_port;
891 new = old;
892 error = sysctl_handle_int(oidp, &new, 0, req);
893 if ((error == 0) &&
894 (req->newptr != NULL)) {
895 if ((new < TCP_TUNNELING_PORT_MIN) ||
896 (new > TCP_TUNNELING_PORT_MAX)) {
897 error = EINVAL;
898 } else {
899 sx_xlock(&tcpoudp_lock);
900 V_tcp_udp_tunneling_port = new;
901 if (old != 0) {
902 tcp_over_udp_stop();
903 }
904 if (new != 0) {
905 error = tcp_over_udp_start();
906 if (error != 0) {
907 V_tcp_udp_tunneling_port = 0;
908 }
909 }
910 sx_xunlock(&tcpoudp_lock);
911 }
912 }
913 return (error);
914 }
915
916 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_port,
917 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
918 &VNET_NAME(tcp_udp_tunneling_port),
919 0, &sysctl_net_inet_tcp_udp_tunneling_port_check, "IU",
920 "Tunneling port for tcp over udp");
921
922 VNET_DEFINE(int, tcp_udp_tunneling_overhead) = TCP_TUNNELING_OVERHEAD_DEFAULT;
923
924 static int
925 sysctl_net_inet_tcp_udp_tunneling_overhead_check(SYSCTL_HANDLER_ARGS)
926 {
927 int error, new;
928
929 new = V_tcp_udp_tunneling_overhead;
930 error = sysctl_handle_int(oidp, &new, 0, req);
931 if (error == 0 && req->newptr) {
932 if ((new < TCP_TUNNELING_OVERHEAD_MIN) ||
933 (new > TCP_TUNNELING_OVERHEAD_MAX))
934 error = EINVAL;
935 else
936 V_tcp_udp_tunneling_overhead = new;
937 }
938 return (error);
939 }
940
941 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_overhead,
942 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
943 &VNET_NAME(tcp_udp_tunneling_overhead),
944 0, &sysctl_net_inet_tcp_udp_tunneling_overhead_check, "IU",
945 "MSS reduction when using tcp over udp");
946
947 /*
948 * Exports one (struct tcp_function_info) for each alias/name.
949 */
950 static int
951 sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
952 {
953 int cnt, error;
954 struct tcp_function *f;
955 struct tcp_function_info tfi;
956
957 /*
958 * We don't allow writes.
959 */
960 if (req->newptr != NULL)
961 return (EINVAL);
962
963 /*
964 * Wire the old buffer so we can directly copy the functions to
965 * user space without dropping the lock.
966 */
967 if (req->oldptr != NULL) {
968 error = sysctl_wire_old_buffer(req, 0);
969 if (error)
970 return (error);
971 }
972
973 /*
974 * Walk the list and copy out matching entries. If INVARIANTS
975 * is compiled in, also walk the list to verify the length of
976 * the list matches what we have recorded.
977 */
978 rw_rlock(&tcp_function_lock);
979
980 cnt = 0;
981 #ifndef INVARIANTS
982 if (req->oldptr == NULL) {
983 cnt = tcp_fb_cnt;
984 goto skip_loop;
985 }
986 #endif
987 TAILQ_FOREACH(f, &t_functions, tf_next) {
988 #ifdef INVARIANTS
989 cnt++;
990 #endif
991 if (req->oldptr != NULL) {
992 bzero(&tfi, sizeof(tfi));
993 tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
994 tfi.tfi_id = f->tf_fb->tfb_id;
995 (void)strlcpy(tfi.tfi_alias, f->tf_name,
996 sizeof(tfi.tfi_alias));
997 (void)strlcpy(tfi.tfi_name,
998 f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name));
999 error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
1000 /*
1001 * Don't stop on error, as that is the
1002 * mechanism we use to accumulate length
1003 * information if the buffer was too short.
1004 */
1005 }
1006 }
1007 KASSERT(cnt == tcp_fb_cnt,
1008 ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
1009 #ifndef INVARIANTS
1010 skip_loop:
1011 #endif
1012 rw_runlock(&tcp_function_lock);
1013 if (req->oldptr == NULL)
1014 error = SYSCTL_OUT(req, NULL,
1015 (cnt + 1) * sizeof(struct tcp_function_info));
1016
1017 return (error);
1018 }
1019
1020 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
1021 CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
1022 NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
1023 "List TCP function block name-to-ID mappings");
1024
1025 /*
1026 * tfb_tcp_handoff_ok() function for the default stack.
1027 * Note that we'll basically try to take all comers.
1028 */
1029 static int
1030 tcp_default_handoff_ok(struct tcpcb *tp)
1031 {
1032
1033 return (0);
1034 }
1035
1036 /*
1037 * tfb_tcp_fb_init() function for the default stack.
1038 *
1039 * This handles making sure we have appropriate timers set if you are
1040 * transitioning a socket that has some amount of setup done.
1041 *
1042 * The init() fuction from the default can *never* return non-zero i.e.
1043 * it is required to always succeed since it is the stack of last resort!
1044 */
1045 static int
1046 tcp_default_fb_init(struct tcpcb *tp)
1047 {
1048 struct socket *so = tptosocket(tp);
1049
1050 INP_WLOCK_ASSERT(tptoinpcb(tp));
1051
1052 KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
1053 ("%s: connection %p in unexpected state %d", __func__, tp,
1054 tp->t_state));
1055
1056 /*
1057 * Nothing to do for ESTABLISHED or LISTEN states. And, we don't
1058 * know what to do for unexpected states (which includes TIME_WAIT).
1059 */
1060 if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT)
1061 return (0);
1062
1063 /*
1064 * Make sure some kind of transmission timer is set if there is
1065 * outstanding data.
1066 */
1067 if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
1068 tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
1069 tcp_timer_active(tp, TT_PERSIST))) {
1070 /*
1071 * If the session has established and it looks like it should
1072 * be in the persist state, set the persist timer. Otherwise,
1073 * set the retransmit timer.
1074 */
1075 if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 &&
1076 (int32_t)(tp->snd_nxt - tp->snd_una) <
1077 (int32_t)sbavail(&so->so_snd))
1078 tcp_setpersist(tp);
1079 else
1080 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
1081 }
1082
1083 /* All non-embryonic sessions get a keepalive timer. */
1084 if (!tcp_timer_active(tp, TT_KEEP))
1085 tcp_timer_activate(tp, TT_KEEP,
1086 TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) :
1087 TP_KEEPINIT(tp));
1088
1089 /*
1090 * Make sure critical variables are initialized
1091 * if transitioning while in Recovery.
1092 */
1093 if IN_FASTRECOVERY(tp->t_flags) {
1094 if (tp->sackhint.recover_fs == 0)
1095 tp->sackhint.recover_fs = max(1,
1096 tp->snd_nxt - tp->snd_una);
1097 }
1098
1099 return (0);
1100 }
1101
1102 /*
1103 * tfb_tcp_fb_fini() function for the default stack.
1104 *
1105 * This changes state as necessary (or prudent) to prepare for another stack
1106 * to assume responsibility for the connection.
1107 */
1108 static void
1109 tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
1110 {
1111
1112 INP_WLOCK_ASSERT(tptoinpcb(tp));
1113 }
1114
1115 /*
1116 * Target size of TCP PCB hash tables. Must be a power of two.
1117 *
1118 * Note that this can be overridden by the kernel environment
1119 * variable net.inet.tcp.tcbhashsize
1120 */
1121 #ifndef TCBHASHSIZE
1122 #define TCBHASHSIZE 0
1123 #endif
1124
1125 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
1126 MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");
1127
1128 static struct mtx isn_mtx;
1129
1130 #define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
1131 #define ISN_LOCK() mtx_lock(&isn_mtx)
1132 #define ISN_UNLOCK() mtx_unlock(&isn_mtx)
1133
1134 INPCBSTORAGE_DEFINE(tcpcbstor, tcpcb, "tcpinp", "tcp_inpcb", "tcp", "tcphash");
1135
1136 /*
1137 * Take a value and get the next power of 2 that doesn't overflow.
1138 * Used to size the tcp_inpcb hash buckets.
1139 */
1140 static int
1141 maketcp_hashsize(int size)
1142 {
1143 int hashsize;
1144
1145 /*
1146 * auto tune.
1147 * get the next power of 2 higher than maxsockets.
1148 */
1149 hashsize = 1 << fls(size);
1150 /* catch overflow, and just go one power of 2 smaller */
1151 if (hashsize < size) {
1152 hashsize = 1 << (fls(size) - 1);
1153 }
1154 return (hashsize);
1155 }
1156
1157 static volatile int next_tcp_stack_id = 1;
1158
1159 /*
1160 * Register a TCP function block with the name provided in the names
1161 * array. (Note that this function does NOT automatically register
1162 * blk->tfb_tcp_block_name as a stack name. Therefore, you should
1163 * explicitly include blk->tfb_tcp_block_name in the list of names if
1164 * you wish to register the stack with that name.)
1165 *
1166 * Either all name registrations will succeed or all will fail. If
1167 * a name registration fails, the function will update the num_names
1168 * argument to point to the array index of the name that encountered
1169 * the failure.
1170 *
1171 * Returns 0 on success, or an error code on failure.
1172 */
1173 int
1174 register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
1175 const char *names[], int *num_names)
1176 {
1177 struct tcp_function *n;
1178 struct tcp_function_set fs;
1179 int error, i;
1180
1181 KASSERT(names != NULL && *num_names > 0,
1182 ("%s: Called with 0-length name list", __func__));
1183 KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));
1184 KASSERT(rw_initialized(&tcp_function_lock),
1185 ("%s: called too early", __func__));
1186
1187 if ((blk->tfb_tcp_output == NULL) ||
1188 (blk->tfb_tcp_do_segment == NULL) ||
1189 (blk->tfb_tcp_ctloutput == NULL) ||
1190 (strlen(blk->tfb_tcp_block_name) == 0)) {
1191 /*
1192 * These functions are required and you
1193 * need a name.
1194 */
1195 *num_names = 0;
1196 return (EINVAL);
1197 }
1198
1199 if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
1200 *num_names = 0;
1201 return (EINVAL);
1202 }
1203
1204 refcount_init(&blk->tfb_refcnt, 0);
1205 blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
1206 for (i = 0; i < *num_names; i++) {
1207 n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
1208 if (n == NULL) {
1209 error = ENOMEM;
1210 goto cleanup;
1211 }
1212 n->tf_fb = blk;
1213
1214 (void)strlcpy(fs.function_set_name, names[i],
1215 sizeof(fs.function_set_name));
1216 rw_wlock(&tcp_function_lock);
1217 if (find_tcp_functions_locked(&fs) != NULL) {
1218 /* Duplicate name space not allowed */
1219 rw_wunlock(&tcp_function_lock);
1220 free(n, M_TCPFUNCTIONS);
1221 error = EALREADY;
1222 goto cleanup;
1223 }
1224 (void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name));
1225 TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
1226 tcp_fb_cnt++;
1227 rw_wunlock(&tcp_function_lock);
1228 }
1229 return(0);
1230
1231 cleanup:
1232 /*
1233 * Deregister the names we just added. Because registration failed
1234 * for names[i], we don't need to deregister that name.
1235 */
1236 *num_names = i;
1237 rw_wlock(&tcp_function_lock);
1238 while (--i >= 0) {
1239 TAILQ_FOREACH(n, &t_functions, tf_next) {
1240 if (!strncmp(n->tf_name, names[i],
1241 TCP_FUNCTION_NAME_LEN_MAX)) {
1242 TAILQ_REMOVE(&t_functions, n, tf_next);
1243 tcp_fb_cnt--;
1244 n->tf_fb = NULL;
1245 free(n, M_TCPFUNCTIONS);
1246 break;
1247 }
1248 }
1249 }
1250 rw_wunlock(&tcp_function_lock);
1251 return (error);
1252 }
1253
1254 /*
1255 * Register a TCP function block using the name provided in the name
1256 * argument.
1257 *
1258 * Returns 0 on success, or an error code on failure.
1259 */
1260 int
1261 register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
1262 int wait)
1263 {
1264 const char *name_list[1];
1265 int num_names, rv;
1266
1267 num_names = 1;
1268 if (name != NULL)
1269 name_list[0] = name;
1270 else
1271 name_list[0] = blk->tfb_tcp_block_name;
1272 rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
1273 return (rv);
1274 }
1275
1276 /*
1277 * Register a TCP function block using the name defined in
1278 * blk->tfb_tcp_block_name.
1279 *
1280 * Returns 0 on success, or an error code on failure.
1281 */
1282 int
1283 register_tcp_functions(struct tcp_function_block *blk, int wait)
1284 {
1285
1286 return (register_tcp_functions_as_name(blk, NULL, wait));
1287 }
1288
1289 /*
1290 * Deregister all names associated with a function block. This
1291 * functionally removes the function block from use within the system.
1292 *
1293 * When called with a true quiesce argument, mark the function block
1294 * as being removed so no more stacks will use it and determine
1295 * whether the removal would succeed.
1296 *
1297 * When called with a false quiesce argument, actually attempt the
1298 * removal.
1299 *
1300 * When called with a force argument, attempt to switch all TCBs to
1301 * use the default stack instead of returning EBUSY.
1302 *
1303 * Returns 0 on success (or if the removal would succeed, or an error
1304 * code on failure.
1305 */
1306 int
1307 deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce,
1308 bool force)
1309 {
1310 struct tcp_function *f;
1311
1312 if (blk == &tcp_def_funcblk) {
1313 /* You can't un-register the default */
1314 return (EPERM);
1315 }
1316 rw_wlock(&tcp_function_lock);
1317 if (blk == tcp_func_set_ptr) {
1318 /* You can't free the current default */
1319 rw_wunlock(&tcp_function_lock);
1320 return (EBUSY);
1321 }
1322 /* Mark the block so no more stacks can use it. */
1323 blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
1324 /*
1325 * If TCBs are still attached to the stack, attempt to switch them
1326 * to the default stack.
1327 */
1328 if (force && blk->tfb_refcnt) {
1329 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
1330 INPLOOKUP_WLOCKPCB);
1331 struct inpcb *inp;
1332 struct tcpcb *tp;
1333 VNET_ITERATOR_DECL(vnet_iter);
1334
1335 rw_wunlock(&tcp_function_lock);
1336
1337 VNET_LIST_RLOCK();
1338 VNET_FOREACH(vnet_iter) {
1339 CURVNET_SET(vnet_iter);
1340 while ((inp = inp_next(&inpi)) != NULL) {
1341 tp = intotcpcb(inp);
1342 if (tp == NULL || tp->t_fb != blk)
1343 continue;
1344 tcp_switch_back_to_default(tp);
1345 }
1346 CURVNET_RESTORE();
1347 }
1348 VNET_LIST_RUNLOCK();
1349
1350 rw_wlock(&tcp_function_lock);
1351 }
1352 if (blk->tfb_refcnt) {
1353 /* TCBs still attached. */
1354 rw_wunlock(&tcp_function_lock);
1355 return (EBUSY);
1356 }
1357 if (quiesce) {
1358 /* Skip removal. */
1359 rw_wunlock(&tcp_function_lock);
1360 return (0);
1361 }
1362 /* Remove any function names that map to this function block. */
1363 while (find_tcp_fb_locked(blk, &f) != NULL) {
1364 TAILQ_REMOVE(&t_functions, f, tf_next);
1365 tcp_fb_cnt--;
1366 f->tf_fb = NULL;
1367 free(f, M_TCPFUNCTIONS);
1368 }
1369 rw_wunlock(&tcp_function_lock);
1370 return (0);
1371 }
1372
1373 static void
1374 tcp_drain(void)
1375 {
1376 struct epoch_tracker et;
1377 VNET_ITERATOR_DECL(vnet_iter);
1378
1379 if (!do_tcpdrain)
1380 return;
1381
1382 NET_EPOCH_ENTER(et);
1383 VNET_LIST_RLOCK_NOSLEEP();
1384 VNET_FOREACH(vnet_iter) {
1385 CURVNET_SET(vnet_iter);
1386 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
1387 INPLOOKUP_WLOCKPCB);
1388 struct inpcb *inpb;
1389 struct tcpcb *tcpb;
1390
1391 /*
1392 * Walk the tcpbs, if existing, and flush the reassembly queue,
1393 * if there is one...
1394 * XXX: The "Net/3" implementation doesn't imply that the TCP
1395 * reassembly queue should be flushed, but in a situation
1396 * where we're really low on mbufs, this is potentially
1397 * useful.
1398 */
1399 while ((inpb = inp_next(&inpi)) != NULL) {
1400 if ((tcpb = intotcpcb(inpb)) != NULL) {
1401 tcp_reass_flush(tcpb);
1402 tcp_clean_sackreport(tcpb);
1403 #ifdef TCP_BLACKBOX
1404 tcp_log_drain(tcpb);
1405 #endif
1406 #ifdef TCPPCAP
1407 if (tcp_pcap_aggressive_free) {
1408 /* Free the TCP PCAP queues. */
1409 tcp_pcap_drain(&(tcpb->t_inpkts));
1410 tcp_pcap_drain(&(tcpb->t_outpkts));
1411 }
1412 #endif
1413 }
1414 }
1415 CURVNET_RESTORE();
1416 }
1417 VNET_LIST_RUNLOCK_NOSLEEP();
1418 NET_EPOCH_EXIT(et);
1419 }
1420
1421 static void
1422 tcp_vnet_init(void *arg __unused)
1423 {
1424
1425 #ifdef TCP_HHOOK
1426 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
1427 &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1428 printf("%s: WARNING: unable to register helper hook\n", __func__);
1429 if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
1430 &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
1431 printf("%s: WARNING: unable to register helper hook\n", __func__);
1432 #endif
1433 #ifdef STATS
1434 if (tcp_stats_init())
1435 printf("%s: WARNING: unable to initialise TCP stats\n",
1436 __func__);
1437 #endif
1438 in_pcbinfo_init(&V_tcbinfo, &tcpcbstor, tcp_tcbhashsize,
1439 tcp_tcbhashsize);
1440
1441 syncache_init();
1442 tcp_hc_init();
1443
1444 TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
1445 V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
1446 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1447
1448 tcp_fastopen_init();
1449
1450 COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
1451 VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
1452
1453 V_tcp_msl = TCPTV_MSL;
1454 }
1455 VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH,
1456 tcp_vnet_init, NULL);
1457
1458 static void
1459 tcp_init(void *arg __unused)
1460 {
1461 const char *tcbhash_tuneable;
1462 int hashsize;
1463
1464 tcp_reass_global_init();
1465
1466 /* XXX virtualize those below? */
1467 tcp_delacktime = TCPTV_DELACK;
1468 tcp_keepinit = TCPTV_KEEP_INIT;
1469 tcp_keepidle = TCPTV_KEEP_IDLE;
1470 tcp_keepintvl = TCPTV_KEEPINTVL;
1471 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
1472 tcp_rexmit_initial = TCPTV_RTOBASE;
1473 if (tcp_rexmit_initial < 1)
1474 tcp_rexmit_initial = 1;
1475 tcp_rexmit_min = TCPTV_MIN;
1476 if (tcp_rexmit_min < 1)
1477 tcp_rexmit_min = 1;
1478 tcp_persmin = TCPTV_PERSMIN;
1479 tcp_persmax = TCPTV_PERSMAX;
1480 tcp_rexmit_slop = TCPTV_CPU_VAR;
1481 tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
1482
1483 /* Setup the tcp function block list */
1484 TAILQ_INIT(&t_functions);
1485 rw_init(&tcp_function_lock, "tcp_func_lock");
1486 register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
1487 sx_init(&tcpoudp_lock, "TCP over UDP configuration");
1488 #ifdef TCP_BLACKBOX
1489 /* Initialize the TCP logging data. */
1490 tcp_log_init();
1491 #endif
1492 arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0);
1493
1494 if (tcp_soreceive_stream) {
1495 #ifdef INET
1496 tcp_protosw.pr_soreceive = soreceive_stream;
1497 #endif
1498 #ifdef INET6
1499 tcp6_protosw.pr_soreceive = soreceive_stream;
1500 #endif /* INET6 */
1501 }
1502
1503 #ifdef INET6
1504 max_protohdr_grow(sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
1505 #else /* INET6 */
1506 max_protohdr_grow(sizeof(struct tcpiphdr));
1507 #endif /* INET6 */
1508
1509 ISN_LOCK_INIT();
1510 EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
1511 SHUTDOWN_PRI_DEFAULT);
1512 EVENTHANDLER_REGISTER(vm_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
1513 EVENTHANDLER_REGISTER(mbuf_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
1514
1515 tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
1516 tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
1517 tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
1518 tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
1519 tcp_extra_mbuf = counter_u64_alloc(M_WAITOK);
1520 tcp_would_have_but = counter_u64_alloc(M_WAITOK);
1521 tcp_comp_total = counter_u64_alloc(M_WAITOK);
1522 tcp_uncomp_total = counter_u64_alloc(M_WAITOK);
1523 tcp_bad_csums = counter_u64_alloc(M_WAITOK);
1524 #ifdef TCPPCAP
1525 tcp_pcap_init();
1526 #endif
1527
1528 hashsize = TCBHASHSIZE;
1529 tcbhash_tuneable = "net.inet.tcp.tcbhashsize";
1530 TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
1531 if (hashsize == 0) {
1532 /*
1533 * Auto tune the hash size based on maxsockets.
1534 * A perfect hash would have a 1:1 mapping
1535 * (hashsize = maxsockets) however it's been
1536 * suggested that O(2) average is better.
1537 */
1538 hashsize = maketcp_hashsize(maxsockets / 4);
1539 /*
1540 * Our historical default is 512,
1541 * do not autotune lower than this.
1542 */
1543 if (hashsize < 512)
1544 hashsize = 512;
1545 if (bootverbose)
1546 printf("%s: %s auto tuned to %d\n", __func__,
1547 tcbhash_tuneable, hashsize);
1548 }
1549 /*
1550 * We require a hashsize to be a power of two.
1551 * Previously if it was not a power of two we would just reset it
1552 * back to 512, which could be a nasty surprise if you did not notice
1553 * the error message.
1554 * Instead what we do is clip it to the closest power of two lower
1555 * than the specified hash value.
1556 */
1557 if (!powerof2(hashsize)) {
1558 int oldhashsize = hashsize;
1559
1560 hashsize = maketcp_hashsize(hashsize);
1561 /* prevent absurdly low value */
1562 if (hashsize < 16)
1563 hashsize = 16;
1564 printf("%s: WARNING: TCB hash size not a power of 2, "
1565 "clipped from %d to %d.\n", __func__, oldhashsize,
1566 hashsize);
1567 }
1568 tcp_tcbhashsize = hashsize;
1569
1570 #ifdef INET
1571 IPPROTO_REGISTER(IPPROTO_TCP, tcp_input, tcp_ctlinput);
1572 #endif
1573 #ifdef INET6
1574 IP6PROTO_REGISTER(IPPROTO_TCP, tcp6_input, tcp6_ctlinput);
1575 #endif
1576 }
1577 SYSINIT(tcp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, tcp_init, NULL);
1578
1579 #ifdef VIMAGE
1580 static void
1581 tcp_destroy(void *unused __unused)
1582 {
1583 int n;
1584 #ifdef TCP_HHOOK
1585 int error;
1586 #endif
1587
1588 /*
1589 * All our processes are gone, all our sockets should be cleaned
1590 * up, which means, we should be past the tcp_discardcb() calls.
1591 * Sleep to let all tcpcb timers really disappear and cleanup.
1592 */
1593 for (;;) {
1594 INP_INFO_WLOCK(&V_tcbinfo);
1595 n = V_tcbinfo.ipi_count;
1596 INP_INFO_WUNLOCK(&V_tcbinfo);
1597 if (n == 0)
1598 break;
1599 pause("tcpdes", hz / 10);
1600 }
1601 tcp_hc_destroy();
1602 syncache_destroy();
1603 in_pcbinfo_destroy(&V_tcbinfo);
1604 /* tcp_discardcb() clears the sack_holes up. */
1605 uma_zdestroy(V_sack_hole_zone);
1606
1607 /*
1608 * Cannot free the zone until all tcpcbs are released as we attach
1609 * the allocations to them.
1610 */
1611 tcp_fastopen_destroy();
1612
1613 COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
1614 VNET_PCPUSTAT_FREE(tcpstat);
1615
1616 #ifdef TCP_HHOOK
1617 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
1618 if (error != 0) {
1619 printf("%s: WARNING: unable to deregister helper hook "
1620 "type=%d, id=%d: error %d returned\n", __func__,
1621 HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
1622 }
1623 error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
1624 if (error != 0) {
1625 printf("%s: WARNING: unable to deregister helper hook "
1626 "type=%d, id=%d: error %d returned\n", __func__,
1627 HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
1628 }
1629 #endif
1630 }
1631 VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
1632 #endif
1633
1634 void
1635 tcp_fini(void *xtp)
1636 {
1637
1638 }
1639
1640 /*
1641 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
1642 * tcp_template used to store this data in mbufs, but we now recopy it out
1643 * of the tcpcb each time to conserve mbufs.
1644 */
1645 void
1646 tcpip_fillheaders(struct inpcb *inp, uint16_t port, void *ip_ptr, void *tcp_ptr)
1647 {
1648 struct tcphdr *th = (struct tcphdr *)tcp_ptr;
1649
1650 INP_WLOCK_ASSERT(inp);
1651
1652 #ifdef INET6
1653 if ((inp->inp_vflag & INP_IPV6) != 0) {
1654 struct ip6_hdr *ip6;
1655
1656 ip6 = (struct ip6_hdr *)ip_ptr;
1657 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
1658 (inp->inp_flow & IPV6_FLOWINFO_MASK);
1659 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
1660 (IPV6_VERSION & IPV6_VERSION_MASK);
1661 if (port == 0)
1662 ip6->ip6_nxt = IPPROTO_TCP;
1663 else
1664 ip6->ip6_nxt = IPPROTO_UDP;
1665 ip6->ip6_plen = htons(sizeof(struct tcphdr));
1666 ip6->ip6_src = inp->in6p_laddr;
1667 ip6->ip6_dst = inp->in6p_faddr;
1668 }
1669 #endif /* INET6 */
1670 #if defined(INET6) && defined(INET)
1671 else
1672 #endif
1673 #ifdef INET
1674 {
1675 struct ip *ip;
1676
1677 ip = (struct ip *)ip_ptr;
1678 ip->ip_v = IPVERSION;
1679 ip->ip_hl = 5;
1680 ip->ip_tos = inp->inp_ip_tos;
1681 ip->ip_len = 0;
1682 ip->ip_id = 0;
1683 ip->ip_off = 0;
1684 ip->ip_ttl = inp->inp_ip_ttl;
1685 ip->ip_sum = 0;
1686 if (port == 0)
1687 ip->ip_p = IPPROTO_TCP;
1688 else
1689 ip->ip_p = IPPROTO_UDP;
1690 ip->ip_src = inp->inp_laddr;
1691 ip->ip_dst = inp->inp_faddr;
1692 }
1693 #endif /* INET */
1694 th->th_sport = inp->inp_lport;
1695 th->th_dport = inp->inp_fport;
1696 th->th_seq = 0;
1697 th->th_ack = 0;
1698 th->th_off = 5;
1699 tcp_set_flags(th, 0);
1700 th->th_win = 0;
1701 th->th_urp = 0;
1702 th->th_sum = 0; /* in_pseudo() is called later for ipv4 */
1703 }
1704
1705 /*
1706 * Create template to be used to send tcp packets on a connection.
1707 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
1708 * use for this function is in keepalives, which use tcp_respond.
1709 */
1710 struct tcptemp *
1711 tcpip_maketemplate(struct inpcb *inp)
1712 {
1713 struct tcptemp *t;
1714
1715 t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
1716 if (t == NULL)
1717 return (NULL);
1718 tcpip_fillheaders(inp, 0, (void *)&t->tt_ipgen, (void *)&t->tt_t);
1719 return (t);
1720 }
1721
1722 /*
1723 * Send a single message to the TCP at address specified by
1724 * the given TCP/IP header. If m == NULL, then we make a copy
1725 * of the tcpiphdr at th and send directly to the addressed host.
1726 * This is used to force keep alive messages out using the TCP
1727 * template for a connection. If flags are given then we send
1728 * a message back to the TCP which originated the segment th,
1729 * and discard the mbuf containing it and any other attached mbufs.
1730 *
1731 * In any case the ack and sequence number of the transmitted
1732 * segment are as specified by the parameters.
1733 *
1734 * NOTE: If m != NULL, then th must point to *inside* the mbuf.
1735 */
1736 void
1737 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
1738 tcp_seq ack, tcp_seq seq, uint16_t flags)
1739 {
1740 struct tcpopt to;
1741 struct inpcb *inp;
1742 struct ip *ip;
1743 struct mbuf *optm;
1744 struct udphdr *uh = NULL;
1745 struct tcphdr *nth;
1746 struct tcp_log_buffer *lgb;
1747 u_char *optp;
1748 #ifdef INET6
1749 struct ip6_hdr *ip6;
1750 int isipv6;
1751 #endif /* INET6 */
1752 int optlen, tlen, win, ulen;
1753 int ect = 0;
1754 bool incl_opts;
1755 uint16_t port;
1756 int output_ret;
1757 #ifdef INVARIANTS
1758 int thflags = tcp_get_flags(th);
1759 #endif
1760
1761 KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
1762 NET_EPOCH_ASSERT();
1763
1764 #ifdef INET6
1765 isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
1766 ip6 = ipgen;
1767 #endif /* INET6 */
1768 ip = ipgen;
1769
1770 if (tp != NULL) {
1771 inp = tptoinpcb(tp);
1772 INP_LOCK_ASSERT(inp);
1773 } else
1774 inp = NULL;
1775
1776 if (m != NULL) {
1777 #ifdef INET6
1778 if (isipv6 && ip6 && (ip6->ip6_nxt == IPPROTO_UDP))
1779 port = m->m_pkthdr.tcp_tun_port;
1780 else
1781 #endif
1782 if (ip && (ip->ip_p == IPPROTO_UDP))
1783 port = m->m_pkthdr.tcp_tun_port;
1784 else
1785 port = 0;
1786 } else
1787 port = tp->t_port;
1788
1789 incl_opts = false;
1790 win = 0;
1791 if (tp != NULL) {
1792 if (!(flags & TH_RST)) {
1793 win = sbspace(&inp->inp_socket->so_rcv);
1794 if (win > TCP_MAXWIN << tp->rcv_scale)
1795 win = TCP_MAXWIN << tp->rcv_scale;
1796 }
1797 if ((tp->t_flags & TF_NOOPT) == 0)
1798 incl_opts = true;
1799 }
1800 if (m == NULL) {
1801 m = m_gethdr(M_NOWAIT, MT_DATA);
1802 if (m == NULL)
1803 return;
1804 m->m_data += max_linkhdr;
1805 #ifdef INET6
1806 if (isipv6) {
1807 bcopy((caddr_t)ip6, mtod(m, caddr_t),
1808 sizeof(struct ip6_hdr));
1809 ip6 = mtod(m, struct ip6_hdr *);
1810 nth = (struct tcphdr *)(ip6 + 1);
1811 if (port) {
1812 /* Insert a UDP header */
1813 uh = (struct udphdr *)nth;
1814 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1815 uh->uh_dport = port;
1816 nth = (struct tcphdr *)(uh + 1);
1817 }
1818 } else
1819 #endif /* INET6 */
1820 {
1821 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
1822 ip = mtod(m, struct ip *);
1823 nth = (struct tcphdr *)(ip + 1);
1824 if (port) {
1825 /* Insert a UDP header */
1826 uh = (struct udphdr *)nth;
1827 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1828 uh->uh_dport = port;
1829 nth = (struct tcphdr *)(uh + 1);
1830 }
1831 }
1832 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1833 flags = TH_ACK;
1834 } else if ((!M_WRITABLE(m)) || (port != 0)) {
1835 struct mbuf *n;
1836
1837 /* Can't reuse 'm', allocate a new mbuf. */
1838 n = m_gethdr(M_NOWAIT, MT_DATA);
1839 if (n == NULL) {
1840 m_freem(m);
1841 return;
1842 }
1843
1844 if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
1845 m_freem(m);
1846 m_freem(n);
1847 return;
1848 }
1849
1850 n->m_data += max_linkhdr;
1851 /* m_len is set later */
1852 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
1853 #ifdef INET6
1854 if (isipv6) {
1855 bcopy((caddr_t)ip6, mtod(n, caddr_t),
1856 sizeof(struct ip6_hdr));
1857 ip6 = mtod(n, struct ip6_hdr *);
1858 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1859 nth = (struct tcphdr *)(ip6 + 1);
1860 if (port) {
1861 /* Insert a UDP header */
1862 uh = (struct udphdr *)nth;
1863 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1864 uh->uh_dport = port;
1865 nth = (struct tcphdr *)(uh + 1);
1866 }
1867 } else
1868 #endif /* INET6 */
1869 {
1870 bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip));
1871 ip = mtod(n, struct ip *);
1872 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1873 nth = (struct tcphdr *)(ip + 1);
1874 if (port) {
1875 /* Insert a UDP header */
1876 uh = (struct udphdr *)nth;
1877 uh->uh_sport = htons(V_tcp_udp_tunneling_port);
1878 uh->uh_dport = port;
1879 nth = (struct tcphdr *)(uh + 1);
1880 }
1881 }
1882 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
1883 xchg(nth->th_dport, nth->th_sport, uint16_t);
1884 th = nth;
1885 m_freem(m);
1886 m = n;
1887 } else {
1888 /*
1889 * reuse the mbuf.
1890 * XXX MRT We inherit the FIB, which is lucky.
1891 */
1892 m_freem(m->m_next);
1893 m->m_next = NULL;
1894 m->m_data = (caddr_t)ipgen;
1895 /* m_len is set later */
1896 #ifdef INET6
1897 if (isipv6) {
1898 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
1899 nth = (struct tcphdr *)(ip6 + 1);
1900 } else
1901 #endif /* INET6 */
1902 {
1903 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
1904 nth = (struct tcphdr *)(ip + 1);
1905 }
1906 if (th != nth) {
1907 /*
1908 * this is usually a case when an extension header
1909 * exists between the IPv6 header and the
1910 * TCP header.
1911 */
1912 nth->th_sport = th->th_sport;
1913 nth->th_dport = th->th_dport;
1914 }
1915 xchg(nth->th_dport, nth->th_sport, uint16_t);
1916 #undef xchg
1917 }
1918 tlen = 0;
1919 #ifdef INET6
1920 if (isipv6)
1921 tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
1922 #endif
1923 #if defined(INET) && defined(INET6)
1924 else
1925 #endif
1926 #ifdef INET
1927 tlen = sizeof (struct tcpiphdr);
1928 #endif
1929 if (port)
1930 tlen += sizeof (struct udphdr);
1931 #ifdef INVARIANTS
1932 m->m_len = 0;
1933 KASSERT(M_TRAILINGSPACE(m) >= tlen,
1934 ("Not enough trailing space for message (m=%p, need=%d, have=%ld)",
1935 m, tlen, (long)M_TRAILINGSPACE(m)));
1936 #endif
1937 m->m_len = tlen;
1938 to.to_flags = 0;
1939 if (incl_opts) {
1940 ect = tcp_ecn_output_established(tp, &flags, 0, false);
1941 /* Make sure we have room. */
1942 if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) {
1943 m->m_next = m_get(M_NOWAIT, MT_DATA);
1944 if (m->m_next) {
1945 optp = mtod(m->m_next, u_char *);
1946 optm = m->m_next;
1947 } else
1948 incl_opts = false;
1949 } else {
1950 optp = (u_char *) (nth + 1);
1951 optm = m;
1952 }
1953 }
1954 if (incl_opts) {
1955 /* Timestamps. */
1956 if (tp->t_flags & TF_RCVD_TSTMP) {
1957 to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
1958 to.to_tsecr = tp->ts_recent;
1959 to.to_flags |= TOF_TS;
1960 }
1961 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1962 /* TCP-MD5 (RFC2385). */
1963 if (tp->t_flags & TF_SIGNATURE)
1964 to.to_flags |= TOF_SIGNATURE;
1965 #endif
1966 /* Add the options. */
1967 tlen += optlen = tcp_addoptions(&to, optp);
1968
1969 /* Update m_len in the correct mbuf. */
1970 optm->m_len += optlen;
1971 } else
1972 optlen = 0;
1973 #ifdef INET6
1974 if (isipv6) {
1975 if (uh) {
1976 ulen = tlen - sizeof(struct ip6_hdr);
1977 uh->uh_ulen = htons(ulen);
1978 }
1979 ip6->ip6_flow = htonl(ect << 20);
1980 ip6->ip6_vfc = IPV6_VERSION;
1981 if (port)
1982 ip6->ip6_nxt = IPPROTO_UDP;
1983 else
1984 ip6->ip6_nxt = IPPROTO_TCP;
1985 ip6->ip6_plen = htons(tlen - sizeof(*ip6));
1986 }
1987 #endif
1988 #if defined(INET) && defined(INET6)
1989 else
1990 #endif
1991 #ifdef INET
1992 {
1993 if (uh) {
1994 ulen = tlen - sizeof(struct ip);
1995 uh->uh_ulen = htons(ulen);
1996 }
1997 ip->ip_tos = ect;
1998 ip->ip_len = htons(tlen);
1999 ip->ip_ttl = V_ip_defttl;
2000 if (port) {
2001 ip->ip_p = IPPROTO_UDP;
2002 } else {
2003 ip->ip_p = IPPROTO_TCP;
2004 }
2005 if (V_path_mtu_discovery)
2006 ip->ip_off |= htons(IP_DF);
2007 }
2008 #endif
2009 m->m_pkthdr.len = tlen;
2010 m->m_pkthdr.rcvif = NULL;
2011 #ifdef MAC
2012 if (inp != NULL) {
2013 /*
2014 * Packet is associated with a socket, so allow the
2015 * label of the response to reflect the socket label.
2016 */
2017 INP_LOCK_ASSERT(inp);
2018 mac_inpcb_create_mbuf(inp, m);
2019 } else {
2020 /*
2021 * Packet is not associated with a socket, so possibly
2022 * update the label in place.
2023 */
2024 mac_netinet_tcp_reply(m);
2025 }
2026 #endif
2027 nth->th_seq = htonl(seq);
2028 nth->th_ack = htonl(ack);
2029 nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
2030 tcp_set_flags(nth, flags);
2031 if (tp != NULL)
2032 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
2033 else
2034 nth->th_win = htons((u_short)win);
2035 nth->th_urp = 0;
2036
2037 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
2038 if (to.to_flags & TOF_SIGNATURE) {
2039 if (!TCPMD5_ENABLED() ||
2040 TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) {
2041 m_freem(m);
2042 return;
2043 }
2044 }
2045 #endif
2046
2047 #ifdef INET6
2048 if (isipv6) {
2049 if (port) {
2050 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2051 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2052 uh->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
2053 nth->th_sum = 0;
2054 } else {
2055 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
2056 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2057 nth->th_sum = in6_cksum_pseudo(ip6,
2058 tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
2059 }
2060 ip6->ip6_hlim = in6_selecthlim(inp, NULL);
2061 }
2062 #endif /* INET6 */
2063 #if defined(INET6) && defined(INET)
2064 else
2065 #endif
2066 #ifdef INET
2067 {
2068 if (port) {
2069 uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2070 htons(ulen + IPPROTO_UDP));
2071 m->m_pkthdr.csum_flags = CSUM_UDP;
2072 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2073 nth->th_sum = 0;
2074 } else {
2075 m->m_pkthdr.csum_flags = CSUM_TCP;
2076 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2077 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2078 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
2079 }
2080 }
2081 #endif /* INET */
2082 TCP_PROBE3(debug__output, tp, th, m);
2083 if (flags & TH_RST)
2084 TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth);
2085 lgb = NULL;
2086 if ((tp != NULL) && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2087 if (INP_WLOCKED(inp)) {
2088 union tcp_log_stackspecific log;
2089 struct timeval tv;
2090
2091 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2092 log.u_bbr.inhpts = inp->inp_in_hpts;
2093 log.u_bbr.flex8 = 4;
2094 log.u_bbr.pkts_out = tp->t_maxseg;
2095 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2096 log.u_bbr.delivered = 0;
2097 lgb = tcp_log_event_(tp, nth, NULL, NULL, TCP_LOG_OUT,
2098 ERRNO_UNK, 0, &log, false, NULL, NULL, 0, &tv);
2099 } else {
2100 /*
2101 * We can not log the packet, since we only own the
2102 * read lock, but a write lock is needed. The read lock
2103 * is not upgraded to a write lock, since only getting
2104 * the read lock was done intentionally to improve the
2105 * handling of SYN flooding attacks.
2106 * This happens only for pure SYN segments received in
2107 * the initial CLOSED state, or received in a more
2108 * advanced state than listen and the UDP encapsulation
2109 * port is unexpected.
2110 * The incoming SYN segments do not really belong to
2111 * the TCP connection and the handling does not change
2112 * the state of the TCP connection. Therefore, the
2113 * sending of the RST segments is not logged. Please
2114 * note that also the incoming SYN segments are not
2115 * logged.
2116 *
2117 * The following code ensures that the above description
2118 * is and stays correct.
2119 */
2120 KASSERT((thflags & (TH_ACK|TH_SYN)) == TH_SYN &&
2121 (tp->t_state == TCPS_CLOSED ||
2122 (tp->t_state > TCPS_LISTEN && tp->t_port != port)),
2123 ("%s: Logging of TCP segment with flags 0x%b and "
2124 "UDP encapsulation port %u skipped in state %s",
2125 __func__, thflags, PRINT_TH_FLAGS,
2126 ntohs(port), tcpstates[tp->t_state]));
2127 }
2128 }
2129
2130 if (flags & TH_ACK)
2131 TCPSTAT_INC(tcps_sndacks);
2132 else if (flags & (TH_SYN|TH_FIN|TH_RST))
2133 TCPSTAT_INC(tcps_sndctrl);
2134 TCPSTAT_INC(tcps_sndtotal);
2135
2136 #ifdef INET6
2137 if (isipv6) {
2138 TCP_PROBE5(send, NULL, tp, ip6, tp, nth);
2139 output_ret = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
2140 }
2141 #endif /* INET6 */
2142 #if defined(INET) && defined(INET6)
2143 else
2144 #endif
2145 #ifdef INET
2146 {
2147 TCP_PROBE5(send, NULL, tp, ip, tp, nth);
2148 output_ret = ip_output(m, NULL, NULL, 0, NULL, inp);
2149 }
2150 #endif
2151 if (lgb != NULL)
2152 lgb->tlb_errno = output_ret;
2153 }
2154
2155 /*
2156 * Create a new TCP control block, making an empty reassembly queue and hooking
2157 * it to the argument protocol control block. The `inp' parameter must have
2158 * come from the zone allocator set up by tcpcbstor declaration.
2159 */
2160 struct tcpcb *
2161 tcp_newtcpcb(struct inpcb *inp)
2162 {
2163 struct tcpcb *tp = intotcpcb(inp);
2164 #ifdef INET6
2165 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
2166 #endif /* INET6 */
2167
2168 /*
2169 * Historically allocation was done with M_ZERO. There is a lot of
2170 * code that rely on that. For now take safe approach and zero whole
2171 * tcpcb. This definitely can be optimized.
2172 */
2173 bzero(&tp->t_start_zero, t_zero_size);
2174
2175 /* Initialise cc_var struct for this tcpcb. */
2176 tp->t_ccv.type = IPPROTO_TCP;
2177 tp->t_ccv.ccvc.tcp = tp;
2178 rw_rlock(&tcp_function_lock);
2179 tp->t_fb = tcp_func_set_ptr;
2180 refcount_acquire(&tp->t_fb->tfb_refcnt);
2181 rw_runlock(&tcp_function_lock);
2182 /*
2183 * Use the current system default CC algorithm.
2184 */
2185 cc_attach(tp, CC_DEFAULT_ALGO());
2186
2187 if (CC_ALGO(tp)->cb_init != NULL)
2188 if (CC_ALGO(tp)->cb_init(&tp->t_ccv, NULL) > 0) {
2189 cc_detach(tp);
2190 if (tp->t_fb->tfb_tcp_fb_fini)
2191 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2192 refcount_release(&tp->t_fb->tfb_refcnt);
2193 return (NULL);
2194 }
2195
2196 #ifdef TCP_HHOOK
2197 if (khelp_init_osd(HELPER_CLASS_TCP, &tp->t_osd)) {
2198 if (tp->t_fb->tfb_tcp_fb_fini)
2199 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2200 refcount_release(&tp->t_fb->tfb_refcnt);
2201 return (NULL);
2202 }
2203 #endif
2204
2205 TAILQ_INIT(&tp->t_segq);
2206 tp->t_maxseg =
2207 #ifdef INET6
2208 isipv6 ? V_tcp_v6mssdflt :
2209 #endif /* INET6 */
2210 V_tcp_mssdflt;
2211
2212 callout_init_rw(&tp->t_callout, &inp->inp_lock, CALLOUT_RETURNUNLOCKED);
2213 for (int i = 0; i < TT_N; i++)
2214 tp->t_timers[i] = SBT_MAX;
2215
2216 switch (V_tcp_do_rfc1323) {
2217 case 0:
2218 break;
2219 default:
2220 case 1:
2221 tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
2222 break;
2223 case 2:
2224 tp->t_flags = TF_REQ_SCALE;
2225 break;
2226 case 3:
2227 tp->t_flags = TF_REQ_TSTMP;
2228 break;
2229 }
2230 if (V_tcp_do_sack)
2231 tp->t_flags |= TF_SACK_PERMIT;
2232 TAILQ_INIT(&tp->snd_holes);
2233
2234 /*
2235 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
2236 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
2237 * reasonable initial retransmit time.
2238 */
2239 tp->t_srtt = TCPTV_SRTTBASE;
2240 tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
2241 tp->t_rttmin = tcp_rexmit_min;
2242 tp->t_rxtcur = tcp_rexmit_initial;
2243 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
2244 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
2245 tp->t_rcvtime = ticks;
2246 /*
2247 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
2248 * because the socket may be bound to an IPv6 wildcard address,
2249 * which may match an IPv4-mapped IPv6 address.
2250 */
2251 inp->inp_ip_ttl = V_ip_defttl;
2252 #ifdef TCPHPTS
2253 /*
2254 * If using hpts lets drop a random number in so
2255 * not all new connections fall on the same CPU.
2256 */
2257 inp->inp_hpts_cpu = hpts_random_cpu(inp);
2258 #endif
2259 #ifdef TCPPCAP
2260 /*
2261 * Init the TCP PCAP queues.
2262 */
2263 tcp_pcap_tcpcb_init(tp);
2264 #endif
2265 #ifdef TCP_BLACKBOX
2266 /* Initialize the per-TCPCB log data. */
2267 tcp_log_tcpcbinit(tp);
2268 #endif
2269 tp->t_pacing_rate = -1;
2270 if (tp->t_fb->tfb_tcp_fb_init) {
2271 if ((*tp->t_fb->tfb_tcp_fb_init)(tp)) {
2272 refcount_release(&tp->t_fb->tfb_refcnt);
2273 return (NULL);
2274 }
2275 }
2276 #ifdef STATS
2277 if (V_tcp_perconn_stats_enable == 1)
2278 tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0);
2279 #endif
2280 if (V_tcp_do_lrd)
2281 tp->t_flags |= TF_LRD;
2282
2283 return (tp);
2284 }
2285
2286 /*
2287 * Drop a TCP connection, reporting
2288 * the specified error. If connection is synchronized,
2289 * then send a RST to peer.
2290 */
2291 struct tcpcb *
2292 tcp_drop(struct tcpcb *tp, int errno)
2293 {
2294 struct socket *so = tptosocket(tp);
2295
2296 NET_EPOCH_ASSERT();
2297 INP_WLOCK_ASSERT(tptoinpcb(tp));
2298
2299 if (TCPS_HAVERCVDSYN(tp->t_state)) {
2300 tcp_state_change(tp, TCPS_CLOSED);
2301 /* Don't use tcp_output() here due to possible recursion. */
2302 (void)tcp_output_nodrop(tp);
2303 TCPSTAT_INC(tcps_drops);
2304 } else
2305 TCPSTAT_INC(tcps_conndrops);
2306 if (errno == ETIMEDOUT && tp->t_softerror)
2307 errno = tp->t_softerror;
2308 so->so_error = errno;
2309 return (tcp_close(tp));
2310 }
2311
2312 void
2313 tcp_discardcb(struct tcpcb *tp)
2314 {
2315 struct inpcb *inp = tptoinpcb(tp);
2316 struct socket *so = tptosocket(tp);
2317 #ifdef INET6
2318 bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
2319 #endif
2320
2321 INP_WLOCK_ASSERT(inp);
2322
2323 tcp_timer_stop(tp);
2324 if (tp->t_fb->tfb_tcp_timer_stop_all) {
2325 tp->t_fb->tfb_tcp_timer_stop_all(tp);
2326 }
2327
2328 /* free the reassembly queue, if any */
2329 tcp_reass_flush(tp);
2330
2331 #ifdef TCP_OFFLOAD
2332 /* Disconnect offload device, if any. */
2333 if (tp->t_flags & TF_TOE)
2334 tcp_offload_detach(tp);
2335 #endif
2336
2337 tcp_free_sackholes(tp);
2338
2339 #ifdef TCPPCAP
2340 /* Free the TCP PCAP queues. */
2341 tcp_pcap_drain(&(tp->t_inpkts));
2342 tcp_pcap_drain(&(tp->t_outpkts));
2343 #endif
2344
2345 /* Allow the CC algorithm to clean up after itself. */
2346 if (CC_ALGO(tp)->cb_destroy != NULL)
2347 CC_ALGO(tp)->cb_destroy(&tp->t_ccv);
2348 CC_DATA(tp) = NULL;
2349 /* Detach from the CC algorithm */
2350 cc_detach(tp);
2351
2352 #ifdef TCP_HHOOK
2353 khelp_destroy_osd(&tp->t_osd);
2354 #endif
2355 #ifdef STATS
2356 stats_blob_destroy(tp->t_stats);
2357 #endif
2358
2359 CC_ALGO(tp) = NULL;
2360
2361 #ifdef TCP_BLACKBOX
2362 tcp_log_tcpcbfini(tp);
2363 #endif
2364 TCPSTATES_DEC(tp->t_state);
2365 if (tp->t_fb->tfb_tcp_fb_fini)
2366 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
2367
2368 /*
2369 * If we got enough samples through the srtt filter,
2370 * save the rtt and rttvar in the routing entry.
2371 * 'Enough' is arbitrarily defined as 4 rtt samples.
2372 * 4 samples is enough for the srtt filter to converge
2373 * to within enough % of the correct value; fewer samples
2374 * and we could save a bogus rtt. The danger is not high
2375 * as tcp quickly recovers from everything.
2376 * XXX: Works very well but needs some more statistics!
2377 *
2378 * XXXRRS: Updating must be after the stack fini() since
2379 * that may be converting some internal representation of
2380 * say srtt etc into the general one used by other stacks.
2381 * Lets also at least protect against the so being NULL
2382 * as RW stated below.
2383 */
2384 if ((tp->t_rttupdated >= 4) && (so != NULL)) {
2385 struct hc_metrics_lite metrics;
2386 uint32_t ssthresh;
2387
2388 bzero(&metrics, sizeof(metrics));
2389 /*
2390 * Update the ssthresh always when the conditions below
2391 * are satisfied. This gives us better new start value
2392 * for the congestion avoidance for new connections.
2393 * ssthresh is only set if packet loss occurred on a session.
2394 *
2395 * XXXRW: 'so' may be NULL here, and/or socket buffer may be
2396 * being torn down. Ideally this code would not use 'so'.
2397 */
2398 ssthresh = tp->snd_ssthresh;
2399 if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
2400 /*
2401 * convert the limit from user data bytes to
2402 * packets then to packet data bytes.
2403 */
2404 ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
2405 if (ssthresh < 2)
2406 ssthresh = 2;
2407 ssthresh *= (tp->t_maxseg +
2408 #ifdef INET6
2409 (isipv6 ? sizeof (struct ip6_hdr) +
2410 sizeof (struct tcphdr) :
2411 #endif
2412 sizeof (struct tcpiphdr)
2413 #ifdef INET6
2414 )
2415 #endif
2416 );
2417 } else
2418 ssthresh = 0;
2419 metrics.rmx_ssthresh = ssthresh;
2420
2421 metrics.rmx_rtt = tp->t_srtt;
2422 metrics.rmx_rttvar = tp->t_rttvar;
2423 metrics.rmx_cwnd = tp->snd_cwnd;
2424 metrics.rmx_sendpipe = 0;
2425 metrics.rmx_recvpipe = 0;
2426
2427 tcp_hc_update(&inp->inp_inc, &metrics);
2428 }
2429
2430 refcount_release(&tp->t_fb->tfb_refcnt);
2431 }
2432
2433 /*
2434 * Attempt to close a TCP control block, marking it as dropped, and freeing
2435 * the socket if we hold the only reference.
2436 */
2437 struct tcpcb *
2438 tcp_close(struct tcpcb *tp)
2439 {
2440 struct inpcb *inp = tptoinpcb(tp);
2441 struct socket *so = tptosocket(tp);
2442
2443 INP_WLOCK_ASSERT(inp);
2444
2445 #ifdef TCP_OFFLOAD
2446 if (tp->t_state == TCPS_LISTEN)
2447 tcp_offload_listen_stop(tp);
2448 #endif
2449 /*
2450 * This releases the TFO pending counter resource for TFO listen
2451 * sockets as well as passively-created TFO sockets that transition
2452 * from SYN_RECEIVED to CLOSED.
2453 */
2454 if (tp->t_tfo_pending) {
2455 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
2456 tp->t_tfo_pending = NULL;
2457 }
2458 #ifdef TCPHPTS
2459 tcp_hpts_remove(inp);
2460 #endif
2461 in_pcbdrop(inp);
2462 TCPSTAT_INC(tcps_closed);
2463 if (tp->t_state != TCPS_CLOSED)
2464 tcp_state_change(tp, TCPS_CLOSED);
2465 KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
2466 soisdisconnected(so);
2467 if (inp->inp_flags & INP_SOCKREF) {
2468 inp->inp_flags &= ~INP_SOCKREF;
2469 INP_WUNLOCK(inp);
2470 sorele(so);
2471 return (NULL);
2472 }
2473 return (tp);
2474 }
2475
2476 /*
2477 * Notify a tcp user of an asynchronous error;
2478 * store error as soft error, but wake up user
2479 * (for now, won't do anything until can select for soft error).
2480 *
2481 * Do not wake up user since there currently is no mechanism for
2482 * reporting soft errors (yet - a kqueue filter may be added).
2483 */
2484 static struct inpcb *
2485 tcp_notify(struct inpcb *inp, int error)
2486 {
2487 struct tcpcb *tp;
2488
2489 INP_WLOCK_ASSERT(inp);
2490
2491 tp = intotcpcb(inp);
2492 KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
2493
2494 /*
2495 * Ignore some errors if we are hooked up.
2496 * If connection hasn't completed, has retransmitted several times,
2497 * and receives a second error, give up now. This is better
2498 * than waiting a long time to establish a connection that
2499 * can never complete.
2500 */
2501 if (tp->t_state == TCPS_ESTABLISHED &&
2502 (error == EHOSTUNREACH || error == ENETUNREACH ||
2503 error == EHOSTDOWN)) {
2504 if (inp->inp_route.ro_nh) {
2505 NH_FREE(inp->inp_route.ro_nh);
2506 inp->inp_route.ro_nh = (struct nhop_object *)NULL;
2507 }
2508 return (inp);
2509 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
2510 tp->t_softerror) {
2511 tp = tcp_drop(tp, error);
2512 if (tp != NULL)
2513 return (inp);
2514 else
2515 return (NULL);
2516 } else {
2517 tp->t_softerror = error;
2518 return (inp);
2519 }
2520 #if 0
2521 wakeup( &so->so_timeo);
2522 sorwakeup(so);
2523 sowwakeup(so);
2524 #endif
2525 }
2526
2527 static int
2528 tcp_pcblist(SYSCTL_HANDLER_ARGS)
2529 {
2530 struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
2531 INPLOOKUP_RLOCKPCB);
2532 struct xinpgen xig;
2533 struct inpcb *inp;
2534 int error;
2535
2536 if (req->newptr != NULL)
2537 return (EPERM);
2538
2539 if (req->oldptr == NULL) {
2540 int n;
2541
2542 n = V_tcbinfo.ipi_count +
2543 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2544 n += imax(n / 8, 10);
2545 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
2546 return (0);
2547 }
2548
2549 if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
2550 return (error);
2551
2552 bzero(&xig, sizeof(xig));
2553 xig.xig_len = sizeof xig;
2554 xig.xig_count = V_tcbinfo.ipi_count +
2555 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2556 xig.xig_gen = V_tcbinfo.ipi_gencnt;
2557 xig.xig_sogen = so_gencnt;
2558 error = SYSCTL_OUT(req, &xig, sizeof xig);
2559 if (error)
2560 return (error);
2561
2562 error = syncache_pcblist(req);
2563 if (error)
2564 return (error);
2565
2566 while ((inp = inp_next(&inpi)) != NULL) {
2567 if (inp->inp_gencnt <= xig.xig_gen &&
2568 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
2569 struct xtcpcb xt;
2570
2571 tcp_inptoxtp(inp, &xt);
2572 error = SYSCTL_OUT(req, &xt, sizeof xt);
2573 if (error) {
2574 INP_RUNLOCK(inp);
2575 break;
2576 } else
2577 continue;
2578 }
2579 }
2580
2581 if (!error) {
2582 /*
2583 * Give the user an updated idea of our state.
2584 * If the generation differs from what we told
2585 * her before, she knows that something happened
2586 * while we were processing this request, and it
2587 * might be necessary to retry.
2588 */
2589 xig.xig_gen = V_tcbinfo.ipi_gencnt;
2590 xig.xig_sogen = so_gencnt;
2591 xig.xig_count = V_tcbinfo.ipi_count +
2592 counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
2593 error = SYSCTL_OUT(req, &xig, sizeof xig);
2594 }
2595
2596 return (error);
2597 }
2598
2599 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2600 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2601 NULL, 0, tcp_pcblist, "S,xtcpcb",
2602 "List of active TCP connections");
2603
2604 #ifdef INET
2605 static int
2606 tcp_getcred(SYSCTL_HANDLER_ARGS)
2607 {
2608 struct xucred xuc;
2609 struct sockaddr_in addrs[2];
2610 struct epoch_tracker et;
2611 struct inpcb *inp;
2612 int error;
2613
2614 error = priv_check(req->td, PRIV_NETINET_GETCRED);
2615 if (error)
2616 return (error);
2617 error = SYSCTL_IN(req, addrs, sizeof(addrs));
2618 if (error)
2619 return (error);
2620 NET_EPOCH_ENTER(et);
2621 inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
2622 addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
2623 NET_EPOCH_EXIT(et);
2624 if (inp != NULL) {
2625 if (error == 0)
2626 error = cr_canseeinpcb(req->td->td_ucred, inp);
2627 if (error == 0)
2628 cru2x(inp->inp_cred, &xuc);
2629 INP_RUNLOCK(inp);
2630 } else
2631 error = ENOENT;
2632 if (error == 0)
2633 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2634 return (error);
2635 }
2636
2637 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
2638 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2639 0, 0, tcp_getcred, "S,xucred",
2640 "Get the xucred of a TCP connection");
2641 #endif /* INET */
2642
2643 #ifdef INET6
2644 static int
2645 tcp6_getcred(SYSCTL_HANDLER_ARGS)
2646 {
2647 struct epoch_tracker et;
2648 struct xucred xuc;
2649 struct sockaddr_in6 addrs[2];
2650 struct inpcb *inp;
2651 int error;
2652 #ifdef INET
2653 int mapped = 0;
2654 #endif
2655
2656 error = priv_check(req->td, PRIV_NETINET_GETCRED);
2657 if (error)
2658 return (error);
2659 error = SYSCTL_IN(req, addrs, sizeof(addrs));
2660 if (error)
2661 return (error);
2662 if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
2663 (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
2664 return (error);
2665 }
2666 if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
2667 #ifdef INET
2668 if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
2669 mapped = 1;
2670 else
2671 #endif
2672 return (EINVAL);
2673 }
2674
2675 NET_EPOCH_ENTER(et);
2676 #ifdef INET
2677 if (mapped == 1)
2678 inp = in_pcblookup(&V_tcbinfo,
2679 *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
2680 addrs[1].sin6_port,
2681 *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
2682 addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
2683 else
2684 #endif
2685 inp = in6_pcblookup(&V_tcbinfo,
2686 &addrs[1].sin6_addr, addrs[1].sin6_port,
2687 &addrs[0].sin6_addr, addrs[0].sin6_port,
2688 INPLOOKUP_RLOCKPCB, NULL);
2689 NET_EPOCH_EXIT(et);
2690 if (inp != NULL) {
2691 if (error == 0)
2692 error = cr_canseeinpcb(req->td->td_ucred, inp);
2693 if (error == 0)
2694 cru2x(inp->inp_cred, &xuc);
2695 INP_RUNLOCK(inp);
2696 } else
2697 error = ENOENT;
2698 if (error == 0)
2699 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
2700 return (error);
2701 }
2702
2703 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
2704 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
2705 0, 0, tcp6_getcred, "S,xucred",
2706 "Get the xucred of a TCP6 connection");
2707 #endif /* INET6 */
2708
2709 #ifdef INET
2710 /* Path MTU to try next when a fragmentation-needed message is received. */
2711 static inline int
2712 tcp_next_pmtu(const struct icmp *icp, const struct ip *ip)
2713 {
2714 int mtu = ntohs(icp->icmp_nextmtu);
2715
2716 /* If no alternative MTU was proposed, try the next smaller one. */
2717 if (!mtu)
2718 mtu = ip_next_mtu(ntohs(ip->ip_len), 1);
2719 if (mtu < V_tcp_minmss + sizeof(struct tcpiphdr))
2720 mtu = V_tcp_minmss + sizeof(struct tcpiphdr);
2721
2722 return (mtu);
2723 }
2724
2725 static void
2726 tcp_ctlinput_with_port(struct icmp *icp, uint16_t port)
2727 {
2728 struct ip *ip;
2729 struct tcphdr *th;
2730 struct inpcb *inp;
2731 struct tcpcb *tp;
2732 struct inpcb *(*notify)(struct inpcb *, int);
2733 struct in_conninfo inc;
2734 tcp_seq icmp_tcp_seq;
2735 int errno, mtu;
2736
2737 errno = icmp_errmap(icp);
2738 switch (errno) {
2739 case 0:
2740 return;
2741 case EMSGSIZE:
2742 notify = tcp_mtudisc_notify;
2743 break;
2744 case ECONNREFUSED:
2745 if (V_icmp_may_rst)
2746 notify = tcp_drop_syn_sent;
2747 else
2748 notify = tcp_notify;
2749 break;
2750 case EHOSTUNREACH:
2751 if (V_icmp_may_rst && icp->icmp_type == ICMP_TIMXCEED)
2752 notify = tcp_drop_syn_sent;
2753 else
2754 notify = tcp_notify;
2755 break;
2756 default:
2757 notify = tcp_notify;
2758 }
2759
2760 ip = &icp->icmp_ip;
2761 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2762 icmp_tcp_seq = th->th_seq;
2763 inp = in_pcblookup(&V_tcbinfo, ip->ip_dst, th->th_dport, ip->ip_src,
2764 th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
2765 if (inp != NULL) {
2766 tp = intotcpcb(inp);
2767 #ifdef TCP_OFFLOAD
2768 if (tp->t_flags & TF_TOE && errno == EMSGSIZE) {
2769 /*
2770 * MTU discovery for offloaded connections. Let
2771 * the TOE driver verify seq# and process it.
2772 */
2773 mtu = tcp_next_pmtu(icp, ip);
2774 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
2775 goto out;
2776 }
2777 #endif
2778 if (tp->t_port != port)
2779 goto out;
2780 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
2781 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
2782 if (errno == EMSGSIZE) {
2783 /*
2784 * MTU discovery: we got a needfrag and
2785 * will potentially try a lower MTU.
2786 */
2787 mtu = tcp_next_pmtu(icp, ip);
2788
2789 /*
2790 * Only process the offered MTU if it
2791 * is smaller than the current one.
2792 */
2793 if (mtu < tp->t_maxseg +
2794 sizeof(struct tcpiphdr)) {
2795 bzero(&inc, sizeof(inc));
2796 inc.inc_faddr = ip->ip_dst;
2797 inc.inc_fibnum =
2798 inp->inp_inc.inc_fibnum;
2799 tcp_hc_updatemtu(&inc, mtu);
2800 inp = tcp_mtudisc(inp, mtu);
2801 }
2802 } else
2803 inp = (*notify)(inp, errno);
2804 }
2805 } else {
2806 bzero(&inc, sizeof(inc));
2807 inc.inc_fport = th->th_dport;
2808 inc.inc_lport = th->th_sport;
2809 inc.inc_faddr = ip->ip_dst;
2810 inc.inc_laddr = ip->ip_src;
2811 syncache_unreach(&inc, icmp_tcp_seq, port);
2812 }
2813 out:
2814 if (inp != NULL)
2815 INP_WUNLOCK(inp);
2816 }
2817
2818 static void
2819 tcp_ctlinput(struct icmp *icmp)
2820 {
2821 tcp_ctlinput_with_port(icmp, htons(0));
2822 }
2823
2824 static void
2825 tcp_ctlinput_viaudp(udp_tun_icmp_param_t param)
2826 {
2827 /* Its a tunneled TCP over UDP icmp */
2828 struct icmp *icmp = param.icmp;
2829 struct ip *outer_ip, *inner_ip;
2830 struct udphdr *udp;
2831 struct tcphdr *th, ttemp;
2832 int i_hlen, o_len;
2833 uint16_t port;
2834
2835 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
2836 inner_ip = &icmp->icmp_ip;
2837 i_hlen = inner_ip->ip_hl << 2;
2838 o_len = ntohs(outer_ip->ip_len);
2839 if (o_len <
2840 (sizeof(struct ip) + 8 + i_hlen + sizeof(struct udphdr) + offsetof(struct tcphdr, th_ack))) {
2841 /* Not enough data present */
2842 return;
2843 }
2844 /* Ok lets strip out the inner udphdr header by copying up on top of it the tcp hdr */
2845 udp = (struct udphdr *)(((caddr_t)inner_ip) + i_hlen);
2846 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
2847 return;
2848 }
2849 port = udp->uh_dport;
2850 th = (struct tcphdr *)(udp + 1);
2851 memcpy(&ttemp, th, sizeof(struct tcphdr));
2852 memcpy(udp, &ttemp, sizeof(struct tcphdr));
2853 /* Now adjust down the size of the outer IP header */
2854 o_len -= sizeof(struct udphdr);
2855 outer_ip->ip_len = htons(o_len);
2856 /* Now call in to the normal handling code */
2857 tcp_ctlinput_with_port(icmp, port);
2858 }
2859 #endif /* INET */
2860
2861 #ifdef INET6
2862 static inline int
2863 tcp6_next_pmtu(const struct icmp6_hdr *icmp6)
2864 {
2865 int mtu = ntohl(icmp6->icmp6_mtu);
2866
2867 /*
2868 * If no alternative MTU was proposed, or the proposed MTU was too
2869 * small, set to the min.
2870 */
2871 if (mtu < IPV6_MMTU)
2872 mtu = IPV6_MMTU - 8; /* XXXNP: what is the adjustment for? */
2873 return (mtu);
2874 }
2875
2876 static void
2877 tcp6_ctlinput_with_port(struct ip6ctlparam *ip6cp, uint16_t port)
2878 {
2879 struct in6_addr *dst;
2880 struct inpcb *(*notify)(struct inpcb *, int);
2881 struct ip6_hdr *ip6;
2882 struct mbuf *m;
2883 struct inpcb *inp;
2884 struct tcpcb *tp;
2885 struct icmp6_hdr *icmp6;
2886 struct in_conninfo inc;
2887 struct tcp_ports {
2888 uint16_t th_sport;
2889 uint16_t th_dport;
2890 } t_ports;
2891 tcp_seq icmp_tcp_seq;
2892 unsigned int mtu;
2893 unsigned int off;
2894 int errno;
2895
2896 icmp6 = ip6cp->ip6c_icmp6;
2897 m = ip6cp->ip6c_m;
2898 ip6 = ip6cp->ip6c_ip6;
2899 off = ip6cp->ip6c_off;
2900 dst = &ip6cp->ip6c_finaldst->sin6_addr;
2901
2902 errno = icmp6_errmap(icmp6);
2903 switch (errno) {
2904 case 0:
2905 return;
2906 case EMSGSIZE:
2907 notify = tcp_mtudisc_notify;
2908 break;
2909 case ECONNREFUSED:
2910 if (V_icmp_may_rst)
2911 notify = tcp_drop_syn_sent;
2912 else
2913 notify = tcp_notify;
2914 break;
2915 case EHOSTUNREACH:
2916 /*
2917 * There are only four ICMPs that may reset connection:
2918 * - administratively prohibited
2919 * - port unreachable
2920 * - time exceeded in transit
2921 * - unknown next header
2922 */
2923 if (V_icmp_may_rst &&
2924 ((icmp6->icmp6_type == ICMP6_DST_UNREACH &&
2925 (icmp6->icmp6_code == ICMP6_DST_UNREACH_ADMIN ||
2926 icmp6->icmp6_code == ICMP6_DST_UNREACH_NOPORT)) ||
2927 (icmp6->icmp6_type == ICMP6_TIME_EXCEEDED &&
2928 icmp6->icmp6_code == ICMP6_TIME_EXCEED_TRANSIT) ||
2929 (icmp6->icmp6_type == ICMP6_PARAM_PROB &&
2930 icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER)))
2931 notify = tcp_drop_syn_sent;
2932 else
2933 notify = tcp_notify;
2934 break;
2935 default:
2936 notify = tcp_notify;
2937 }
2938
2939 /* Check if we can safely get the ports from the tcp hdr */
2940 if (m == NULL ||
2941 (m->m_pkthdr.len <
2942 (int32_t) (off + sizeof(struct tcp_ports)))) {
2943 return;
2944 }
2945 bzero(&t_ports, sizeof(struct tcp_ports));
2946 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2947 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
2948 &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL);
2949 off += sizeof(struct tcp_ports);
2950 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2951 goto out;
2952 }
2953 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2954 if (inp != NULL) {
2955 tp = intotcpcb(inp);
2956 #ifdef TCP_OFFLOAD
2957 if (tp->t_flags & TF_TOE && errno == EMSGSIZE) {
2958 /* MTU discovery for offloaded connections. */
2959 mtu = tcp6_next_pmtu(icmp6);
2960 tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
2961 goto out;
2962 }
2963 #endif
2964 if (tp->t_port != port)
2965 goto out;
2966 if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
2967 SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
2968 if (errno == EMSGSIZE) {
2969 /*
2970 * MTU discovery:
2971 * If we got a needfrag set the MTU
2972 * in the route to the suggested new
2973 * value (if given) and then notify.
2974 */
2975 mtu = tcp6_next_pmtu(icmp6);
2976
2977 bzero(&inc, sizeof(inc));
2978 inc.inc_fibnum = M_GETFIB(m);
2979 inc.inc_flags |= INC_ISIPV6;
2980 inc.inc6_faddr = *dst;
2981 if (in6_setscope(&inc.inc6_faddr,
2982 m->m_pkthdr.rcvif, NULL))
2983 goto out;
2984 /*
2985 * Only process the offered MTU if it
2986 * is smaller than the current one.
2987 */
2988 if (mtu < tp->t_maxseg +
2989 sizeof (struct tcphdr) +
2990 sizeof (struct ip6_hdr)) {
2991 tcp_hc_updatemtu(&inc, mtu);
2992 tcp_mtudisc(inp, mtu);
2993 ICMP6STAT_INC(icp6s_pmtuchg);
2994 }
2995 } else
2996 inp = (*notify)(inp, errno);
2997 }
2998 } else {
2999 bzero(&inc, sizeof(inc));
3000 inc.inc_fibnum = M_GETFIB(m);
3001 inc.inc_flags |= INC_ISIPV6;
3002 inc.inc_fport = t_ports.th_dport;
3003 inc.inc_lport = t_ports.th_sport;
3004 inc.inc6_faddr = *dst;
3005 inc.inc6_laddr = ip6->ip6_src;
3006 syncache_unreach(&inc, icmp_tcp_seq, port);
3007 }
3008 out:
3009 if (inp != NULL)
3010 INP_WUNLOCK(inp);
3011 }
3012
3013 static void
3014 tcp6_ctlinput(struct ip6ctlparam *ctl)
3015 {
3016 tcp6_ctlinput_with_port(ctl, htons(0));
3017 }
3018
3019 static void
3020 tcp6_ctlinput_viaudp(udp_tun_icmp_param_t param)
3021 {
3022 struct ip6ctlparam *ip6cp = param.ip6cp;
3023 struct mbuf *m;
3024 struct udphdr *udp;
3025 uint16_t port;
3026
3027 m = m_pulldown(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(struct udphdr), NULL);
3028 if (m == NULL) {
3029 return;
3030 }
3031 udp = mtod(m, struct udphdr *);
3032 if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
3033 return;
3034 }
3035 port = udp->uh_dport;
3036 m_adj(m, sizeof(struct udphdr));
3037 if ((m->m_flags & M_PKTHDR) == 0) {
3038 ip6cp->ip6c_m->m_pkthdr.len -= sizeof(struct udphdr);
3039 }
3040 /* Now call in to the normal handling code */
3041 tcp6_ctlinput_with_port(ip6cp, port);
3042 }
3043
3044 #endif /* INET6 */
3045
3046 static uint32_t
3047 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len)
3048 {
3049 SIPHASH_CTX ctx;
3050 uint32_t hash[2];
3051
3052 KASSERT(len >= SIPHASH_KEY_LENGTH,
3053 ("%s: keylen %u too short ", __func__, len));
3054 SipHash24_Init(&ctx);
3055 SipHash_SetKey(&ctx, (uint8_t *)key);
3056 SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t));
3057 SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t));
3058 switch (inc->inc_flags & INC_ISIPV6) {
3059 #ifdef INET
3060 case 0:
3061 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr));
3062 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr));
3063 break;
3064 #endif
3065 #ifdef INET6
3066 case INC_ISIPV6:
3067 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr));
3068 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr));
3069 break;
3070 #endif
3071 }
3072 SipHash_Final((uint8_t *)hash, &ctx);
3073
3074 return (hash[0] ^ hash[1]);
3075 }
3076
3077 uint32_t
3078 tcp_new_ts_offset(struct in_conninfo *inc)
3079 {
3080 struct in_conninfo inc_store, *local_inc;
3081
3082 if (!V_tcp_ts_offset_per_conn) {
3083 memcpy(&inc_store, inc, sizeof(struct in_conninfo));
3084 inc_store.inc_lport = 0;
3085 inc_store.inc_fport = 0;
3086 local_inc = &inc_store;
3087 } else {
3088 local_inc = inc;
3089 }
3090 return (tcp_keyed_hash(local_inc, V_ts_offset_secret,
3091 sizeof(V_ts_offset_secret)));
3092 }
3093
3094 /*
3095 * Following is where TCP initial sequence number generation occurs.
3096 *
3097 * There are two places where we must use initial sequence numbers:
3098 * 1. In SYN-ACK packets.
3099 * 2. In SYN packets.
3100 *
3101 * All ISNs for SYN-ACK packets are generated by the syncache. See
3102 * tcp_syncache.c for details.
3103 *
3104 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
3105 * depends on this property. In addition, these ISNs should be
3106 * unguessable so as to prevent connection hijacking. To satisfy
3107 * the requirements of this situation, the algorithm outlined in
3108 * RFC 1948 is used, with only small modifications.
3109 *
3110 * Implementation details:
3111 *
3112 * Time is based off the system timer, and is corrected so that it
3113 * increases by one megabyte per second. This allows for proper
3114 * recycling on high speed LANs while still leaving over an hour
3115 * before rollover.
3116 *
3117 * As reading the *exact* system time is too expensive to be done
3118 * whenever setting up a TCP connection, we increment the time
3119 * offset in two ways. First, a small random positive increment
3120 * is added to isn_offset for each connection that is set up.
3121 * Second, the function tcp_isn_tick fires once per clock tick
3122 * and increments isn_offset as necessary so that sequence numbers
3123 * are incremented at approximately ISN_BYTES_PER_SECOND. The
3124 * random positive increments serve only to ensure that the same
3125 * exact sequence number is never sent out twice (as could otherwise
3126 * happen when a port is recycled in less than the system tick
3127 * interval.)
3128 *
3129 * net.inet.tcp.isn_reseed_interval controls the number of seconds
3130 * between seeding of isn_secret. This is normally set to zero,
3131 * as reseeding should not be necessary.
3132 *
3133 * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
3134 * isn_offset_old, and isn_ctx is performed using the ISN lock. In
3135 * general, this means holding an exclusive (write) lock.
3136 */
3137
3138 #define ISN_BYTES_PER_SECOND 1048576
3139 #define ISN_STATIC_INCREMENT 4096
3140 #define ISN_RANDOM_INCREMENT (4096 - 1)
3141 #define ISN_SECRET_LENGTH SIPHASH_KEY_LENGTH
3142
3143 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]);
3144 VNET_DEFINE_STATIC(int, isn_last);
3145 VNET_DEFINE_STATIC(int, isn_last_reseed);
3146 VNET_DEFINE_STATIC(u_int32_t, isn_offset);
3147 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old);
3148
3149 #define V_isn_secret VNET(isn_secret)
3150 #define V_isn_last VNET(isn_last)
3151 #define V_isn_last_reseed VNET(isn_last_reseed)
3152 #define V_isn_offset VNET(isn_offset)
3153 #define V_isn_offset_old VNET(isn_offset_old)
3154
3155 tcp_seq
3156 tcp_new_isn(struct in_conninfo *inc)
3157 {
3158 tcp_seq new_isn;
3159 u_int32_t projected_offset;
3160
3161 ISN_LOCK();
3162 /* Seed if this is the first use, reseed if requested. */
3163 if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
3164 (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
3165 < (u_int)ticks))) {
3166 arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0);
3167 V_isn_last_reseed = ticks;
3168 }
3169
3170 /* Compute the hash and return the ISN. */
3171 new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret,
3172 sizeof(V_isn_secret));
3173 V_isn_offset += ISN_STATIC_INCREMENT +
3174 (arc4random() & ISN_RANDOM_INCREMENT);
3175 if (ticks != V_isn_last) {
3176 projected_offset = V_isn_offset_old +
3177 ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
3178 if (SEQ_GT(projected_offset, V_isn_offset))
3179 V_isn_offset = projected_offset;
3180 V_isn_offset_old = V_isn_offset;
3181 V_isn_last = ticks;
3182 }
3183 new_isn += V_isn_offset;
3184 ISN_UNLOCK();
3185 return (new_isn);
3186 }
3187
3188 /*
3189 * When a specific ICMP unreachable message is received and the
3190 * connection state is SYN-SENT, drop the connection. This behavior
3191 * is controlled by the icmp_may_rst sysctl.
3192 */
3193 static struct inpcb *
3194 tcp_drop_syn_sent(struct inpcb *inp, int errno)
3195 {
3196 struct tcpcb *tp;
3197
3198 NET_EPOCH_ASSERT();
3199 INP_WLOCK_ASSERT(inp);
3200
3201 tp = intotcpcb(inp);
3202 if (tp->t_state != TCPS_SYN_SENT)
3203 return (inp);
3204
3205 if (IS_FASTOPEN(tp->t_flags))
3206 tcp_fastopen_disable_path(tp);
3207
3208 tp = tcp_drop(tp, errno);
3209 if (tp != NULL)
3210 return (inp);
3211 else
3212 return (NULL);
3213 }
3214
3215 /*
3216 * When `need fragmentation' ICMP is received, update our idea of the MSS
3217 * based on the new value. Also nudge TCP to send something, since we
3218 * know the packet we just sent was dropped.
3219 * This duplicates some code in the tcp_mss() function in tcp_input.c.
3220 */
3221 static struct inpcb *
3222 tcp_mtudisc_notify(struct inpcb *inp, int error)
3223 {
3224
3225 return (tcp_mtudisc(inp, -1));
3226 }
3227
3228 static struct inpcb *
3229 tcp_mtudisc(struct inpcb *inp, int mtuoffer)
3230 {
3231 struct tcpcb *tp;
3232 struct socket *so;
3233
3234 INP_WLOCK_ASSERT(inp);
3235
3236 tp = intotcpcb(inp);
3237 KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
3238
3239 tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);
3240
3241 so = inp->inp_socket;
3242 SOCKBUF_LOCK(&so->so_snd);
3243 /* If the mss is larger than the socket buffer, decrease the mss. */
3244 if (so->so_snd.sb_hiwat < tp->t_maxseg)
3245 tp->t_maxseg = so->so_snd.sb_hiwat;
3246 SOCKBUF_UNLOCK(&so->so_snd);
3247
3248 TCPSTAT_INC(tcps_mturesent);
3249 tp->t_rtttime = 0;
3250 tp->snd_nxt = tp->snd_una;
3251 tcp_free_sackholes(tp);
3252 tp->snd_recover = tp->snd_max;
3253 if (tp->t_flags & TF_SACK_PERMIT)
3254 EXIT_FASTRECOVERY(tp->t_flags);
3255 if (tp->t_fb->tfb_tcp_mtu_chg != NULL) {
3256 /*
3257 * Conceptually the snd_nxt setting
3258 * and freeing sack holes should
3259 * be done by the default stacks
3260 * own tfb_tcp_mtu_chg().
3261 */
3262 tp->t_fb->tfb_tcp_mtu_chg(tp);
3263 }
3264 if (tcp_output(tp) < 0)
3265 return (NULL);
3266 else
3267 return (inp);
3268 }
3269
3270 #ifdef INET
3271 /*
3272 * Look-up the routing entry to the peer of this inpcb. If no route
3273 * is found and it cannot be allocated, then return 0. This routine
3274 * is called by TCP routines that access the rmx structure and by
3275 * tcp_mss_update to get the peer/interface MTU.
3276 */
3277 uint32_t
3278 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
3279 {
3280 struct nhop_object *nh;
3281 struct ifnet *ifp;
3282 uint32_t maxmtu = 0;
3283
3284 KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
3285
3286 if (inc->inc_faddr.s_addr != INADDR_ANY) {
3287 nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0);
3288 if (nh == NULL)
3289 return (0);
3290
3291 ifp = nh->nh_ifp;
3292 maxmtu = nh->nh_mtu;
3293
3294 /* Report additional interface capabilities. */
3295 if (cap != NULL) {
3296 if (ifp->if_capenable & IFCAP_TSO4 &&
3297 ifp->if_hwassist & CSUM_TSO) {
3298 cap->ifcap |= CSUM_TSO;
3299 cap->tsomax = ifp->if_hw_tsomax;
3300 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3301 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3302 }
3303 }
3304 }
3305 return (maxmtu);
3306 }
3307 #endif /* INET */
3308
3309 #ifdef INET6
3310 uint32_t
3311 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
3312 {
3313 struct nhop_object *nh;
3314 struct in6_addr dst6;
3315 uint32_t scopeid;
3316 struct ifnet *ifp;
3317 uint32_t maxmtu = 0;
3318
3319 KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
3320
3321 if (inc->inc_flags & INC_IPV6MINMTU)
3322 return (IPV6_MMTU);
3323
3324 if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
3325 in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
3326 nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0);
3327 if (nh == NULL)
3328 return (0);
3329
3330 ifp = nh->nh_ifp;
3331 maxmtu = nh->nh_mtu;
3332
3333 /* Report additional interface capabilities. */
3334 if (cap != NULL) {
3335 if (ifp->if_capenable & IFCAP_TSO6 &&
3336 ifp->if_hwassist & CSUM_TSO) {
3337 cap->ifcap |= CSUM_TSO;
3338 cap->tsomax = ifp->if_hw_tsomax;
3339 cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
3340 cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
3341 }
3342 }
3343 }
3344
3345 return (maxmtu);
3346 }
3347
3348 /*
3349 * Handle setsockopt(IPV6_USE_MIN_MTU) by a TCP stack.
3350 *
3351 * XXXGL: we are updating inpcb here with INC_IPV6MINMTU flag.
3352 * The right place to do that is ip6_setpktopt() that has just been
3353 * executed. By the way it just filled ip6po_minmtu for us.
3354 */
3355 void
3356 tcp6_use_min_mtu(struct tcpcb *tp)
3357 {
3358 struct inpcb *inp = tptoinpcb(tp);
3359
3360 INP_WLOCK_ASSERT(inp);
3361 /*
3362 * In case of the IPV6_USE_MIN_MTU socket
3363 * option, the INC_IPV6MINMTU flag to announce
3364 * a corresponding MSS during the initial
3365 * handshake. If the TCP connection is not in
3366 * the front states, just reduce the MSS being
3367 * used. This avoids the sending of TCP
3368 * segments which will be fragmented at the
3369 * IPv6 layer.
3370 */
3371 inp->inp_inc.inc_flags |= INC_IPV6MINMTU;
3372 if ((tp->t_state >= TCPS_SYN_SENT) &&
3373 (inp->inp_inc.inc_flags & INC_ISIPV6)) {
3374 struct ip6_pktopts *opt;
3375
3376 opt = inp->in6p_outputopts;
3377 if (opt != NULL && opt->ip6po_minmtu == IP6PO_MINMTU_ALL &&
3378 tp->t_maxseg > TCP6_MSS)
3379 tp->t_maxseg = TCP6_MSS;
3380 }
3381 }
3382 #endif /* INET6 */
3383
3384 /*
3385 * Calculate effective SMSS per RFC5681 definition for a given TCP
3386 * connection at its current state, taking into account SACK and etc.
3387 */
3388 u_int
3389 tcp_maxseg(const struct tcpcb *tp)
3390 {
3391 u_int optlen;
3392
3393 if (tp->t_flags & TF_NOOPT)
3394 return (tp->t_maxseg);
3395
3396 /*
3397 * Here we have a simplified code from tcp_addoptions(),
3398 * without a proper loop, and having most of paddings hardcoded.
3399 * We might make mistakes with padding here in some edge cases,
3400 * but this is harmless, since result of tcp_maxseg() is used
3401 * only in cwnd and ssthresh estimations.
3402 */
3403 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3404 if (tp->t_flags & TF_RCVD_TSTMP)
3405 optlen = TCPOLEN_TSTAMP_APPA;
3406 else
3407 optlen = 0;
3408 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3409 if (tp->t_flags & TF_SIGNATURE)
3410 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3411 #endif
3412 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
3413 optlen += TCPOLEN_SACKHDR;
3414 optlen += tp->rcv_numsacks * TCPOLEN_SACK;
3415 optlen = PADTCPOLEN(optlen);
3416 }
3417 } else {
3418 if (tp->t_flags & TF_REQ_TSTMP)
3419 optlen = TCPOLEN_TSTAMP_APPA;
3420 else
3421 optlen = PADTCPOLEN(TCPOLEN_MAXSEG);
3422 if (tp->t_flags & TF_REQ_SCALE)
3423 optlen += PADTCPOLEN(TCPOLEN_WINDOW);
3424 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3425 if (tp->t_flags & TF_SIGNATURE)
3426 optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
3427 #endif
3428 if (tp->t_flags & TF_SACK_PERMIT)
3429 optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED);
3430 }
3431 #undef PAD
3432 optlen = min(optlen, TCP_MAXOLEN);
3433 return (tp->t_maxseg - optlen);
3434 }
3435
3436
3437 u_int
3438 tcp_fixed_maxseg(const struct tcpcb *tp)
3439 {
3440 int optlen;
3441
3442 if (tp->t_flags & TF_NOOPT)
3443 return (tp->t_maxseg);
3444
3445 /*
3446 * Here we have a simplified code from tcp_addoptions(),
3447 * without a proper loop, and having most of paddings hardcoded.
3448 * We only consider fixed options that we would send every
3449 * time I.e. SACK is not considered. This is important
3450 * for cc modules to figure out what the modulo of the
3451 * cwnd should be.
3452 */
3453 #define PAD(len) ((((len) / 4) + !!((len) % 4)) * 4)
3454 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3455 if (tp->t_flags & TF_RCVD_TSTMP)
3456 optlen = TCPOLEN_TSTAMP_APPA;
3457 else
3458 optlen = 0;
3459 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3460 if (tp->t_flags & TF_SIGNATURE)
3461 optlen += PAD(TCPOLEN_SIGNATURE);
3462 #endif
3463 } else {
3464 if (tp->t_flags & TF_REQ_TSTMP)
3465 optlen = TCPOLEN_TSTAMP_APPA;
3466 else
3467 optlen = PAD(TCPOLEN_MAXSEG);
3468 if (tp->t_flags & TF_REQ_SCALE)
3469 optlen += PAD(TCPOLEN_WINDOW);
3470 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
3471 if (tp->t_flags & TF_SIGNATURE)
3472 optlen += PAD(TCPOLEN_SIGNATURE);
3473 #endif
3474 if (tp->t_flags & TF_SACK_PERMIT)
3475 optlen += PAD(TCPOLEN_SACK_PERMITTED);
3476 }
3477 #undef PAD
3478 optlen = min(optlen, TCP_MAXOLEN);
3479 return (tp->t_maxseg - optlen);
3480 }
3481
3482
3483
3484 static int
3485 sysctl_drop(SYSCTL_HANDLER_ARGS)
3486 {
3487 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
3488 struct sockaddr_storage addrs[2];
3489 struct inpcb *inp;
3490 struct tcpcb *tp;
3491 #ifdef INET
3492 struct sockaddr_in *fin = NULL, *lin = NULL;
3493 #endif
3494 struct epoch_tracker et;
3495 #ifdef INET6
3496 struct sockaddr_in6 *fin6, *lin6;
3497 #endif
3498 int error;
3499
3500 inp = NULL;
3501 #ifdef INET6
3502 fin6 = lin6 = NULL;
3503 #endif
3504 error = 0;
3505
3506 if (req->oldptr != NULL || req->oldlen != 0)
3507 return (EINVAL);
3508 if (req->newptr == NULL)
3509 return (EPERM);
3510 if (req->newlen < sizeof(addrs))
3511 return (ENOMEM);
3512 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3513 if (error)
3514 return (error);
3515
3516 switch (addrs[0].ss_family) {
3517 #ifdef INET6
3518 case AF_INET6:
3519 fin6 = (struct sockaddr_in6 *)&addrs[0];
3520 lin6 = (struct sockaddr_in6 *)&addrs[1];
3521 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3522 lin6->sin6_len != sizeof(struct sockaddr_in6))
3523 return (EINVAL);
3524 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3525 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3526 return (EINVAL);
3527 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3528 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3529 #ifdef INET
3530 fin = (struct sockaddr_in *)&addrs[0];
3531 lin = (struct sockaddr_in *)&addrs[1];
3532 #endif
3533 break;
3534 }
3535 error = sa6_embedscope(fin6, V_ip6_use_defzone);
3536 if (error)
3537 return (error);
3538 error = sa6_embedscope(lin6, V_ip6_use_defzone);
3539 if (error)
3540 return (error);
3541 break;
3542 #endif
3543 #ifdef INET
3544 case AF_INET:
3545 fin = (struct sockaddr_in *)&addrs[0];
3546 lin = (struct sockaddr_in *)&addrs[1];
3547 if (fin->sin_len != sizeof(struct sockaddr_in) ||
3548 lin->sin_len != sizeof(struct sockaddr_in))
3549 return (EINVAL);
3550 break;
3551 #endif
3552 default:
3553 return (EINVAL);
3554 }
3555 NET_EPOCH_ENTER(et);
3556 switch (addrs[0].ss_family) {
3557 #ifdef INET6
3558 case AF_INET6:
3559 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3560 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3561 INPLOOKUP_WLOCKPCB, NULL);
3562 break;
3563 #endif
3564 #ifdef INET
3565 case AF_INET:
3566 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3567 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3568 break;
3569 #endif
3570 }
3571 if (inp != NULL) {
3572 if (!SOLISTENING(inp->inp_socket)) {
3573 tp = intotcpcb(inp);
3574 tp = tcp_drop(tp, ECONNABORTED);
3575 if (tp != NULL)
3576 INP_WUNLOCK(inp);
3577 } else
3578 INP_WUNLOCK(inp);
3579 } else
3580 error = ESRCH;
3581 NET_EPOCH_EXIT(et);
3582 return (error);
3583 }
3584
3585 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
3586 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3587 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "",
3588 "Drop TCP connection");
3589
3590 static int
3591 tcp_sysctl_setsockopt(SYSCTL_HANDLER_ARGS)
3592 {
3593 return (sysctl_setsockopt(oidp, arg1, arg2, req, &V_tcbinfo,
3594 &tcp_ctloutput_set));
3595 }
3596
3597 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, setsockopt,
3598 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3599 CTLFLAG_MPSAFE, NULL, 0, tcp_sysctl_setsockopt, "",
3600 "Set socket option for TCP endpoint");
3601
3602 #ifdef KERN_TLS
3603 static int
3604 sysctl_switch_tls(SYSCTL_HANDLER_ARGS)
3605 {
3606 /* addrs[0] is a foreign socket, addrs[1] is a local one. */
3607 struct sockaddr_storage addrs[2];
3608 struct inpcb *inp;
3609 #ifdef INET
3610 struct sockaddr_in *fin = NULL, *lin = NULL;
3611 #endif
3612 struct epoch_tracker et;
3613 #ifdef INET6
3614 struct sockaddr_in6 *fin6, *lin6;
3615 #endif
3616 int error;
3617
3618 inp = NULL;
3619 #ifdef INET6
3620 fin6 = lin6 = NULL;
3621 #endif
3622 error = 0;
3623
3624 if (req->oldptr != NULL || req->oldlen != 0)
3625 return (EINVAL);
3626 if (req->newptr == NULL)
3627 return (EPERM);
3628 if (req->newlen < sizeof(addrs))
3629 return (ENOMEM);
3630 error = SYSCTL_IN(req, &addrs, sizeof(addrs));
3631 if (error)
3632 return (error);
3633
3634 switch (addrs[0].ss_family) {
3635 #ifdef INET6
3636 case AF_INET6:
3637 fin6 = (struct sockaddr_in6 *)&addrs[0];
3638 lin6 = (struct sockaddr_in6 *)&addrs[1];
3639 if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
3640 lin6->sin6_len != sizeof(struct sockaddr_in6))
3641 return (EINVAL);
3642 if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
3643 if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
3644 return (EINVAL);
3645 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
3646 in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
3647 #ifdef INET
3648 fin = (struct sockaddr_in *)&addrs[0];
3649 lin = (struct sockaddr_in *)&addrs[1];
3650 #endif
3651 break;
3652 }
3653 error = sa6_embedscope(fin6, V_ip6_use_defzone);
3654 if (error)
3655 return (error);
3656 error = sa6_embedscope(lin6, V_ip6_use_defzone);
3657 if (error)
3658 return (error);
3659 break;
3660 #endif
3661 #ifdef INET
3662 case AF_INET:
3663 fin = (struct sockaddr_in *)&addrs[0];
3664 lin = (struct sockaddr_in *)&addrs[1];
3665 if (fin->sin_len != sizeof(struct sockaddr_in) ||
3666 lin->sin_len != sizeof(struct sockaddr_in))
3667 return (EINVAL);
3668 break;
3669 #endif
3670 default:
3671 return (EINVAL);
3672 }
3673 NET_EPOCH_ENTER(et);
3674 switch (addrs[0].ss_family) {
3675 #ifdef INET6
3676 case AF_INET6:
3677 inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
3678 fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
3679 INPLOOKUP_WLOCKPCB, NULL);
3680 break;
3681 #endif
3682 #ifdef INET
3683 case AF_INET:
3684 inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
3685 lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
3686 break;
3687 #endif
3688 }
3689 NET_EPOCH_EXIT(et);
3690 if (inp != NULL) {
3691 struct socket *so;
3692
3693 so = inp->inp_socket;
3694 soref(so);
3695 error = ktls_set_tx_mode(so,
3696 arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET);
3697 INP_WUNLOCK(inp);
3698 sorele(so);
3699 } else
3700 error = ESRCH;
3701 return (error);
3702 }
3703
3704 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls,
3705 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3706 CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "",
3707 "Switch TCP connection to SW TLS");
3708 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls,
3709 CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
3710 CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "",
3711 "Switch TCP connection to ifnet TLS");
3712 #endif
3713
3714 /*
3715 * Generate a standardized TCP log line for use throughout the
3716 * tcp subsystem. Memory allocation is done with M_NOWAIT to
3717 * allow use in the interrupt context.
3718 *
3719 * NB: The caller MUST free(s, M_TCPLOG) the returned string.
3720 * NB: The function may return NULL if memory allocation failed.
3721 *
3722 * Due to header inclusion and ordering limitations the struct ip
3723 * and ip6_hdr pointers have to be passed as void pointers.
3724 */
3725 char *
3726 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
3727 const void *ip6hdr)
3728 {
3729
3730 /* Is logging enabled? */
3731 if (V_tcp_log_in_vain == 0)
3732 return (NULL);
3733
3734 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3735 }
3736
3737 char *
3738 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
3739 const void *ip6hdr)
3740 {
3741
3742 /* Is logging enabled? */
3743 if (tcp_log_debug == 0)
3744 return (NULL);
3745
3746 return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
3747 }
3748
3749 static char *
3750 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
3751 const void *ip6hdr)
3752 {
3753 char *s, *sp;
3754 size_t size;
3755 #ifdef INET
3756 const struct ip *ip = (const struct ip *)ip4hdr;
3757 #endif
3758 #ifdef INET6
3759 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)ip6hdr;
3760 #endif /* INET6 */
3761
3762 /*
3763 * The log line looks like this:
3764 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
3765 */
3766 size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
3767 sizeof(PRINT_TH_FLAGS) + 1 +
3768 #ifdef INET6
3769 2 * INET6_ADDRSTRLEN;
3770 #else
3771 2 * INET_ADDRSTRLEN;
3772 #endif /* INET6 */
3773
3774 s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
3775 if (s == NULL)
3776 return (NULL);
3777
3778 strcat(s, "TCP: [");
3779 sp = s + strlen(s);
3780
3781 if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
3782 inet_ntoa_r(inc->inc_faddr, sp);
3783 sp = s + strlen(s);
3784 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
3785 sp = s + strlen(s);
3786 inet_ntoa_r(inc->inc_laddr, sp);
3787 sp = s + strlen(s);
3788 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
3789 #ifdef INET6
3790 } else if (inc) {
3791 ip6_sprintf(sp, &inc->inc6_faddr);
3792 sp = s + strlen(s);
3793 sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
3794 sp = s + strlen(s);
3795 ip6_sprintf(sp, &inc->inc6_laddr);
3796 sp = s + strlen(s);
3797 sprintf(sp, "]:%i", ntohs(inc->inc_lport));
3798 } else if (ip6 && th) {
3799 ip6_sprintf(sp, &ip6->ip6_src);
3800 sp = s + strlen(s);
3801 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
3802 sp = s + strlen(s);
3803 ip6_sprintf(sp, &ip6->ip6_dst);
3804 sp = s + strlen(s);
3805 sprintf(sp, "]:%i", ntohs(th->th_dport));
3806 #endif /* INET6 */
3807 #ifdef INET
3808 } else if (ip && th) {
3809 inet_ntoa_r(ip->ip_src, sp);
3810 sp = s + strlen(s);
3811 sprintf(sp, "]:%i to [", ntohs(th->th_sport));
3812 sp = s + strlen(s);
3813 inet_ntoa_r(ip->ip_dst, sp);
3814 sp = s + strlen(s);
3815 sprintf(sp, "]:%i", ntohs(th->th_dport));
3816 #endif /* INET */
3817 } else {
3818 free(s, M_TCPLOG);
3819 return (NULL);
3820 }
3821 sp = s + strlen(s);
3822 if (th)
3823 sprintf(sp, " tcpflags 0x%b", tcp_get_flags(th), PRINT_TH_FLAGS);
3824 if (*(s + size - 1) != '\0')
3825 panic("%s: string too long", __func__);
3826 return (s);
3827 }
3828
3829 /*
3830 * A subroutine which makes it easy to track TCP state changes with DTrace.
3831 * This function shouldn't be called for t_state initializations that don't
3832 * correspond to actual TCP state transitions.
3833 */
3834 void
3835 tcp_state_change(struct tcpcb *tp, int newstate)
3836 {
3837 #if defined(KDTRACE_HOOKS)
3838 int pstate = tp->t_state;
3839 #endif
3840
3841 TCPSTATES_DEC(tp->t_state);
3842 TCPSTATES_INC(newstate);
3843 tp->t_state = newstate;
3844 TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
3845 }
3846
3847 /*
3848 * Create an external-format (``xtcpcb'') structure using the information in
3849 * the kernel-format tcpcb structure pointed to by tp. This is done to
3850 * reduce the spew of irrelevant information over this interface, to isolate
3851 * user code from changes in the kernel structure, and potentially to provide
3852 * information-hiding if we decide that some of this information should be
3853 * hidden from users.
3854 */
3855 void
3856 tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
3857 {
3858 struct tcpcb *tp = intotcpcb(inp);
3859 sbintime_t now;
3860
3861 bzero(xt, sizeof(*xt));
3862 xt->t_state = tp->t_state;
3863 xt->t_logstate = tp->t_logstate;
3864 xt->t_flags = tp->t_flags;
3865 xt->t_sndzerowin = tp->t_sndzerowin;
3866 xt->t_sndrexmitpack = tp->t_sndrexmitpack;
3867 xt->t_rcvoopack = tp->t_rcvoopack;
3868 xt->t_rcv_wnd = tp->rcv_wnd;
3869 xt->t_snd_wnd = tp->snd_wnd;
3870 xt->t_snd_cwnd = tp->snd_cwnd;
3871 xt->t_snd_ssthresh = tp->snd_ssthresh;
3872 xt->t_dsack_bytes = tp->t_dsack_bytes;
3873 xt->t_dsack_tlp_bytes = tp->t_dsack_tlp_bytes;
3874 xt->t_dsack_pack = tp->t_dsack_pack;
3875 xt->t_maxseg = tp->t_maxseg;
3876 xt->xt_ecn = (tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0 +
3877 (tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0;
3878
3879 now = getsbinuptime();
3880 #define COPYTIMER(which,where) do { \
3881 if (tp->t_timers[which] != SBT_MAX) \
3882 xt->where = (tp->t_timers[which] - now) / SBT_1MS; \
3883 else \
3884 xt->where = 0; \
3885 } while (0)
3886 COPYTIMER(TT_DELACK, tt_delack);
3887 COPYTIMER(TT_REXMT, tt_rexmt);
3888 COPYTIMER(TT_PERSIST, tt_persist);
3889 COPYTIMER(TT_KEEP, tt_keep);
3890 COPYTIMER(TT_2MSL, tt_2msl);
3891 #undef COPYTIMER
3892 xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;
3893
3894 xt->xt_encaps_port = tp->t_port;
3895 bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
3896 TCP_FUNCTION_NAME_LEN_MAX);
3897 bcopy(CC_ALGO(tp)->name, xt->xt_cc, TCP_CA_NAME_MAX);
3898 #ifdef TCP_BLACKBOX
3899 (void)tcp_log_get_id(tp, xt->xt_logid);
3900 #endif
3901
3902 xt->xt_len = sizeof(struct xtcpcb);
3903 in_pcbtoxinpcb(inp, &xt->xt_inp);
3904 }
3905
3906 void
3907 tcp_log_end_status(struct tcpcb *tp, uint8_t status)
3908 {
3909 uint32_t bit, i;
3910
3911 if ((tp == NULL) ||
3912 (status > TCP_EI_STATUS_MAX_VALUE) ||
3913 (status == 0)) {
3914 /* Invalid */
3915 return;
3916 }
3917 if (status > (sizeof(uint32_t) * 8)) {
3918 /* Should this be a KASSERT? */
3919 return;
3920 }
3921 bit = 1U << (status - 1);
3922 if (bit & tp->t_end_info_status) {
3923 /* already logged */
3924 return;
3925 }
3926 for (i = 0; i < TCP_END_BYTE_INFO; i++) {
3927 if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) {
3928 tp->t_end_info_bytes[i] = status;
3929 tp->t_end_info_status |= bit;
3930 break;
3931 }
3932 }
3933 }
3934
3935 int
3936 tcp_can_enable_pacing(void)
3937 {
3938
3939 if ((tcp_pacing_limit == -1) ||
3940 (tcp_pacing_limit > number_of_tcp_connections_pacing)) {
3941 atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1);
3942 shadow_num_connections = number_of_tcp_connections_pacing;
3943 return (1);
3944 } else {
3945 return (0);
3946 }
3947 }
3948
3949 static uint8_t tcp_pacing_warning = 0;
3950
3951 void
3952 tcp_decrement_paced_conn(void)
3953 {
3954 uint32_t ret;
3955
3956 ret = atomic_fetchadd_int(&number_of_tcp_connections_pacing, -1);
3957 shadow_num_connections = number_of_tcp_connections_pacing;
3958 KASSERT(ret != 0, ("tcp_paced_connection_exits -1 would cause wrap?"));
3959 if (ret == 0) {
3960 if (tcp_pacing_limit != -1) {
3961 printf("Warning all pacing is now disabled, count decrements invalidly!\n");
3962 tcp_pacing_limit = 0;
3963 } else if (tcp_pacing_warning == 0) {
3964 printf("Warning pacing count is invalid, invalid decrement\n");
3965 tcp_pacing_warning = 1;
3966 }
3967 }
3968 }
Cache object: 5b6545c1e47189d32455c87c8346c58f
|