/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/netinet/tcp_timer.c 335924 2018-07-04 02:47:16Z mmacy $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_seq.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

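/*
 * Timer period sysctls.  The backing variables below hold tick values and
 * are converted to and from milliseconds by the sysctl_msec_to_ticks
 * handler, so userland reads and writes them in milliseconds.  Their
 * defaults are established from the TCPTV_* constants at initialization
 * time.
 */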
int tcp_persmin;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_persmin, 0, sysctl_msec_to_ticks, "I", "minimum persistence interval");

int tcp_persmax;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_persmax, 0, sysctl_msec_to_ticks, "I", "maximum persistence interval");

int tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "time to establish connection");

int tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "time before keepalive probes begin");

int tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "time between keepalive probes");

int tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

int tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

int tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

int tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

int tcp_always_keepalive = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
    &tcp_always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");

int tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

int tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", "FIN-WAIT2 timeout");

int tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

	/* max idle probes */
int tcp_maxpersistidle;

int tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
    &tcp_rexmit_drop_options, 0,
    "Drop TCP options from 3rd and later retransmitted SYN");

VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
    "Path MTU Discovery Black Hole Detection Enabled");

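/*
 * The reduced MSS values used once a PMTU black hole has been detected.
 * The IPv6 default of 1220 corresponds to the minimum IPv6 MTU of 1280
 * bytes less 40 bytes of IPv6 header and 20 bytes of TCP header; the IPv4
 * default of 1200 is a similarly conservative choice.
 */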
#ifdef INET
VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#endif

#ifdef INET6
VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
    "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif

#ifdef RSS
static int per_cpu_timers = 1;
#else
static int per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in; otherwise, when per-CPU timers are
 * enabled, the flowid is hashed across the available CPUs.  When per-CPU
 * timers are disabled the answer is always CPU 0.
 */
inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

#ifdef RSS
	if (per_cpu_timers) {
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
	}
#else
	/* Legacy, pre-RSS behaviour */
	if (per_cpu_timers) {
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	}
#endif
	/* Default for RSS and non-RSS - cpuid 0 */
	else {
		return (0);
	}
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates timestamps used by TCP and causes finite state machine
 * actions if timers expire.
 */
void
tcp_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		(void) tcp_tw_2msl_scan(0);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

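/*
 * Retransmit backoff tables, indexed by t_rxtshift.  The next retransmit
 * timeout is the base RTO multiplied by the table entry; a slower ramp with
 * a lower cap is used while a SYN is still being retransmitted.
 */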
int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

int tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 */

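/*
 * Each handler below runs from a callout.  After taking the inpcb lock it
 * first re-checks callout_pending()/callout_active(), so that a timer which
 * was rescheduled or stopped while the handler was waiting for the lock
 * returns without acting, then deactivates the callout and bails out if the
 * connection has already been dropped.
 */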
void
tcp_timer_delack(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);

	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_delack) ||
	    !callout_active(&tp->t_timers->tt_delack)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_delack);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	(void) tp->t_fb->tfb_tcp_output(tp);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
}

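/*
 * Unlock the inpcb after a timer action, but only if the tcpcb still
 * exists; a NULL tp indicates that tcp_close()/tcp_drop() already disposed
 * of the connection (and its lock) on our behalf.
 */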
void
tcp_inpinfo_lock_del(struct inpcb *inp, struct tcpcb *tp)
{
	if (inp != NULL && tp != NULL)
		INP_WUNLOCK(inp);
}

void
tcp_timer_2msl(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	tcp_free_sackholes(tp);
	if (callout_pending(&tp->t_timers->tt_2msl) ||
	    !callout_active(&tp->t_timers->tt_2msl)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_2msl);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * The 2 MSL shutdown timeout went off.  If we're closed but still
	 * waiting for the peer to close, and the connection has been idle
	 * too long, delete the connection control block.  Otherwise, check
	 * again in a bit.
	 *
	 * If we are in TIME_WAIT state, just ignore this: the timeout is
	 * handled in tcp_tw_2msl_scan().
	 *
	 * If FIN_WAIT_2 fast recycling is enabled, we are in FIN_WAIT_2 and
	 * the receiver has closed, there's no point in hanging onto the
	 * socket, so just close it, ignoring any recent incoming segments.
	 */
	if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
	    tp->t_inpcb && tp->t_inpcb->inp_socket &&
	    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		tp = tcp_close(tp);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_2msl,
			    TP_KEEPINTVL(tp), tcp_timer_2msl, tp);
		} else {
			if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
				tcp_inpinfo_lock_del(inp, tp);
				goto out;
			}
			INP_INFO_RLOCK_ET(&V_tcbinfo, et);
			tp = tcp_close(tp);
			INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
	}

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);

	if (tp != NULL)
		INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_keep(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_keep) ||
	    !callout_active(&tp->t_timers->tt_keep)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_keep);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));

	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet. If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_keep,
			    TP_KEEPIDLE(tp) - idletime, tcp_timer_keep, tp);
			INP_WUNLOCK(inp);
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((tcp_always_keepalive ||
	    inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			free(t_template, M_TEMP);
		}
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
		    tcp_timer_keep, tp);
	} else
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
		    tcp_timer_keep, tp);

#ifdef TCPDEBUG
	if (inp->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return;

dropit:
	TCPSTAT_INC(tcps_keepdrops);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	INP_INFO_RLOCK_ET(&V_tcbinfo, et);
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	tcp_inpinfo_lock_del(inp, tp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_persist(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_persist) ||
	    !callout_active(&tp->t_timers->tt_persist)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_persist);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * The persist timer has expired with the send window closed.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
		TCPSTAT_INC(tcps_persistdrop);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		tp = tcp_drop(tp, ETIMEDOUT);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		tp = tcp_drop(tp, ETIMEDOUT);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->t_flags &= ~TF_FORCEDATA;

#ifdef TCPDEBUG
	if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_rexmt(void *xtp)
{
	struct tcpcb *tp = xtp;
	CURVNET_SET(tp->t_vnet);
	int rexmt;
	struct inpcb *inp;
	struct epoch_tracker et;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_rexmt) ||
	    !callout_active(&tp->t_timers->tt_rexmt)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_rexmt);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	tcp_free_sackholes(tp);
	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0, NULL, false);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		TCPSTAT_INC(tcps_timeoutdrop);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		tp = tcp_drop(tp, ETIMEDOUT);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * first retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
			tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		/*
		 * In the event that we've negotiated timestamps,
		 * badrxtwin will be set to the value that we set
		 * the retransmitted packet's to_tsval to by tcp_output.
		 */
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = TCPTV_RTOBASE * tcp_syn_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    tp->t_rttmin, TCPTV_REXMTMAX);

	/*
	 * We enter the PLMTUD path if the connection is in the ESTABLISHED
	 * or FIN_WAIT_1 state.  The latter matters because if the amount of
	 * data we are sending is very small, we could send it in a couple of
	 * packets and proceed straight to FIN, in which case we never pass
	 * through the ESTABLISHED state.
	 */
	if (V_tcp_pmtud_blackhole_detect && ((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_FIN_WAIT_1))) {
#ifdef INET6
		int isipv6;
#endif

		/*
		 * The idea here is that each stage of the MTU probe
		 * (usually 1448 -> 1188 -> 524) should be given two chances
		 * to recover before we clamp down further;
		 * 'tp->t_rxtshift % 2 == 0' takes care of that.
		 */
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
		    tp->t_rxtshift % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to lower value than what we
			 *   negotiated with peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to blackhole value or to the default
			 * in an attempt to retransmit.
			 */
#ifdef INET6
			isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(tp->ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, maybe this isn't a blackhole, so
			 * restore the previous MSS and the blackhole detection
			 * flags.
			 * The limit '6' is determined by giving each probe
			 * stage (1448, 1188, 524) two chances to recover.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= 6)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(tp->ccv);
			}
		}
	}

	/*
	 * Disable RFC1323 and SACK if we haven't got any response to
	 * our third SYN to work around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're having
	 * connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
			in6_losing(tp->t_inpcb);
		else
#endif
			in_losing(tp->t_inpcb);
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

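	/*
	 * Let the congestion control module react to the timeout; for a
	 * CC_RTO signal this typically collapses the congestion window and
	 * recomputes ssthresh before the segment is retransmitted below.
	 */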
	cc_cong_signal(tp, NULL, CC_RTO);

	(void) tp->t_fb->tfb_tcp_output(tp);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

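/*
 * Arm or disarm one of the connection's timers.  A delta of 0 stops the
 * timer; a non-zero delta (in ticks) (re)starts it on the CPU selected by
 * inp_to_cpuid().  Timer types not handled here are forwarded to the
 * stack-specific tfb_tcp_timer_activate hook when one is provided.
 */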
void
tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta)
{
	struct callout *t_callout;
	timeout_t *f_callout;
	struct inpcb *inp = tp->t_inpcb;
	int cpu = inp_to_cpuid(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	if (tp->t_timers->tt_flags & TT_STOPPED)
		return;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		f_callout = tcp_timer_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		f_callout = tcp_timer_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		f_callout = tcp_timer_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		f_callout = tcp_timer_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		f_callout = tcp_timer_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_activate) {
			tp->t_fb->tfb_tcp_timer_activate(tp, timer_type, delta);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	if (delta == 0) {
		callout_stop(t_callout);
	} else {
		callout_reset_on(t_callout, delta, f_callout, tp, cpu);
	}
}

int
tcp_timer_active(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_active) {
			return (tp->t_fb->tfb_tcp_timer_active(tp, timer_type));
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	return (callout_active(t_callout));
}

/*
 * Stop the timer from running, and set a flag in the timer's
 * tt_flags that will keep it from running again.  The flag is
 * needed so that a race does not leave the timer running and
 * allow it to restart itself (the keepalive and persist timers
 * especially do this).
 */
int
tcp_timer_suspend(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;
	uint32_t t_flags;

	switch (timer_type) {
	case TT_DELACK:
		t_flags = TT_DELACK_SUS;
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_flags = TT_REXMT_SUS;
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_flags = TT_PERSIST_SUS;
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_flags = TT_KEEP_SUS;
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_flags = TT_2MSL_SUS;
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		panic("tp:%p bad timer_type 0x%x", tp, timer_type);
	}
	tp->t_timers->tt_flags |= t_flags;
	return (callout_stop(t_callout));
}

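/*
 * Clear the suspend flag for the given timer and, if the connection state
 * still calls for it, re-arm the corresponding callout.
 */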
void
tcp_timers_unsuspend(struct tcpcb *tp, uint32_t timer_type)
{
	switch (timer_type) {
	case TT_DELACK:
		if (tp->t_timers->tt_flags & TT_DELACK_SUS) {
			tp->t_timers->tt_flags &= ~TT_DELACK_SUS;
			if (tp->t_flags & TF_DELACK) {
				/* A delayed ACK was pending; activate the timer. */
				tp->t_flags &= ~TF_DELACK;
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			}
		}
		break;
	case TT_REXMT:
		if (tp->t_timers->tt_flags & TT_REXMT_SUS) {
			tp->t_timers->tt_flags &= ~TT_REXMT_SUS;
			if (SEQ_GT(tp->snd_max, tp->snd_una) &&
			    (tcp_timer_active((tp), TT_PERSIST) == 0) &&
			    tp->snd_wnd) {
				/* We have outstanding data; activate the retransmit timer. */
				tcp_timer_activate(tp, TT_REXMT,
				    tp->t_rxtcur);
			}
		}
		break;
	case TT_PERSIST:
		if (tp->t_timers->tt_flags & TT_PERSIST_SUS) {
			tp->t_timers->tt_flags &= ~TT_PERSIST_SUS;
			if (tp->snd_wnd == 0) {
				/* Activate the persist timer. */
				tp->t_rxtshift = 0;
				tcp_setpersist(tp);
			}
		}
		break;
	case TT_KEEP:
		if (tp->t_timers->tt_flags & TT_KEEP_SUS) {
			tp->t_timers->tt_flags &= ~TT_KEEP_SUS;
			tcp_timer_activate(tp, TT_KEEP,
			    TCPS_HAVEESTABLISHED(tp->t_state) ?
			    TP_KEEPIDLE(tp) : TP_KEEPINIT(tp));
		}
		break;
	case TT_2MSL:
		if (tp->t_timers->tt_flags & TT_2MSL_SUS) {
			tp->t_timers->tt_flags &= ~TT_2MSL_SUS;
			if ((tp->t_state == TCPS_FIN_WAIT_2) &&
			    ((tp->t_inpcb->inp_socket == NULL) ||
			    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE))) {
				/* Start the 2MSL timer. */
				tcp_timer_activate(tp, TT_2MSL,
				    (tcp_fast_finwait2_recycle) ?
				    tcp_finwait2_timeout : TP_MAXIDLE(tp));
			}
		}
		break;
	default:
		panic("tp:%p bad timer_type 0x%x", tp, timer_type);
	}
}

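/*
 * Permanently stop a timer for this connection.  The tcpcb's timers are
 * marked TT_STOPPED so they can never be re-armed, and the callout is
 * drained asynchronously; if it cannot be stopped synchronously, the
 * tcp_timer_discard() callback performs the final cleanup once the
 * handler has finished.
 */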
void
tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	tp->t_timers->tt_flags |= TT_STOPPED;
	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_stop) {
			/*
			 * XXXrrs we need to look at this with the
			 * stop case below (flags).
			 */
			tp->t_fb->tfb_tcp_timer_stop(tp, timer_type);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}

	if (callout_async_drain(t_callout, tcp_timer_discard) == 0) {
		/*
		 * Can't stop the callout, defer the actual deletion of
		 * the tcpcb until the last timer has drained.  We do this
		 * using the async drain function and incrementing the
		 * count in tt_draincnt.
		 */
		tp->t_timers->tt_draincnt++;
	}
}