1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2001 McAfee, Inc.
5 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
6 * All rights reserved.
7 *
8 * This software was developed for the FreeBSD Project by Jonathan Lemon
9 * and McAfee Research, the Security Research Division of McAfee, Inc. under
10 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
11 * DARPA CHATS research program. [2001 McAfee, Inc.]
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_ipsec.h"
41 #include "opt_pcbgroup.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/hash.h>
46 #include <sys/refcount.h>
47 #include <sys/kernel.h>
48 #include <sys/sysctl.h>
49 #include <sys/limits.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/proc.h> /* for proc0 declaration */
55 #include <sys/random.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/syslog.h>
59 #include <sys/ucred.h>
60
61 #include <sys/md5.h>
62 #include <crypto/siphash/siphash.h>
63
64 #include <vm/uma.h>
65
66 #include <net/if.h>
67 #include <net/if_var.h>
68 #include <net/route.h>
69 #include <net/vnet.h>
70
71 #include <netinet/in.h>
72 #include <netinet/in_kdtrace.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/in_var.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/ip_var.h>
78 #include <netinet/ip_options.h>
79 #ifdef INET6
80 #include <netinet/ip6.h>
81 #include <netinet/icmp6.h>
82 #include <netinet6/nd6.h>
83 #include <netinet6/ip6_var.h>
84 #include <netinet6/in6_pcb.h>
85 #endif
86 #include <netinet/tcp.h>
87 #include <netinet/tcp_fastopen.h>
88 #include <netinet/tcp_fsm.h>
89 #include <netinet/tcp_seq.h>
90 #include <netinet/tcp_timer.h>
91 #include <netinet/tcp_var.h>
92 #include <netinet/tcp_syncache.h>
93 #ifdef INET6
94 #include <netinet6/tcp6_var.h>
95 #endif
96 #ifdef TCP_OFFLOAD
97 #include <netinet/toecore.h>
98 #endif
99
100 #include <netipsec/ipsec_support.h>
101
102 #include <machine/in_cksum.h>
103
104 #include <security/mac/mac_framework.h>
105
106 VNET_DEFINE_STATIC(int, tcp_syncookies) = 1;
107 #define V_tcp_syncookies VNET(tcp_syncookies)
108 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
109 &VNET_NAME(tcp_syncookies), 0,
110 "Use TCP SYN cookies if the syncache overflows");
111
112 VNET_DEFINE_STATIC(int, tcp_syncookiesonly) = 0;
113 #define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
114 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
115 &VNET_NAME(tcp_syncookiesonly), 0,
116 "Use only TCP SYN cookies");
117
118 VNET_DEFINE_STATIC(int, functions_inherit_listen_socket_stack) = 1;
119 #define V_functions_inherit_listen_socket_stack \
120 VNET(functions_inherit_listen_socket_stack)
121 SYSCTL_INT(_net_inet_tcp, OID_AUTO, functions_inherit_listen_socket_stack,
122 CTLFLAG_VNET | CTLFLAG_RW,
123 &VNET_NAME(functions_inherit_listen_socket_stack), 0,
124 "Inherit listen socket's stack");
125
126 #ifdef TCP_OFFLOAD
127 #define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
128 #endif
129
130 static void syncache_drop(struct syncache *, struct syncache_head *);
131 static void syncache_free(struct syncache *);
132 static void syncache_insert(struct syncache *, struct syncache_head *);
133 static int syncache_respond(struct syncache *, struct syncache_head *,
134 const struct mbuf *, int);
135 static struct socket *syncache_socket(struct syncache *, struct socket *,
136 struct mbuf *m);
137 static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
138 int docallout);
139 static void syncache_timer(void *);
140
141 static uint32_t syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
142 uint8_t *, uintptr_t);
143 static tcp_seq syncookie_generate(struct syncache_head *, struct syncache *);
144 static struct syncache
145 *syncookie_lookup(struct in_conninfo *, struct syncache_head *,
146 struct syncache *, struct tcphdr *, struct tcpopt *,
147 struct socket *);
148 static void syncookie_reseed(void *);
149 #ifdef INVARIANTS
150 static int syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
151 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
152 struct socket *lso);
153 #endif
154
155 /*
156 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
157 * 3 retransmits corresponds to a timeout with default values of
158 * tcp_rexmit_initial * ( 1 +
159 * tcp_backoff[1] +
160 * tcp_backoff[2] +
161 * tcp_backoff[3]) + 3 * tcp_rexmit_slop,
162 * 1000 ms * (1 + 2 + 4 + 8) + 3 * 200 ms = 15600 ms,
163 * the odds are that the user has given up attempting to connect by then.
164 */
165 #define SYNCACHE_MAXREXMTS 3
166
167 /* Arbitrary values */
168 #define TCP_SYNCACHE_HASHSIZE 512
169 #define TCP_SYNCACHE_BUCKETLIMIT 30
170
171 VNET_DEFINE_STATIC(struct tcp_syncache, tcp_syncache);
172 #define V_tcp_syncache VNET(tcp_syncache)
173
174 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
175 "TCP SYN cache");
176
177 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
178 &VNET_NAME(tcp_syncache.bucket_limit), 0,
179 "Per-bucket hash limit for syncache");
180
181 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
182 &VNET_NAME(tcp_syncache.cache_limit), 0,
183 "Overall entry limit for syncache");
184
185 SYSCTL_UMA_CUR(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_VNET,
186 &VNET_NAME(tcp_syncache.zone), "Current number of entries in syncache");
187
188 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
189 &VNET_NAME(tcp_syncache.hashsize), 0,
190 "Size of TCP syncache hashtable");
191
192 static int
193 sysctl_net_inet_tcp_syncache_rexmtlimit_check(SYSCTL_HANDLER_ARGS)
194 {
195 int error;
196 u_int new;
197
198 new = V_tcp_syncache.rexmt_limit;
199 error = sysctl_handle_int(oidp, &new, 0, req);
200 if ((error == 0) && (req->newptr != NULL)) {
201 if (new > TCP_MAXRXTSHIFT)
202 error = EINVAL;
203 else
204 V_tcp_syncache.rexmt_limit = new;
205 }
206 return (error);
207 }
208
209 SYSCTL_PROC(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit,
210 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW,
211 &VNET_NAME(tcp_syncache.rexmt_limit), 0,
212 sysctl_net_inet_tcp_syncache_rexmtlimit_check, "UI",
213 "Limit on SYN/ACK retransmissions");
214
215 VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
216 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
217 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
218 "Send reset on socket allocation failure");
219
220 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
221
222 #define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx)
223 #define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx)
224 #define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED)
225
226 /*
227 * Requires the syncache entry to be already removed from the bucket list.
228 */
229 static void
230 syncache_free(struct syncache *sc)
231 {
232
233 if (sc->sc_ipopts)
234 (void) m_free(sc->sc_ipopts);
235 if (sc->sc_cred)
236 crfree(sc->sc_cred);
237 #ifdef MAC
238 mac_syncache_destroy(&sc->sc_label);
239 #endif
240
241 uma_zfree(V_tcp_syncache.zone, sc);
242 }
243
244 void
245 syncache_init(void)
246 {
247 int i;
248
249 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
250 V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
251 V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
252 V_tcp_syncache.hash_secret = arc4random();
253
254 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
255 &V_tcp_syncache.hashsize);
256 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
257 &V_tcp_syncache.bucket_limit);
258 if (!powerof2(V_tcp_syncache.hashsize) ||
259 V_tcp_syncache.hashsize == 0) {
260 printf("WARNING: syncache hash size is not a power of 2.\n");
261 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
262 }
263 V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
264
265 /* Set limits. */
266 V_tcp_syncache.cache_limit =
267 V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
268 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
269 &V_tcp_syncache.cache_limit);
270
271 /* Allocate the hash table. */
272 V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
273 sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);
274
275 #ifdef VIMAGE
276 V_tcp_syncache.vnet = curvnet;
277 #endif
278
279 /* Initialize the hash buckets. */
280 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
281 TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
282 mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
283 NULL, MTX_DEF);
284 callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
285 &V_tcp_syncache.hashbase[i].sch_mtx, 0);
286 V_tcp_syncache.hashbase[i].sch_length = 0;
287 V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
288 V_tcp_syncache.hashbase[i].sch_last_overflow =
289 -(SYNCOOKIE_LIFETIME + 1);
290 }
291
292 /* Create the syncache entry zone. */
293 V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
294 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
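	/*
	 * uma_zone_set_max() may round the requested limit up (e.g. to
	 * fill complete slabs), so record the effective value it returns.
	 */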
295 V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
296 V_tcp_syncache.cache_limit);
297
298 /* Start the SYN cookie reseeder callout. */
299 callout_init(&V_tcp_syncache.secret.reseed, 1);
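	/*
	 * Two secrets are kept so that cookies issued shortly before a
	 * reseed can still be validated against the previous secret.
	 */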
300 arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
301 arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
302 callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
303 syncookie_reseed, &V_tcp_syncache);
304 }
305
306 #ifdef VIMAGE
307 void
308 syncache_destroy(void)
309 {
310 struct syncache_head *sch;
311 struct syncache *sc, *nsc;
312 int i;
313
314 /*
315 * Stop the re-seed timer before freeing resources. No need to
316 * possibly schedule it another time.
317 */
318 callout_drain(&V_tcp_syncache.secret.reseed);
319
320 /* Cleanup hash buckets: stop timers, free entries, destroy locks. */
321 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
322
323 sch = &V_tcp_syncache.hashbase[i];
324 callout_drain(&sch->sch_timer);
325
326 SCH_LOCK(sch);
327 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
328 syncache_drop(sc, sch);
329 SCH_UNLOCK(sch);
330 KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
331 ("%s: sch->sch_bucket not empty", __func__));
332 KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
333 __func__, sch->sch_length));
334 mtx_destroy(&sch->sch_mtx);
335 }
336
337 KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
338 ("%s: cache_count not 0", __func__));
339
340 /* Free the allocated global resources. */
341 uma_zdestroy(V_tcp_syncache.zone);
342 free(V_tcp_syncache.hashbase, M_SYNCACHE);
343 }
344 #endif
345
346 /*
347 * Inserts a syncache entry into the specified bucket row.
348 * Locks and unlocks the syncache_head autonomously.
349 */
350 static void
351 syncache_insert(struct syncache *sc, struct syncache_head *sch)
352 {
353 struct syncache *sc2;
354
355 SCH_LOCK(sch);
356
357 /*
358 * Make sure that we don't overflow the per-bucket limit.
359 * If the bucket is full, toss the oldest element.
360 */
361 if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
362 KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
363 ("sch->sch_length incorrect"));
364 sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
365 sch->sch_last_overflow = time_uptime;
366 syncache_drop(sc2, sch);
367 TCPSTAT_INC(tcps_sc_bucketoverflow);
368 }
369
370 /* Put it into the bucket. */
371 TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
372 sch->sch_length++;
373
374 #ifdef TCP_OFFLOAD
375 if (ADDED_BY_TOE(sc)) {
376 struct toedev *tod = sc->sc_tod;
377
378 tod->tod_syncache_added(tod, sc->sc_todctx);
379 }
380 #endif
381
382 /* Reinitialize the bucket row's timer. */
383 if (sch->sch_length == 1)
384 sch->sch_nextc = ticks + INT_MAX;
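	/*
	 * For the first entry in an empty bucket, sch_nextc is pushed far
	 * into the future so that syncache_timeout() below is guaranteed
	 * to pull it back to this entry's retransmit time and arm the
	 * callout.
	 */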
385 syncache_timeout(sc, sch, 1);
386
387 SCH_UNLOCK(sch);
388
389 TCPSTATES_INC(TCPS_SYN_RECEIVED);
390 TCPSTAT_INC(tcps_sc_added);
391 }
392
393 /*
394 * Remove and free entry from syncache bucket row.
395 * Expects locked syncache head.
396 */
397 static void
398 syncache_drop(struct syncache *sc, struct syncache_head *sch)
399 {
400
401 SCH_LOCK_ASSERT(sch);
402
403 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
404 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
405 sch->sch_length--;
406
407 #ifdef TCP_OFFLOAD
408 if (ADDED_BY_TOE(sc)) {
409 struct toedev *tod = sc->sc_tod;
410
411 tod->tod_syncache_removed(tod, sc->sc_todctx);
412 }
413 #endif
414
415 syncache_free(sc);
416 }
417
418 /*
 * Engage/reengage timer on bucket row.
420 */
421 static void
422 syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
423 {
424 int rexmt;
425
426 if (sc->sc_rxmits == 0)
427 rexmt = tcp_rexmit_initial;
428 else
429 TCPT_RANGESET(rexmt,
430 tcp_rexmit_initial * tcp_backoff[sc->sc_rxmits],
431 tcp_rexmit_min, TCPTV_REXMTMAX);
432 sc->sc_rxttime = ticks + rexmt;
433 sc->sc_rxmits++;
434 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
435 sch->sch_nextc = sc->sc_rxttime;
436 if (docallout)
437 callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
438 syncache_timer, (void *)sch);
439 }
440 }
441
442 /*
443 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
444 * If we have retransmitted an entry the maximum number of times, expire it.
445 * One separate timer for each bucket row.
446 */
447 static void
448 syncache_timer(void *xsch)
449 {
450 struct syncache_head *sch = (struct syncache_head *)xsch;
451 struct syncache *sc, *nsc;
452 int tick = ticks;
453 char *s;
454
455 CURVNET_SET(sch->sch_sc->vnet);
456
457 /* NB: syncache_head has already been locked by the callout. */
458 SCH_LOCK_ASSERT(sch);
459
460 /*
461 * In the following cycle we may remove some entries and/or
462 * advance some timeouts, so re-initialize the bucket timer.
463 */
464 sch->sch_nextc = tick + INT_MAX;
465
466 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
467 /*
468 * We do not check if the listen socket still exists
469 * and accept the case where the listen socket may be
470 * gone by the time we resend the SYN/ACK. We do
		 * not expect this to happen often.  If it does,
472 * then the RST will be sent by the time the remote
473 * host does the SYN/ACK->ACK.
474 */
475 if (TSTMP_GT(sc->sc_rxttime, tick)) {
476 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
477 sch->sch_nextc = sc->sc_rxttime;
478 continue;
479 }
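		/*
		 * Assume the SYN|ACK retransmits are going unanswered
		 * because a middlebox is dropping ECN-setup segments;
		 * stop advertising ECN once the retry limit is exceeded.
		 */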
480 if (sc->sc_rxmits > V_tcp_ecn_maxretries) {
481 sc->sc_flags &= ~SCF_ECN;
482 }
483 if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
484 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
485 log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
486 "giving up and removing syncache entry\n",
487 s, __func__);
488 free(s, M_TCPLOG);
489 }
490 syncache_drop(sc, sch);
491 TCPSTAT_INC(tcps_sc_stale);
492 continue;
493 }
494 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
495 log(LOG_DEBUG, "%s; %s: Response timeout, "
496 "retransmitting (%u) SYN|ACK\n",
497 s, __func__, sc->sc_rxmits);
498 free(s, M_TCPLOG);
499 }
500
501 syncache_respond(sc, sch, NULL, TH_SYN|TH_ACK);
502 TCPSTAT_INC(tcps_sc_retransmitted);
503 syncache_timeout(sc, sch, 0);
504 }
505 if (!TAILQ_EMPTY(&(sch)->sch_bucket))
506 callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
507 syncache_timer, (void *)(sch));
508 CURVNET_RESTORE();
509 }
510
511 /*
512 * Find an entry in the syncache.
 * Always returns with a locked syncache_head plus a matching entry, or NULL.
514 */
515 static struct syncache *
516 syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
517 {
518 struct syncache *sc;
519 struct syncache_head *sch;
520 uint32_t hash;
521
522 /*
523 * The hash is built on foreign port + local port + foreign address.
524 * We rely on the fact that struct in_conninfo starts with 16 bits
 * of foreign port, then 16 bits of local port, followed by 128
 * bits of foreign address.  In the case of an IPv4 address, the
 * first 3 32-bit words of the address are always zeroes.
528 */
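	/* Hash 5 32-bit words: 2 x 16-bit ports plus the 128-bit address. */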
529 hash = jenkins_hash32((uint32_t *)&inc->inc_ie, 5,
530 V_tcp_syncache.hash_secret) & V_tcp_syncache.hashmask;
531
532 sch = &V_tcp_syncache.hashbase[hash];
533 *schp = sch;
534 SCH_LOCK(sch);
535
536 /* Circle through bucket row to find matching entry. */
537 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
538 if (bcmp(&inc->inc_ie, &sc->sc_inc.inc_ie,
539 sizeof(struct in_endpoints)) == 0)
540 break;
541
542 return (sc); /* Always returns with locked sch. */
543 }
544
545 /*
546 * This function is called when we get a RST for a
547 * non-existent connection, so that we can see if the
548 * connection is in the syn cache. If it is, zap it.
549 * If required send a challenge ACK.
550 */
551 void
552 syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th, struct mbuf *m)
553 {
554 struct syncache *sc;
555 struct syncache_head *sch;
556 char *s = NULL;
557
558 sc = syncache_lookup(inc, &sch); /* returns locked sch */
559 SCH_LOCK_ASSERT(sch);
560
561 /*
562 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
563 * See RFC 793 page 65, section SEGMENT ARRIVES.
564 */
565 if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
566 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
567 log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
568 "FIN flag set, segment ignored\n", s, __func__);
569 TCPSTAT_INC(tcps_badrst);
570 goto done;
571 }
572
573 /*
574 * No corresponding connection was found in syncache.
575 * If syncookies are enabled and possibly exclusively
576 * used, or we are under memory pressure, a valid RST
577 * may not find a syncache entry. In that case we're
578 * done and no SYN|ACK retransmissions will happen.
579 * Otherwise the RST was misdirected or spoofed.
580 */
581 if (sc == NULL) {
582 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
583 log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
584 "syncache entry (possibly syncookie only), "
585 "segment ignored\n", s, __func__);
586 TCPSTAT_INC(tcps_badrst);
587 goto done;
588 }
589
590 /*
591 * If the RST bit is set, check the sequence number to see
592 * if this is a valid reset segment.
593 *
594 * RFC 793 page 37:
595 * In all states except SYN-SENT, all reset (RST) segments
596 * are validated by checking their SEQ-fields. A reset is
597 * valid if its sequence number is in the window.
598 *
599 * RFC 793 page 69:
600 * There are four cases for the acceptability test for an incoming
601 * segment:
602 *
603 * Segment Receive Test
604 * Length Window
605 * ------- ------- -------------------------------------------
606 * 0 0 SEG.SEQ = RCV.NXT
607 * 0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
608 * >0 0 not acceptable
609 * >0 >0 RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
610 * or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
611 *
612 * Note that when receiving a SYN segment in the LISTEN state,
613 * IRS is set to SEG.SEQ and RCV.NXT is set to SEG.SEQ+1, as
614 * described in RFC 793, page 66.
615 */
616 if ((SEQ_GEQ(th->th_seq, sc->sc_irs + 1) &&
617 SEQ_LT(th->th_seq, sc->sc_irs + 1 + sc->sc_wnd)) ||
618 (sc->sc_wnd == 0 && th->th_seq == sc->sc_irs + 1)) {
619 if (V_tcp_insecure_rst ||
620 th->th_seq == sc->sc_irs + 1) {
621 syncache_drop(sc, sch);
622 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
623 log(LOG_DEBUG,
624 "%s; %s: Our SYN|ACK was rejected, "
625 "connection attempt aborted by remote "
626 "endpoint\n",
627 s, __func__);
628 TCPSTAT_INC(tcps_sc_reset);
629 } else {
630 TCPSTAT_INC(tcps_badrst);
631 /* Send challenge ACK. */
632 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
633 log(LOG_DEBUG, "%s; %s: RST with invalid "
634 " SEQ %u != NXT %u (+WND %u), "
635 "sending challenge ACK\n",
636 s, __func__,
637 th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
638 syncache_respond(sc, sch, m, TH_ACK);
639 }
640 } else {
641 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
642 log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
643 "NXT %u (+WND %u), segment ignored\n",
644 s, __func__,
645 th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
646 TCPSTAT_INC(tcps_badrst);
647 }
648
649 done:
650 if (s != NULL)
651 free(s, M_TCPLOG);
652 SCH_UNLOCK(sch);
653 }
654
655 void
656 syncache_badack(struct in_conninfo *inc)
657 {
658 struct syncache *sc;
659 struct syncache_head *sch;
660
661 sc = syncache_lookup(inc, &sch); /* returns locked sch */
662 SCH_LOCK_ASSERT(sch);
663 if (sc != NULL) {
664 syncache_drop(sc, sch);
665 TCPSTAT_INC(tcps_sc_badack);
666 }
667 SCH_UNLOCK(sch);
668 }
669
670 void
671 syncache_unreach(struct in_conninfo *inc, tcp_seq th_seq)
672 {
673 struct syncache *sc;
674 struct syncache_head *sch;
675
676 sc = syncache_lookup(inc, &sch); /* returns locked sch */
677 SCH_LOCK_ASSERT(sch);
678 if (sc == NULL)
679 goto done;
680
681 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
682 if (ntohl(th_seq) != sc->sc_iss)
683 goto done;
684
685 /*
 * If we've retransmitted 3 times and this is our second error,
687 * we remove the entry. Otherwise, we allow it to continue on.
688 * This prevents us from incorrectly nuking an entry during a
689 * spurious network outage.
690 *
691 * See tcp_notify().
692 */
693 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
694 sc->sc_flags |= SCF_UNREACH;
695 goto done;
696 }
697 syncache_drop(sc, sch);
698 TCPSTAT_INC(tcps_sc_unreach);
699 done:
700 SCH_UNLOCK(sch);
701 }
702
703 /*
704 * Build a new TCP socket structure from a syncache entry.
705 *
706 * On success return the newly created socket with its underlying inp locked.
707 */
708 static struct socket *
709 syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
710 {
711 struct tcp_function_block *blk;
712 struct inpcb *inp = NULL;
713 struct socket *so;
714 struct tcpcb *tp;
715 int error;
716 char *s;
717
718 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
719
720 /*
721 * Ok, create the full blown connection, and set things up
722 * as they would have been set up if we had created the
723 * connection when the SYN arrived. If we can't create
724 * the connection, abort it.
725 */
726 so = sonewconn(lso, 0);
727 if (so == NULL) {
728 /*
729 * Drop the connection; we will either send a RST or
730 * have the peer retransmit its SYN again after its
731 * RTO and try again.
732 */
733 TCPSTAT_INC(tcps_listendrop);
734 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
735 log(LOG_DEBUG, "%s; %s: Socket create failed "
736 "due to limits or memory shortage\n",
737 s, __func__);
738 free(s, M_TCPLOG);
739 }
740 goto abort2;
741 }
742 #ifdef MAC
743 mac_socketpeer_set_from_mbuf(m, so);
744 #endif
745
746 inp = sotoinpcb(so);
747 inp->inp_inc.inc_fibnum = so->so_fibnum;
748 INP_WLOCK(inp);
749 /*
750 * Exclusive pcbinfo lock is not required in syncache socket case even
751 * if two inpcb locks can be acquired simultaneously:
752 * - the inpcb in LISTEN state,
753 * - the newly created inp.
754 *
 * In this case, an inp cannot be in the LISTEN state and, at the
 * same time, be the inp just created by an accept() call.
757 */
758 INP_HASH_WLOCK(&V_tcbinfo);
759
760 /* Insert new socket into PCB hash list. */
761 inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
762 #ifdef INET6
763 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
764 inp->inp_vflag &= ~INP_IPV4;
765 inp->inp_vflag |= INP_IPV6;
766 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
767 } else {
768 inp->inp_vflag &= ~INP_IPV6;
769 inp->inp_vflag |= INP_IPV4;
770 #endif
771 inp->inp_ip_ttl = sc->sc_ip_ttl;
772 inp->inp_ip_tos = sc->sc_ip_tos;
773 inp->inp_laddr = sc->sc_inc.inc_laddr;
774 #ifdef INET6
775 }
776 #endif
777
778 /*
779 * If there's an mbuf and it has a flowid, then let's initialise the
780 * inp with that particular flowid.
781 */
782 if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
783 inp->inp_flowid = m->m_pkthdr.flowid;
784 inp->inp_flowtype = M_HASHTYPE_GET(m);
785 }
786
787 inp->inp_lport = sc->sc_inc.inc_lport;
788 #ifdef INET6
789 if (inp->inp_vflag & INP_IPV6PROTO) {
790 struct inpcb *oinp = sotoinpcb(lso);
791
792 /*
793 * Inherit socket options from the listening socket.
794 * Note that in6p_inputopts are not (and should not be)
795 * copied, since it stores previously received options and is
796 * used to detect if each new option is different than the
797 * previous one and hence should be passed to a user.
798 * If we copied in6p_inputopts, a user would not be able to
799 * receive options just after calling the accept system call.
800 */
801 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
802 if (oinp->in6p_outputopts)
803 inp->in6p_outputopts =
804 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
805 inp->in6p_hops = oinp->in6p_hops;
806 }
807
808 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
809 struct in6_addr laddr6;
810 struct sockaddr_in6 sin6;
811
812 sin6.sin6_family = AF_INET6;
813 sin6.sin6_len = sizeof(sin6);
814 sin6.sin6_addr = sc->sc_inc.inc6_faddr;
815 sin6.sin6_port = sc->sc_inc.inc_fport;
816 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
817 laddr6 = inp->in6p_laddr;
818 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
819 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
820 if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
821 thread0.td_ucred, m, false)) != 0) {
822 inp->in6p_laddr = laddr6;
823 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
824 log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
825 "with error %i\n",
826 s, __func__, error);
827 free(s, M_TCPLOG);
828 }
829 INP_HASH_WUNLOCK(&V_tcbinfo);
830 goto abort;
831 }
832 /* Override flowlabel from in6_pcbconnect. */
833 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
834 inp->inp_flow |= sc->sc_flowlabel;
835 }
836 #endif /* INET6 */
837 #if defined(INET) && defined(INET6)
838 else
839 #endif
840 #ifdef INET
841 {
842 struct in_addr laddr;
843 struct sockaddr_in sin;
844
845 inp->inp_options = (m) ? ip_srcroute(m) : NULL;
846
847 if (inp->inp_options == NULL) {
848 inp->inp_options = sc->sc_ipopts;
849 sc->sc_ipopts = NULL;
850 }
851
852 sin.sin_family = AF_INET;
853 sin.sin_len = sizeof(sin);
854 sin.sin_addr = sc->sc_inc.inc_faddr;
855 sin.sin_port = sc->sc_inc.inc_fport;
856 bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
857 laddr = inp->inp_laddr;
858 if (inp->inp_laddr.s_addr == INADDR_ANY)
859 inp->inp_laddr = sc->sc_inc.inc_laddr;
860 if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
861 thread0.td_ucred, m, false)) != 0) {
862 inp->inp_laddr = laddr;
863 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
864 log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
865 "with error %i\n",
866 s, __func__, error);
867 free(s, M_TCPLOG);
868 }
869 INP_HASH_WUNLOCK(&V_tcbinfo);
870 goto abort;
871 }
872 }
873 #endif /* INET */
874 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
875 /* Copy old policy into new socket's. */
876 if (ipsec_copy_pcbpolicy(sotoinpcb(lso), inp) != 0)
877 printf("syncache_socket: could not copy policy\n");
878 #endif
879 INP_HASH_WUNLOCK(&V_tcbinfo);
880 tp = intotcpcb(inp);
881 tcp_state_change(tp, TCPS_SYN_RECEIVED);
882 tp->iss = sc->sc_iss;
883 tp->irs = sc->sc_irs;
884 tcp_rcvseqinit(tp);
885 tcp_sendseqinit(tp);
886 blk = sototcpcb(lso)->t_fb;
887 if (V_functions_inherit_listen_socket_stack && blk != tp->t_fb) {
888 /*
		 * Our parent's t_fb was not the default, so we
		 * need to release our ref on tp->t_fb and pick
		 * up one on the new entry.
892 */
893 struct tcp_function_block *rblk;
894
895 rblk = find_and_ref_tcp_fb(blk);
896 KASSERT(rblk != NULL,
897 ("cannot find blk %p out of syncache?", blk));
898 if (tp->t_fb->tfb_tcp_fb_fini)
899 (*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
900 refcount_release(&tp->t_fb->tfb_refcnt);
901 tp->t_fb = rblk;
902 /*
		 * XXXrrs this is quite dangerous, it is possible
		 * for the new function to fail to init.  We also
		 * are not asking if the handoff_is_ok, though at
		 * the very start that's probably ok.
907 */
908 if (tp->t_fb->tfb_tcp_fb_init) {
909 (*tp->t_fb->tfb_tcp_fb_init)(tp);
910 }
911 }
912 tp->snd_wl1 = sc->sc_irs;
913 tp->snd_max = tp->iss + 1;
914 tp->snd_nxt = tp->iss + 1;
915 tp->rcv_up = sc->sc_irs + 1;
916 tp->rcv_wnd = sc->sc_wnd;
917 tp->rcv_adv += tp->rcv_wnd;
918 tp->last_ack_sent = tp->rcv_nxt;
919
920 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
921 if (sc->sc_flags & SCF_NOOPT)
922 tp->t_flags |= TF_NOOPT;
923 else {
924 if (sc->sc_flags & SCF_WINSCALE) {
925 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
926 tp->snd_scale = sc->sc_requested_s_scale;
927 tp->request_r_scale = sc->sc_requested_r_scale;
928 }
929 if (sc->sc_flags & SCF_TIMESTAMP) {
930 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
931 tp->ts_recent = sc->sc_tsreflect;
932 tp->ts_recent_age = tcp_ts_getticks();
933 tp->ts_offset = sc->sc_tsoff;
934 }
935 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
936 if (sc->sc_flags & SCF_SIGNATURE)
937 tp->t_flags |= TF_SIGNATURE;
938 #endif
939 if (sc->sc_flags & SCF_SACK)
940 tp->t_flags |= TF_SACK_PERMIT;
941 }
942
943 if (sc->sc_flags & SCF_ECN)
944 tp->t_flags |= TF_ECN_PERMIT;
945
946 /*
947 * Set up MSS and get cached values from tcp_hostcache.
948 * This might overwrite some of the defaults we just set.
949 */
950 tcp_mss(tp, sc->sc_peer_mss);
951
952 /*
	 * If the SYN,ACK was retransmitted, indicate to cc_conn_init()
	 * that the CWND should be limited to one segment.
955 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
956 */
957 if (sc->sc_rxmits > 1)
958 tp->snd_cwnd = 1;
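	/*
	 * 1 is a flag value here rather than a byte count; cc_conn_init()
	 * is expected to convert it into one full segment.
	 */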
959
960 #ifdef TCP_OFFLOAD
961 /*
962 * Allow a TOE driver to install its hooks. Note that we hold the
963 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
964 * new connection before the TOE driver has done its thing.
965 */
966 if (ADDED_BY_TOE(sc)) {
967 struct toedev *tod = sc->sc_tod;
968
969 tod->tod_offload_socket(tod, sc->sc_todctx, so);
970 }
971 #endif
972 /*
973 * Copy and activate timers.
974 */
975 tp->t_keepinit = sototcpcb(lso)->t_keepinit;
976 tp->t_keepidle = sototcpcb(lso)->t_keepidle;
977 tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
978 tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
979 tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
980
981 TCPSTAT_INC(tcps_accepts);
982 return (so);
983
984 abort:
985 INP_WUNLOCK(inp);
986 abort2:
987 if (so != NULL)
988 soabort(so);
989 return (NULL);
990 }
991
992 /*
993 * This function gets called when we receive an ACK for a
994 * socket in the LISTEN state. We look up the connection
 * in the syncache, and if it's there, we pull it out of
996 * the cache and turn it into a full-blown connection in
997 * the SYN-RECEIVED state.
998 *
999 * On syncache_socket() success the newly created socket
1000 * has its underlying inp locked.
1001 */
1002 int
1003 syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1004 struct socket **lsop, struct mbuf *m)
1005 {
1006 struct syncache *sc;
1007 struct syncache_head *sch;
1008 struct syncache scs;
1009 char *s;
1010
1011 /*
1012 * Global TCP locks are held because we manipulate the PCB lists
1013 * and create a new socket.
1014 */
1015 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1016 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
1017 ("%s: can handle only ACK", __func__));
1018
1019 sc = syncache_lookup(inc, &sch); /* returns locked sch */
1020 SCH_LOCK_ASSERT(sch);
1021
1022 #ifdef INVARIANTS
1023 /*
1024 * Test code for syncookies comparing the syncache stored
1025 * values with the reconstructed values from the cookie.
1026 */
1027 if (sc != NULL)
1028 syncookie_cmp(inc, sch, sc, th, to, *lsop);
1029 #endif
1030
1031 if (sc == NULL) {
1032 /*
1033 * There is no syncache entry, so see if this ACK is
1034 * a returning syncookie. To do this, first:
1035 * A. Check if syncookies are used in case of syncache
1036 * overflows
1037 * B. See if this socket has had a syncache entry dropped in
1038 * the recent past. We don't want to accept a bogus
1039 * syncookie if we've never received a SYN or accept it
1040 * twice.
		 *    C. Check that the syncookie is valid.  If it is, then
1042 * cobble up a fake syncache entry, and return.
1043 */
1044 if (!V_tcp_syncookies) {
1045 SCH_UNLOCK(sch);
1046 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1047 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1048 "segment rejected (syncookies disabled)\n",
1049 s, __func__);
1050 goto failed;
1051 }
1052 if (!V_tcp_syncookiesonly &&
1053 sch->sch_last_overflow < time_uptime - SYNCOOKIE_LIFETIME) {
1054 SCH_UNLOCK(sch);
1055 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1056 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1057 "segment rejected (no syncache entry)\n",
1058 s, __func__);
1059 goto failed;
1060 }
1061 bzero(&scs, sizeof(scs));
1062 sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
1063 SCH_UNLOCK(sch);
1064 if (sc == NULL) {
1065 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1066 log(LOG_DEBUG, "%s; %s: Segment failed "
1067 "SYNCOOKIE authentication, segment rejected "
1068 "(probably spoofed)\n", s, __func__);
1069 goto failed;
1070 }
1071 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1072 /* If received ACK has MD5 signature, check it. */
1073 if ((to->to_flags & TOF_SIGNATURE) != 0 &&
1074 (!TCPMD5_ENABLED() ||
1075 TCPMD5_INPUT(m, th, to->to_signature) != 0)) {
1076 /* Drop the ACK. */
1077 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1078 log(LOG_DEBUG, "%s; %s: Segment rejected, "
1079 "MD5 signature doesn't match.\n",
1080 s, __func__);
1081 free(s, M_TCPLOG);
1082 }
1083 TCPSTAT_INC(tcps_sig_err_sigopt);
1084 return (-1); /* Do not send RST */
1085 }
1086 #endif /* TCP_SIGNATURE */
1087 } else {
1088 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1089 /*
1090 * If listening socket requested TCP digests, check that
1091 * received ACK has signature and it is correct.
		 * If not, drop the ACK and leave the sc entry in the cache,
1093 * because SYN was received with correct signature.
1094 */
1095 if (sc->sc_flags & SCF_SIGNATURE) {
1096 if ((to->to_flags & TOF_SIGNATURE) == 0) {
1097 /* No signature */
1098 TCPSTAT_INC(tcps_sig_err_nosigopt);
1099 SCH_UNLOCK(sch);
1100 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1101 log(LOG_DEBUG, "%s; %s: Segment "
1102 "rejected, MD5 signature wasn't "
1103 "provided.\n", s, __func__);
1104 free(s, M_TCPLOG);
1105 }
1106 return (-1); /* Do not send RST */
1107 }
1108 if (!TCPMD5_ENABLED() ||
1109 TCPMD5_INPUT(m, th, to->to_signature) != 0) {
1110 /* Doesn't match or no SA */
1111 SCH_UNLOCK(sch);
1112 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1113 log(LOG_DEBUG, "%s; %s: Segment "
1114 "rejected, MD5 signature doesn't "
1115 "match.\n", s, __func__);
1116 free(s, M_TCPLOG);
1117 }
1118 return (-1); /* Do not send RST */
1119 }
1120 }
1121 #endif /* TCP_SIGNATURE */
1122
1123 /*
1124 * RFC 7323 PAWS: If we have a timestamp on this segment and
1125 * it's less than ts_recent, drop it.
		 * XXXMT: RFC 7323 also requires an ACK to be sent.
1127 * In tcp_input.c this is only done for TCP segments
1128 * with user data, so be consistent here and just drop
1129 * the segment.
1130 */
1131 if (sc->sc_flags & SCF_TIMESTAMP && to->to_flags & TOF_TS &&
1132 TSTMP_LT(to->to_tsval, sc->sc_tsreflect)) {
1133 SCH_UNLOCK(sch);
1134 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1135 log(LOG_DEBUG,
1136 "%s; %s: SEG.TSval %u < TS.Recent %u, "
1137 "segment dropped\n", s, __func__,
1138 to->to_tsval, sc->sc_tsreflect);
1139 free(s, M_TCPLOG);
1140 }
1141 return (-1); /* Do not send RST */
1142 }
1143
1144 /*
1145 * If timestamps were not negotiated during SYN/ACK and a
1146 * segment with a timestamp is received, ignore the
1147 * timestamp and process the packet normally.
1148 * See section 3.2 of RFC 7323.
1149 */
1150 if (!(sc->sc_flags & SCF_TIMESTAMP) &&
1151 (to->to_flags & TOF_TS)) {
1152 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1153 log(LOG_DEBUG, "%s; %s: Timestamp not "
1154 "expected, segment processed normally\n",
1155 s, __func__);
1156 free(s, M_TCPLOG);
1157 s = NULL;
1158 }
1159 }
1160
1161 /*
1162 * If timestamps were negotiated during SYN/ACK and a
1163 * segment without a timestamp is received, silently drop
1164 * the segment, unless the missing timestamps are tolerated.
1165 * See section 3.2 of RFC 7323.
1166 */
1167 if ((sc->sc_flags & SCF_TIMESTAMP) &&
1168 !(to->to_flags & TOF_TS)) {
1169 if (V_tcp_tolerate_missing_ts) {
1170 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1171 log(LOG_DEBUG,
1172 "%s; %s: Timestamp missing, "
1173 "segment processed normally\n",
1174 s, __func__);
1175 free(s, M_TCPLOG);
1176 }
1177 } else {
1178 SCH_UNLOCK(sch);
1179 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1180 log(LOG_DEBUG,
1181 "%s; %s: Timestamp missing, "
1182 "segment silently dropped\n",
1183 s, __func__);
1184 free(s, M_TCPLOG);
1185 }
1186 return (-1); /* Do not send RST */
1187 }
1188 }
1189
1190 /*
1191 * Pull out the entry to unlock the bucket row.
1192 *
1193 * NOTE: We must decrease TCPS_SYN_RECEIVED count here, not
		 * tcp_state_change().  The tcpcb does not exist at this
1195 * moment. A new one will be allocated via syncache_socket->
1196 * sonewconn->tcp_usr_attach in TCPS_CLOSED state, then
1197 * syncache_socket() will change it to TCPS_SYN_RECEIVED.
1198 */
1199 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
1200 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
1201 sch->sch_length--;
1202 #ifdef TCP_OFFLOAD
1203 if (ADDED_BY_TOE(sc)) {
1204 struct toedev *tod = sc->sc_tod;
1205
1206 tod->tod_syncache_removed(tod, sc->sc_todctx);
1207 }
1208 #endif
1209 SCH_UNLOCK(sch);
1210 }
1211
1212 /*
1213 * Segment validation:
1214 * ACK must match our initial sequence number + 1 (the SYN|ACK).
1215 */
1216 if (th->th_ack != sc->sc_iss + 1) {
1217 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1218 log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
1219 "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
1220 goto failed;
1221 }
1222
1223 /*
1224 * The SEQ must fall in the window starting at the received
1225 * initial receive sequence number + 1 (the SYN).
1226 */
1227 if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
1228 SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
1229 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1230 log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
1231 "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
1232 goto failed;
1233 }
1234
1235 *lsop = syncache_socket(sc, *lsop, m);
1236
1237 if (*lsop == NULL)
1238 TCPSTAT_INC(tcps_sc_aborted);
1239 else
1240 TCPSTAT_INC(tcps_sc_completed);
1241
1242 /* how do we find the inp for the new socket? */
1243 if (sc != &scs)
1244 syncache_free(sc);
1245 return (1);
1246 failed:
1247 if (sc != NULL && sc != &scs)
1248 syncache_free(sc);
1249 if (s != NULL)
1250 free(s, M_TCPLOG);
1251 *lsop = NULL;
1252 return (0);
1253 }
1254
1255 static void
1256 syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
1257 uint64_t response_cookie)
1258 {
1259 struct inpcb *inp;
1260 struct tcpcb *tp;
1261 unsigned int *pending_counter;
1262
1263 /*
1264 * Global TCP locks are held because we manipulate the PCB lists
1265 * and create a new socket.
1266 */
1267 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1268
1269 pending_counter = intotcpcb(sotoinpcb(*lsop))->t_tfo_pending;
1270 *lsop = syncache_socket(sc, *lsop, m);
1271 if (*lsop == NULL) {
1272 TCPSTAT_INC(tcps_sc_aborted);
1273 atomic_subtract_int(pending_counter, 1);
1274 } else {
1275 soisconnected(*lsop);
1276 inp = sotoinpcb(*lsop);
1277 tp = intotcpcb(inp);
1278 tp->t_flags |= TF_FASTOPEN;
1279 tp->t_tfo_cookie.server = response_cookie;
1280 tp->snd_max = tp->iss;
1281 tp->snd_nxt = tp->iss;
1282 tp->t_tfo_pending = pending_counter;
1283 TCPSTAT_INC(tcps_sc_completed);
1284 }
1285 }
1286
1287 /*
1288 * Given a LISTEN socket and an inbound SYN request, add
1289 * this to the syn cache, and send back a segment:
1290 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
1291 * to the source.
1292 *
1293 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
1294 * Doing so would require that we hold onto the data and deliver it
1295 * to the application. However, if we are the target of a SYN-flood
1296 * DoS attack, an attacker could send data which would eventually
1297 * consume all available buffer space if it were ACKed. By not ACKing
1298 * the data, we avoid this DoS scenario.
1299 *
1300 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
1301 * cookie is processed and a new socket is created. In this case, any data
1302 * accompanying the SYN will be queued to the socket by tcp_input() and will
1303 * be ACKed either when the application sends response data or the delayed
1304 * ACK timer expires, whichever comes first.
1305 */
1306 int
1307 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1308 struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
1309 void *todctx, uint8_t iptos)
1310 {
1311 struct tcpcb *tp;
1312 struct socket *so;
1313 struct syncache *sc = NULL;
1314 struct syncache_head *sch;
1315 struct mbuf *ipopts = NULL;
1316 u_int ltflags;
1317 int win, ip_ttl, ip_tos;
1318 char *s;
1319 int rv = 0;
1320 #ifdef INET6
1321 int autoflowlabel = 0;
1322 #endif
1323 #ifdef MAC
1324 struct label *maclabel;
1325 #endif
1326 struct syncache scs;
1327 struct ucred *cred;
1328 uint64_t tfo_response_cookie;
1329 unsigned int *tfo_pending = NULL;
1330 int tfo_cookie_valid = 0;
1331 int tfo_response_cookie_valid = 0;
1332
1333 INP_WLOCK_ASSERT(inp); /* listen socket */
1334 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
1335 ("%s: unexpected tcp flags", __func__));
1336
1337 /*
1338 * Combine all so/tp operations very early to drop the INP lock as
1339 * soon as possible.
1340 */
1341 so = *lsop;
1342 KASSERT(SOLISTENING(so), ("%s: %p not listening", __func__, so));
1343 tp = sototcpcb(so);
1344 cred = crhold(so->so_cred);
1345
1346 #ifdef INET6
1347 if (inc->inc_flags & INC_ISIPV6) {
1348 if (inp->inp_flags & IN6P_AUTOFLOWLABEL) {
1349 autoflowlabel = 1;
1350 }
1351 ip_ttl = in6_selecthlim(inp, NULL);
1352 if ((inp->in6p_outputopts == NULL) ||
1353 (inp->in6p_outputopts->ip6po_tclass == -1)) {
1354 ip_tos = 0;
1355 } else {
1356 ip_tos = inp->in6p_outputopts->ip6po_tclass;
1357 }
1358 }
1359 #endif
1360 #if defined(INET6) && defined(INET)
1361 else
1362 #endif
1363 #ifdef INET
1364 {
1365 ip_ttl = inp->inp_ip_ttl;
1366 ip_tos = inp->inp_ip_tos;
1367 }
1368 #endif
1369 win = so->sol_sbrcv_hiwat;
1370 ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));
1371
1372 if (V_tcp_fastopen_server_enable && IS_FASTOPEN(tp->t_flags) &&
1373 (tp->t_tfo_pending != NULL) &&
1374 (to->to_flags & TOF_FASTOPEN)) {
1375 /*
1376 * Limit the number of pending TFO connections to
1377 * approximately half of the queue limit. This prevents TFO
1378 * SYN floods from starving the service by filling the
1379 * listen queue with bogus TFO connections.
1380 */
1381 if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
1382 (so->sol_qlimit / 2)) {
1383 int result;
1384
1385 result = tcp_fastopen_check_cookie(inc,
1386 to->to_tfo_cookie, to->to_tfo_len,
1387 &tfo_response_cookie);
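			/*
			 * result > 0: the client's cookie was accepted;
			 * result == 0: rejected, but a fresh cookie to send
			 * back was generated; result < 0: send no cookie.
			 */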
1388 tfo_cookie_valid = (result > 0);
1389 tfo_response_cookie_valid = (result >= 0);
1390 }
1391
1392 /*
1393 * Remember the TFO pending counter as it will have to be
1394 * decremented below if we don't make it to syncache_tfo_expand().
1395 */
1396 tfo_pending = tp->t_tfo_pending;
1397 }
1398
1399 /* By the time we drop the lock these should no longer be used. */
1400 so = NULL;
1401 tp = NULL;
1402
1403 #ifdef MAC
1404 if (mac_syncache_init(&maclabel) != 0) {
1405 INP_WUNLOCK(inp);
1406 goto done;
1407 } else
1408 mac_syncache_create(maclabel, inp);
1409 #endif
1410 if (!tfo_cookie_valid)
1411 INP_WUNLOCK(inp);
1412
1413 /*
1414 * Remember the IP options, if any.
1415 */
1416 #ifdef INET6
1417 if (!(inc->inc_flags & INC_ISIPV6))
1418 #endif
1419 #ifdef INET
1420 ipopts = (m) ? ip_srcroute(m) : NULL;
1421 #else
1422 ipopts = NULL;
1423 #endif
1424
1425 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1426 /*
1427 * If listening socket requested TCP digests, check that received
1428 * SYN has signature and it is correct. If signature doesn't match
1429 * or TCP_SIGNATURE support isn't enabled, drop the packet.
1430 */
1431 if (ltflags & TF_SIGNATURE) {
1432 if ((to->to_flags & TOF_SIGNATURE) == 0) {
1433 TCPSTAT_INC(tcps_sig_err_nosigopt);
1434 goto done;
1435 }
1436 if (!TCPMD5_ENABLED() ||
1437 TCPMD5_INPUT(m, th, to->to_signature) != 0)
1438 goto done;
1439 }
1440 #endif /* TCP_SIGNATURE */
1441 /*
1442 * See if we already have an entry for this connection.
1443 * If we do, resend the SYN,ACK, and reset the retransmit timer.
1444 *
1445 * XXX: should the syncache be re-initialized with the contents
1446 * of the new SYN here (which may have different options?)
1447 *
1448 * XXX: We do not check the sequence number to see if this is a
1449 * real retransmit or a new connection attempt. The question is
1450 * how to handle such a case; either ignore it as spoofed, or
1451 * drop the current entry and create a new one?
1452 */
1453 sc = syncache_lookup(inc, &sch); /* returns locked entry */
1454 SCH_LOCK_ASSERT(sch);
1455 if (sc != NULL) {
1456 if (tfo_cookie_valid)
1457 INP_WUNLOCK(inp);
1458 TCPSTAT_INC(tcps_sc_dupsyn);
1459 if (ipopts) {
1460 /*
1461 * If we were remembering a previous source route,
1462 * forget it and use the new one we've been given.
1463 */
1464 if (sc->sc_ipopts)
1465 (void) m_free(sc->sc_ipopts);
1466 sc->sc_ipopts = ipopts;
1467 }
1468 /*
1469 * Update timestamp if present.
1470 */
1471 if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
1472 sc->sc_tsreflect = to->to_tsval;
1473 else
1474 sc->sc_flags &= ~SCF_TIMESTAMP;
1475 /*
1476 * Disable ECN if needed.
1477 */
1478 if ((sc->sc_flags & SCF_ECN) &&
1479 ((th->th_flags & (TH_ECE|TH_CWR)) != (TH_ECE|TH_CWR))) {
1480 sc->sc_flags &= ~SCF_ECN;
1481 }
1482 #ifdef MAC
1483 /*
1484 * Since we have already unconditionally allocated label
1485 * storage, free it up. The syncache entry will already
1486 * have an initialized label we can use.
1487 */
1488 mac_syncache_destroy(&maclabel);
1489 #endif
1490 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1491 /* Retransmit SYN|ACK and reset retransmit count. */
1492 if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
1493 log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
1494 "resetting timer and retransmitting SYN|ACK\n",
1495 s, __func__);
1496 free(s, M_TCPLOG);
1497 }
1498 if (syncache_respond(sc, sch, m, TH_SYN|TH_ACK) == 0) {
1499 sc->sc_rxmits = 0;
1500 syncache_timeout(sc, sch, 1);
1501 TCPSTAT_INC(tcps_sndacks);
1502 TCPSTAT_INC(tcps_sndtotal);
1503 }
1504 SCH_UNLOCK(sch);
1505 goto donenoprobe;
1506 }
1507
1508 if (tfo_cookie_valid) {
1509 bzero(&scs, sizeof(scs));
1510 sc = &scs;
1511 goto skip_alloc;
1512 }
1513
1514 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1515 if (sc == NULL) {
1516 /*
1517 * The zone allocator couldn't provide more entries.
1518 * Treat this as if the cache was full; drop the oldest
1519 * entry and insert the new one.
1520 */
1521 TCPSTAT_INC(tcps_sc_zonefail);
1522 if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) {
1523 sch->sch_last_overflow = time_uptime;
1524 syncache_drop(sc, sch);
1525 }
1526 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1527 if (sc == NULL) {
1528 if (V_tcp_syncookies) {
1529 bzero(&scs, sizeof(scs));
1530 sc = &scs;
1531 } else {
1532 SCH_UNLOCK(sch);
1533 if (ipopts)
1534 (void) m_free(ipopts);
1535 goto done;
1536 }
1537 }
1538 }
1539
1540 skip_alloc:
1541 if (!tfo_cookie_valid && tfo_response_cookie_valid)
1542 sc->sc_tfo_cookie = &tfo_response_cookie;
1543
1544 /*
1545 * Fill in the syncache values.
1546 */
1547 #ifdef MAC
1548 sc->sc_label = maclabel;
1549 #endif
1550 sc->sc_cred = cred;
1551 cred = NULL;
1552 sc->sc_ipopts = ipopts;
1553 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
1554 sc->sc_ip_tos = ip_tos;
1555 sc->sc_ip_ttl = ip_ttl;
1556 #ifdef TCP_OFFLOAD
1557 sc->sc_tod = tod;
1558 sc->sc_todctx = todctx;
1559 #endif
1560 sc->sc_irs = th->th_seq;
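	/*
	 * Pick a random ISS; if syncookies are enabled, it is replaced
	 * below by the value computed in syncookie_generate().
	 */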
1561 sc->sc_iss = arc4random();
1562 sc->sc_flags = 0;
1563 sc->sc_flowlabel = 0;
1564
1565 /*
1566 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
1567 * win was derived from socket earlier in the function.
1568 */
1569 win = imax(win, 0);
1570 win = imin(win, TCP_MAXWIN);
1571 sc->sc_wnd = win;
1572
1573 if (V_tcp_do_rfc1323) {
1574 /*
1575 * A timestamp received in a SYN makes
1576 * it ok to send timestamp requests and replies.
1577 */
1578 if (to->to_flags & TOF_TS) {
1579 sc->sc_tsreflect = to->to_tsval;
1580 sc->sc_flags |= SCF_TIMESTAMP;
1581 sc->sc_tsoff = tcp_new_ts_offset(inc);
1582 }
1583 if (to->to_flags & TOF_SCALE) {
1584 int wscale = 0;
1585
1586 /*
1587 * Pick the smallest possible scaling factor that
1588 * will still allow us to scale up to sb_max, aka
1589 * kern.ipc.maxsockbuf.
1590 *
1591 * We do this because there are broken firewalls that
1592 * will corrupt the window scale option, leading to
1593 * the other endpoint believing that our advertised
1594 * window is unscaled. At scale factors larger than
1595 * 5 the unscaled window will drop below 1500 bytes,
1596 * leading to serious problems when traversing these
1597 * broken firewalls.
1598 *
1599 * With the default maxsockbuf of 256K, a scale factor
1600 * of 3 will be chosen by this algorithm. Those who
1601 * choose a larger maxsockbuf should watch out
1602 * for the compatibility problems mentioned above.
1603 *
1604 * RFC1323: The Window field in a SYN (i.e., a <SYN>
1605 * or <SYN,ACK>) segment itself is never scaled.
1606 */
1607 while (wscale < TCP_MAX_WINSHIFT &&
1608 (TCP_MAXWIN << wscale) < sb_max)
1609 wscale++;
1610 sc->sc_requested_r_scale = wscale;
1611 sc->sc_requested_s_scale = to->to_wscale;
1612 sc->sc_flags |= SCF_WINSCALE;
1613 }
1614 }
1615 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1616 /*
1617 * If listening socket requested TCP digests, flag this in the
1618 * syncache so that syncache_respond() will do the right thing
1619 * with the SYN+ACK.
1620 */
1621 if (ltflags & TF_SIGNATURE)
1622 sc->sc_flags |= SCF_SIGNATURE;
1623 #endif /* TCP_SIGNATURE */
1624 if (to->to_flags & TOF_SACKPERM)
1625 sc->sc_flags |= SCF_SACK;
1626 if (to->to_flags & TOF_MSS)
1627 sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
1628 if (ltflags & TF_NOOPT)
1629 sc->sc_flags |= SCF_NOOPT;
1630 if (((th->th_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) &&
1631 V_tcp_do_ecn)
1632 sc->sc_flags |= SCF_ECN;
1633
1634 if (V_tcp_syncookies)
1635 sc->sc_iss = syncookie_generate(sch, sc);
1636 #ifdef INET6
1637 if (autoflowlabel) {
1638 if (V_tcp_syncookies)
1639 sc->sc_flowlabel = sc->sc_iss;
1640 else
1641 sc->sc_flowlabel = ip6_randomflowlabel();
1642 sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
1643 }
1644 #endif
1645 SCH_UNLOCK(sch);
1646
1647 if (tfo_cookie_valid) {
1648 syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
1649 /* INP_WUNLOCK(inp) will be performed by the caller */
1650 rv = 1;
1651 goto tfo_expanded;
1652 }
1653
1654 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1655 /*
1656 * Do a standard 3-way handshake.
1657 */
1658 if (syncache_respond(sc, sch, m, TH_SYN|TH_ACK) == 0) {
1659 if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
1660 syncache_free(sc);
1661 else if (sc != &scs)
1662 syncache_insert(sc, sch); /* locks and unlocks sch */
1663 TCPSTAT_INC(tcps_sndacks);
1664 TCPSTAT_INC(tcps_sndtotal);
1665 } else {
1666 if (sc != &scs)
1667 syncache_free(sc);
1668 TCPSTAT_INC(tcps_sc_dropped);
1669 }
1670 goto donenoprobe;
1671
1672 done:
1673 TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
1674 donenoprobe:
1675 if (m) {
1676 *lsop = NULL;
1677 m_freem(m);
1678 }
1679 /*
1680 * If tfo_pending is not NULL here, then a TFO SYN that did not
1681 * result in a new socket was processed and the associated pending
1682 * counter has not yet been decremented. All such TFO processing paths
1683 * transit this point.
1684 */
1685 if (tfo_pending != NULL)
1686 tcp_fastopen_decrement_counter(tfo_pending);
1687
1688 tfo_expanded:
1689 if (cred != NULL)
1690 crfree(cred);
1691 #ifdef MAC
1692 if (sc == &scs)
1693 mac_syncache_destroy(&maclabel);
1694 #endif
1695 return (rv);
1696 }
1697
1698 /*
1699 * Send SYN|ACK or ACK to the peer. Either in response to a peer's segment,
1700 * i.e. m0 != NULL, or upon 3WHS ACK timeout, i.e. m0 == NULL.
1701 */
1702 static int
1703 syncache_respond(struct syncache *sc, struct syncache_head *sch,
1704 const struct mbuf *m0, int flags)
1705 {
1706 struct ip *ip = NULL;
1707 struct mbuf *m;
1708 struct tcphdr *th = NULL;
1709 int optlen, error = 0; /* Make compiler happy */
1710 u_int16_t hlen, tlen, mssopt;
1711 struct tcpopt to;
1712 #ifdef INET6
1713 struct ip6_hdr *ip6 = NULL;
1714 #endif
1715 hlen =
1716 #ifdef INET6
1717 (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
1718 #endif
1719 sizeof(struct ip);
1720 tlen = hlen + sizeof(struct tcphdr);
1721
	/* Determine the MSS we advertise to the other end of the connection. */
1723 mssopt = max(tcp_mssopt(&sc->sc_inc), V_tcp_minmss);
1724
1725 /* XXX: Assume that the entire packet will fit in a header mbuf. */
1726 KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
1727 ("syncache: mbuf too small"));
1728
1729 /* Create the IP+TCP header from scratch. */
1730 m = m_gethdr(M_NOWAIT, MT_DATA);
1731 if (m == NULL)
1732 return (ENOBUFS);
1733 #ifdef MAC
1734 mac_syncache_create_mbuf(sc->sc_label, m);
1735 #endif
1736 m->m_data += max_linkhdr;
1737 m->m_len = tlen;
1738 m->m_pkthdr.len = tlen;
1739 m->m_pkthdr.rcvif = NULL;
1740
1741 #ifdef INET6
1742 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1743 ip6 = mtod(m, struct ip6_hdr *);
1744 ip6->ip6_vfc = IPV6_VERSION;
1745 ip6->ip6_nxt = IPPROTO_TCP;
1746 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1747 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1748 ip6->ip6_plen = htons(tlen - hlen);
1749 /* ip6_hlim is set after checksum */
1750 /* Zero out traffic class and flow label. */
1751 ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
1752 ip6->ip6_flow |= sc->sc_flowlabel;
1753 ip6->ip6_flow |= htonl(sc->sc_ip_tos << 20);
1754
1755 th = (struct tcphdr *)(ip6 + 1);
1756 }
1757 #endif
1758 #if defined(INET6) && defined(INET)
1759 else
1760 #endif
1761 #ifdef INET
1762 {
1763 ip = mtod(m, struct ip *);
1764 ip->ip_v = IPVERSION;
1765 ip->ip_hl = sizeof(struct ip) >> 2;
1766 ip->ip_len = htons(tlen);
1767 ip->ip_id = 0;
1768 ip->ip_off = 0;
1769 ip->ip_sum = 0;
1770 ip->ip_p = IPPROTO_TCP;
1771 ip->ip_src = sc->sc_inc.inc_laddr;
1772 ip->ip_dst = sc->sc_inc.inc_faddr;
1773 ip->ip_ttl = sc->sc_ip_ttl;
1774 ip->ip_tos = sc->sc_ip_tos;
1775
1776 /*
1777 * See if we should do MTU discovery. Route lookups are
1778 * expensive, so we will only unset the DF bit if:
1779 *
1780 * 1) path_mtu_discovery is disabled
1781 * 2) the SCF_UNREACH flag has been set
1782 */
1783 if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1784 ip->ip_off |= htons(IP_DF);
1785
1786 th = (struct tcphdr *)(ip + 1);
1787 }
1788 #endif /* INET */
1789 th->th_sport = sc->sc_inc.inc_lport;
1790 th->th_dport = sc->sc_inc.inc_fport;
1791
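	/*
	 * A SYN|ACK carries our ISS; a bare ACK (e.g. a challenge ACK)
	 * uses ISS+1 because the SYN has already consumed one sequence
	 * number.
	 */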
1792 if (flags & TH_SYN)
1793 th->th_seq = htonl(sc->sc_iss);
1794 else
1795 th->th_seq = htonl(sc->sc_iss + 1);
1796 th->th_ack = htonl(sc->sc_irs + 1);
1797 th->th_off = sizeof(struct tcphdr) >> 2;
1798 th->th_x2 = 0;
1799 th->th_flags = flags;
1800 th->th_win = htons(sc->sc_wnd);
1801 th->th_urp = 0;
1802
1803 if ((flags & TH_SYN) && (sc->sc_flags & SCF_ECN)) {
1804 th->th_flags |= TH_ECE;
1805 TCPSTAT_INC(tcps_ecn_shs);
1806 }
1807
1808 /* Tack on the TCP options. */
1809 if ((sc->sc_flags & SCF_NOOPT) == 0) {
1810 to.to_flags = 0;
1811
1812 if (flags & TH_SYN) {
1813 to.to_mss = mssopt;
1814 to.to_flags = TOF_MSS;
1815 if (sc->sc_flags & SCF_WINSCALE) {
1816 to.to_wscale = sc->sc_requested_r_scale;
1817 to.to_flags |= TOF_SCALE;
1818 }
1819 if (sc->sc_flags & SCF_SACK)
1820 to.to_flags |= TOF_SACKPERM;
1821 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1822 if (sc->sc_flags & SCF_SIGNATURE)
1823 to.to_flags |= TOF_SIGNATURE;
1824 #endif
1825 if (sc->sc_tfo_cookie) {
1826 to.to_flags |= TOF_FASTOPEN;
1827 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
1828 to.to_tfo_cookie = sc->sc_tfo_cookie;
1829 /* don't send cookie again when retransmitting response */
1830 sc->sc_tfo_cookie = NULL;
1831 }
1832 }
1833 if (sc->sc_flags & SCF_TIMESTAMP) {
1834 to.to_tsval = sc->sc_tsoff + tcp_ts_getticks();
1835 to.to_tsecr = sc->sc_tsreflect;
1836 to.to_flags |= TOF_TS;
1837 }
1838 optlen = tcp_addoptions(&to, (u_char *)(th + 1));
1839
1840 /* Adjust headers by option size. */
1841 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1842 m->m_len += optlen;
1843 m->m_pkthdr.len += optlen;
1844 #ifdef INET6
1845 if (sc->sc_inc.inc_flags & INC_ISIPV6)
1846 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
1847 else
1848 #endif
1849 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1850 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1851 if (sc->sc_flags & SCF_SIGNATURE) {
1852 KASSERT(to.to_flags & TOF_SIGNATURE,
1853 ("tcp_addoptions() didn't set tcp_signature"));
1854
1855 /* NOTE: to.to_signature is inside of mbuf */
1856 if (!TCPMD5_ENABLED() ||
1857 TCPMD5_OUTPUT(m, th, to.to_signature) != 0) {
1858 m_freem(m);
1859 return (EACCES);
1860 }
1861 }
1862 #endif
1863 } else
1864 optlen = 0;
1865
1866 M_SETFIB(m, sc->sc_inc.inc_fibnum);
1867 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1868 /*
1869 * If we have peer's SYN and it has a flowid, then let's assign it to
1870 * our SYN|ACK. ip6_output() and ip_output() will not assign flowid
1871 * to SYN|ACK due to lack of inp here.
1872 */
1873 if (m0 != NULL && M_HASHTYPE_GET(m0) != M_HASHTYPE_NONE) {
1874 m->m_pkthdr.flowid = m0->m_pkthdr.flowid;
1875 M_HASHTYPE_SET(m, M_HASHTYPE_GET(m0));
1876 }
1877 #ifdef INET6
1878 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1879 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
1880 th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
1881 IPPROTO_TCP, 0);
1882 ip6->ip6_hlim = sc->sc_ip_ttl;
1883 #ifdef TCP_OFFLOAD
1884 if (ADDED_BY_TOE(sc)) {
1885 struct toedev *tod = sc->sc_tod;
1886
1887 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1888
1889 return (error);
1890 }
1891 #endif
1892 TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
1893 error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1894 }
1895 #endif
1896 #if defined(INET6) && defined(INET)
1897 else
1898 #endif
1899 #ifdef INET
1900 {
1901 m->m_pkthdr.csum_flags = CSUM_TCP;
1902 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1903 htons(tlen + optlen - hlen + IPPROTO_TCP));
1904 #ifdef TCP_OFFLOAD
1905 if (ADDED_BY_TOE(sc)) {
1906 struct toedev *tod = sc->sc_tod;
1907
1908 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1909
1910 return (error);
1911 }
1912 #endif
1913 TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
1914 error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
1915 }
1916 #endif
1917 return (error);
1918 }
1919
1920 /*
1921 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
1922 * that exceed the capacity of the syncache by avoiding the storage of any
1923 * of the SYNs we receive. Syncookies defend against blind SYN flooding
1924 * attacks where the attacker does not have access to our responses.
1925 *
1926 * Syncookies encode and include all necessary information about the
1927 * connection setup within the SYN|ACK that we send back. That way we
1928 * can avoid keeping any local state until the ACK to our SYN|ACK returns
1929 * (if ever). Normally the syncache and syncookies are running in parallel
1930  * with the latter taking over when the former is exhausted. When a matching
1931  * syncache entry is found, the syncookie is ignored.
1932 *
1933  * The only reliable information persisting through the 3WHS is our 32-bit
1934  * initial sequence number (ISS). Syncookies embed a sufficiently strong
1935  * cryptographic hash (MAC) value and a few bits of TCP SYN options in the
1936  * ISS of our SYN|ACK. The MAC can be recomputed when the ACK to our SYN|ACK
1937  * returns; a match signifies a legitimate connection attempt.
1938 *
1939 * The available space of 32 bits to store the hash and to encode the SYN
1940  * option information is very tight, and we should have at least 24 bits for
1941  * the MAC to keep the number of guesses required for blind spoofing reasonably high.
1942 *
1943 * SYN option information we have to encode to fully restore a connection:
1944  * MSS: is important to choose an optimal segment size to avoid IP level
1945  * fragmentation along the path. The common MSS values can be encoded
1946  * in a 3-bit table. Uncommon values are captured by the next lower value
1947  * in the table, leading to a slight increase in packetization overhead.
1948 * WSCALE: is necessary to allow large windows to be used for high delay-
1949 * bandwidth product links. Not scaling the window when it was initially
1950 * negotiated is bad for performance as lack of scaling further decreases
1951 * the apparent available send window. We only need to encode the WSCALE
1952 * we received from the remote end. Our end can be recalculated at any
1953 * time. The common WSCALE values can be encoded in a 3-bit table.
1954  * Uncommon values are captured by the next lower value in the table,
1955  * making us under-estimate the available window size and halving our
1956 * theoretically possible maximum throughput for that connection.
1957 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
1958  * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
1959 * that are included in all segments on a connection. We enable them when
1960 * the ACK has them.
1961 *
1962 * Security of syncookies and attack vectors:
1963 *
1964 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
1965  * together with the global secret to make it unique per connection attempt.
1966 * Thus any change of any of those parameters results in a different MAC output
1967 * in an unpredictable way unless a collision is encountered. 24 bits of the
1968 * MAC are embedded into the ISS.
1969 *
1970 * To prevent replay attacks two rotating global secrets are updated with a
1971 * new random value every 15 seconds. The life-time of a syncookie is thus
1972 * 15-30 seconds.
1973 *
1974 * Vector 1: Attacking the secret. This requires finding a weakness in the
1975  * MAC itself or the way it is used here. The attacker can do a chosen
1976  * plaintext attack by varying and testing all the parameters under his control.
1977 * The strength depends on the size and randomness of the secret, and the
1978 * cryptographic security of the MAC function. Due to the constant updating
1979 * of the secret the attacker has at most 29.999 seconds to find the secret
1980 * and launch spoofed connections. After that he has to start all over again.
1981 *
1982 * Vector 2: Collision attack on the MAC of a single ACK. With a 24 bit MAC
1983 * size an average of 4,823 attempts are required for a 50% chance of success
1984 * to spoof a single syncookie (birthday collision paradox). However the
1985 * attacker is blind and doesn't know if one of his attempts succeeded unless
1986  * he has a side channel from which to infer success. A 90% chance of setting
1987  * up a single connection requires 8,790 packets; a 99.99% chance requires 17,578 packets.
1988  * This many attempts are required for each blindly spoofed connection. For
1989  * every additional spoofed connection he has to launch another N attempts.
1990  * Thus, for a sustained rate of 100 spoofed connections per second, approximately
1991 * 1,800,000 packets per second would have to be sent.
1992 *
1993 * NB: The MAC function should be fast so that it doesn't become a CPU
1994 * exhaustion attack vector itself.
1995 *
1996 * References:
1997 * RFC4987 TCP SYN Flooding Attacks and Common Mitigations
1998 * SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
1999 * http://cr.yp.to/syncookies.html (overview)
2000 * http://cr.yp.to/syncookies/archive (details)
2001 *
2002 *
2003 * Schematic construction of a syncookie enabled Initial Sequence Number:
2004 * 0 1 2 3
2005 * 12345678901234567890123456789012
2006 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
2007 *
2008 * x 24 MAC (truncated)
2009 * W 3 Send Window Scale index
2010 * M 3 MSS index
2011 * S 1 SACK permitted
2012 * P 1 Odd/even secret
2013 */
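
/*
 * Illustrative sketch (not compiled): how the 8 cookie bits from the
 * schematic above are folded into, and recovered from, the ISS.  The helper
 * names are made up for illustration only; the real work happens in
 * syncookie_generate() and syncookie_lookup() further below.
 */
#if 0
static inline uint32_t
syncookie_iss_pack_sketch(uint32_t mac, uint8_t cookie)
{

	/*
	 * Upper 24 bits: truncated MAC.  Low 8 bits: cookie flags XORed
	 * with the top byte of the MAC so they never appear verbatim on
	 * the wire.
	 */
	return ((mac & ~0xff) | (cookie ^ (mac >> 24)));
}

static inline uint8_t
syncookie_iss_unpack_sketch(uint32_t ack)
{

	/* Reverse the XOR using the MAC bits still carried in the ACK. */
	return ((ack & 0xff) ^ (ack >> 24));
}
#endif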
2014
2015 /*
2016 * Distribution and probability of certain MSS values. Those in between are
2017 * rounded down to the next lower one.
2018 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R. Nelson, 2011]
2019 * .2% .3% 5% 7% 7% 20% 15% 45%
2020 */
2021 static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 };
2022
2023 /*
2024 * Distribution and probability of certain WSCALE values. We have to map the
2025 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
2026  * bits based on prevalence of certain values. Values for which we don't have
2027  * an exact match are rounded down to the next lower one, letting us under-estimate
2028  * the true available window. At the moment this would happen only for the
2029 * very uncommon values 3, 5 and those above 8 (more than 16MB socket buffer
2030 * and window size). The absence of the WSCALE option (no scaling in either
2031 * direction) is encoded with index zero.
2032 * [WSCALE values histograms, Allman, 2012]
2033 * X 10 10 35 5 6 14 10% by host
2034 * X 11 4 5 5 18 49 3% by connections
2035 */
2036 static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };
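
/*
 * Illustrative sketch (not compiled): both tables are searched with the
 * same round-down scan used in syncookie_generate() below -- start at the
 * largest entry and walk down until a table value no greater than the
 * peer's value is found.  The helper name is made up for illustration.
 */
#if 0
static u_int
syncookie_tab_index_sketch(const int *tab, u_int nelem, int value)
{
	u_int i;

	for (i = nelem - 1; tab[i] > value && i > 0; i--)
		;
	/* E.g. a peer MSS of 1300 maps to index 2 (tcp_sc_msstab[2] == 1200). */
	return (i);
}
#endif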
2037
2038 /*
2039 * Compute the MAC for the SYN cookie. SIPHASH-2-4 is chosen for its speed
2040 * and good cryptographic properties.
2041 */
2042 static uint32_t
2043 syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
2044 uint8_t *secbits, uintptr_t secmod)
2045 {
2046 SIPHASH_CTX ctx;
2047 uint32_t siphash[2];
2048
2049 SipHash24_Init(&ctx);
2050 SipHash_SetKey(&ctx, secbits);
2051 switch (inc->inc_flags & INC_ISIPV6) {
2052 #ifdef INET
2053 case 0:
2054 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
2055 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
2056 break;
2057 #endif
2058 #ifdef INET6
2059 case INC_ISIPV6:
2060 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
2061 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
2062 break;
2063 #endif
2064 }
2065 SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
2066 SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
2067 SipHash_Update(&ctx, &irs, sizeof(irs));
2068 SipHash_Update(&ctx, &flags, sizeof(flags));
2069 SipHash_Update(&ctx, &secmod, sizeof(secmod));
2070 SipHash_Final((u_int8_t *)&siphash, &ctx);
2071
2072 return (siphash[0] ^ siphash[1]);
2073 }
2074
2075 static tcp_seq
2076 syncookie_generate(struct syncache_head *sch, struct syncache *sc)
2077 {
2078 u_int i, secbit, wscale;
2079 uint32_t iss, hash;
2080 uint8_t *secbits;
2081 union syncookie cookie;
2082
2083 SCH_LOCK_ASSERT(sch);
2084
2085 cookie.cookie = 0;
2086
2087 /* Map our computed MSS into the 3-bit index. */
2088 for (i = nitems(tcp_sc_msstab) - 1;
2089 tcp_sc_msstab[i] > sc->sc_peer_mss && i > 0;
2090 i--)
2091 ;
2092 cookie.flags.mss_idx = i;
2093
2094 /*
2095 * Map the send window scale into the 3-bit index but only if
2096 * the wscale option was received.
2097 */
2098 if (sc->sc_flags & SCF_WINSCALE) {
2099 wscale = sc->sc_requested_s_scale;
2100 for (i = nitems(tcp_sc_wstab) - 1;
2101 tcp_sc_wstab[i] > wscale && i > 0;
2102 i--)
2103 ;
2104 cookie.flags.wscale_idx = i;
2105 }
2106
2107 /* Can we do SACK? */
2108 if (sc->sc_flags & SCF_SACK)
2109 cookie.flags.sack_ok = 1;
2110
2111 /* Which of the two secrets to use. */
2112 secbit = sch->sch_sc->secret.oddeven & 0x1;
2113 cookie.flags.odd_even = secbit;
2114
2115 secbits = sch->sch_sc->secret.key[secbit];
2116 hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
2117 (uintptr_t)sch);
2118
2119 /*
2120 	 * XOR the cookie flags with the top byte of the hash and place them
2121 	 * in the low byte of the ISS for better ISS number variance. This
2122 	 * doesn't enhance the cryptographic strength and is done to prevent
2123 	 * the 8 cookie bits from showing up directly on the wire.
2124 */
2125 iss = hash & ~0xff;
2126 iss |= cookie.cookie ^ (hash >> 24);
2127
2128 TCPSTAT_INC(tcps_sc_sendcookie);
2129 return (iss);
2130 }
2131
2132 static struct syncache *
2133 syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
2134 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
2135 struct socket *lso)
2136 {
2137 uint32_t hash;
2138 uint8_t *secbits;
2139 tcp_seq ack, seq;
2140 int wnd, wscale = 0;
2141 union syncookie cookie;
2142
2143 SCH_LOCK_ASSERT(sch);
2144
2145 /*
2146 * Pull information out of SYN-ACK/ACK and revert sequence number
2147 * advances.
2148 */
2149 ack = th->th_ack - 1;
2150 seq = th->th_seq - 1;
2151
2152 /*
2153 * Unpack the flags containing enough information to restore the
2154 * connection.
2155 */
2156 cookie.cookie = (ack & 0xff) ^ (ack >> 24);
2157
2158 /* Which of the two secrets to use. */
2159 secbits = sch->sch_sc->secret.key[cookie.flags.odd_even];
2160
2161 hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);
2162
2163 /* The recomputed hash matches the ACK if this was a genuine cookie. */
2164 if ((ack & ~0xff) != (hash & ~0xff))
2165 return (NULL);
2166
2167 /* Fill in the syncache values. */
2168 sc->sc_flags = 0;
2169 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
2170 sc->sc_ipopts = NULL;
2171
2172 sc->sc_irs = seq;
2173 sc->sc_iss = ack;
2174
2175 switch (inc->inc_flags & INC_ISIPV6) {
2176 #ifdef INET
2177 case 0:
2178 sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
2179 sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
2180 break;
2181 #endif
2182 #ifdef INET6
2183 case INC_ISIPV6:
2184 if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
2185 sc->sc_flowlabel =
2186 htonl(sc->sc_iss) & IPV6_FLOWLABEL_MASK;
2187 break;
2188 #endif
2189 }
2190
2191 sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];
2192
2193 	/* We can simply recompute the receive window scale we sent earlier. */
2194 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
2195 wscale++;
2196
2197 	/* Only use wscale if it was enabled in the original SYN. */
2198 if (cookie.flags.wscale_idx > 0) {
2199 sc->sc_requested_r_scale = wscale;
2200 sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
2201 sc->sc_flags |= SCF_WINSCALE;
2202 }
2203
2204 wnd = lso->sol_sbrcv_hiwat;
2205 wnd = imax(wnd, 0);
2206 wnd = imin(wnd, TCP_MAXWIN);
2207 sc->sc_wnd = wnd;
2208
2209 if (cookie.flags.sack_ok)
2210 sc->sc_flags |= SCF_SACK;
2211
2212 if (to->to_flags & TOF_TS) {
2213 sc->sc_flags |= SCF_TIMESTAMP;
2214 sc->sc_tsreflect = to->to_tsval;
2215 sc->sc_tsoff = tcp_new_ts_offset(inc);
2216 }
2217
2218 if (to->to_flags & TOF_SIGNATURE)
2219 sc->sc_flags |= SCF_SIGNATURE;
2220
2221 sc->sc_rxmits = 0;
2222
2223 TCPSTAT_INC(tcps_sc_recvcookie);
2224 return (sc);
2225 }
2226
2227 #ifdef INVARIANTS
2228 static int
2229 syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
2230 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
2231 struct socket *lso)
2232 {
2233 struct syncache scs, *scx;
2234 char *s;
2235
2236 bzero(&scs, sizeof(scs));
2237 scx = syncookie_lookup(inc, sch, &scs, th, to, lso);
2238
2239 if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
2240 return (0);
2241
2242 if (scx != NULL) {
2243 if (sc->sc_peer_mss != scx->sc_peer_mss)
2244 log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
2245 s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);
2246
2247 if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
2248 log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
2249 s, __func__, sc->sc_requested_r_scale,
2250 scx->sc_requested_r_scale);
2251
2252 if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
2253 log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
2254 s, __func__, sc->sc_requested_s_scale,
2255 scx->sc_requested_s_scale);
2256
2257 if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
2258 log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
2259 }
2260
2261 if (s != NULL)
2262 free(s, M_TCPLOG);
2263 return (0);
2264 }
2265 #endif /* INVARIANTS */
2266
2267 static void
2268 syncookie_reseed(void *arg)
2269 {
2270 struct tcp_syncache *sc = arg;
2271 uint8_t *secbits;
2272 int secbit;
2273
2274 /*
2275 * Reseeding the secret doesn't have to be protected by a lock.
2276 * It only must be ensured that the new random values are visible
2277 * to all CPUs in a SMP environment. The atomic with release
2278 * semantics ensures that.
2279 */
2280 secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
2281 secbits = sc->secret.key[secbit];
2282 arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
2283 atomic_add_rel_int(&sc->secret.oddeven, 1);
2284
2285 	/* Reschedule ourselves. */
2286 callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
2287 }
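
/*
 * Illustrative sketch (not compiled): the reseed callout used above is
 * presumably armed once during syncache initialization, along these lines,
 * after both secrets have been seeded.  The exact code lives in
 * syncache_init(); this fragment is an assumption for illustration.
 */
#if 0
	arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
	arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
	callout_init(&V_tcp_syncache.secret.reseed, 1);
	callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
	    syncookie_reseed, &V_tcp_syncache);
#endif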
2288
2289 /*
2290 * Exports the syncache entries to userland so that netstat can display
2291 * them alongside the other sockets. This function is intended to be
2292 * called only from tcp_pcblist.
2293 *
2294 * Due to concurrency on an active system, the number of pcbs exported
2295 * may have no relation to max_pcbs. max_pcbs merely indicates the
2296 * amount of space the caller allocated for this function to use.
2297 */
2298 int
2299 syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
2300 {
2301 struct xtcpcb xt;
2302 struct syncache *sc;
2303 struct syncache_head *sch;
2304 int count, error, i;
2305
2306 for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
2307 sch = &V_tcp_syncache.hashbase[i];
2308 SCH_LOCK(sch);
2309 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
2310 if (count >= max_pcbs) {
2311 SCH_UNLOCK(sch);
2312 goto exit;
2313 }
2314 if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
2315 continue;
2316 bzero(&xt, sizeof(xt));
2317 xt.xt_len = sizeof(xt);
2318 if (sc->sc_inc.inc_flags & INC_ISIPV6)
2319 xt.xt_inp.inp_vflag = INP_IPV6;
2320 else
2321 xt.xt_inp.inp_vflag = INP_IPV4;
2322 bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
2323 sizeof (struct in_conninfo));
2324 xt.t_state = TCPS_SYN_RECEIVED;
2325 xt.xt_inp.xi_socket.xso_protocol = IPPROTO_TCP;
2326 xt.xt_inp.xi_socket.xso_len = sizeof (struct xsocket);
2327 xt.xt_inp.xi_socket.so_type = SOCK_STREAM;
2328 xt.xt_inp.xi_socket.so_state = SS_ISCONNECTING;
2329 error = SYSCTL_OUT(req, &xt, sizeof xt);
2330 if (error) {
2331 SCH_UNLOCK(sch);
2332 goto exit;
2333 }
2334 count++;
2335 }
2336 SCH_UNLOCK(sch);
2337 }
2338 exit:
2339 *pcbs_exported = count;
2340 	return (error);
2341 }
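
/*
 * Illustrative sketch (not compiled): a caller such as tcp_pcblist() is
 * expected to size its buffer up front and pass the remaining entry budget
 * as max_pcbs; pcbs_exported reports how many entries were actually copied
 * out.  Names and the budget value below are assumptions for illustration.
 */
#if 0
static int
tcp_pcblist_syncache_sketch(struct sysctl_req *req)
{
	int error, exported;

	error = syncache_pcblist(req, 128 /* hypothetical budget */, &exported);
	if (error == 0)
		log(LOG_DEBUG, "exported %d syncache entries\n", exported);
	return (error);
}
#endif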