/*-
 * Copyright (c) 2001 McAfee, Inc.
 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and McAfee Research, the Security Research Division of McAfee, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program. [2001 McAfee, Inc.]
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/netinet/tcp_syncache.c 293402 2016-01-08 00:46:28Z stas $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_pcbgroup.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>			/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/ucred.h>

#include <sys/md5.h>
#include <crypto/siphash/siphash.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCP_RFC7413
#include <netinet/tcp_fastopen.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/toecore.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static VNET_DEFINE(int, tcp_syncookies) = 1;
#define	V_tcp_syncookies	VNET(tcp_syncookies)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &VNET_NAME(tcp_syncookies), 0,
    "Use TCP SYN cookies if the syncache overflows");

static VNET_DEFINE(int, tcp_syncookiesonly) = 0;
#define	V_tcp_syncookiesonly	VNET(tcp_syncookiesonly)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_RW,
    &VNET_NAME(tcp_syncookiesonly), 0,
    "Use only TCP SYN cookies");

#ifdef TCP_OFFLOAD
#define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
#endif

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
static int	 syncache_respond(struct syncache *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static int	 syncache_sysctl_count(SYSCTL_HANDLER_ARGS);
static void	 syncache_timeout(struct syncache *sc, struct syncache_head *sch,
		    int docallout);
static void	 syncache_timer(void *);

static uint32_t	 syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
		    uint8_t *, uintptr_t);
static tcp_seq	 syncookie_generate(struct syncache_head *, struct syncache *);
static struct syncache
		*syncookie_lookup(struct in_conninfo *, struct syncache_head *,
		    struct syncache *, struct tcphdr *, struct tcpopt *,
		    struct socket *);
static void	 syncookie_reseed(void *);
#ifdef INVARIANTS
static int	 syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
		    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
		    struct socket *lso);
#endif
/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * Three retransmits correspond to a timeout of 3 * (1 + 2 + 4 + 8) == 45
 * seconds; by then the odds are that the user has given up attempting
 * to connect.
 */
#define	SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define	TCP_SYNCACHE_HASHSIZE		512
#define	TCP_SYNCACHE_BUCKETLIMIT	30
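
/*
 * With these defaults, syncache_init() below computes the overall
 * cache_limit as hashsize * bucket_limit = 512 * 30 = 15360 entries;
 * both factors and the product can be overridden at boot time via the
 * net.inet.tcp.syncache.{hashsize,bucketlimit,cachelimit} tunables.
 */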

static VNET_DEFINE(struct tcp_syncache, tcp_syncache);
#define	V_tcp_syncache		VNET(tcp_syncache)

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
    "TCP SYN cache");

SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.bucket_limit), 0,
    "Per-bucket hash limit for syncache");

SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.cache_limit), 0,
    "Overall entry limit for syncache");

SYSCTL_VNET_PROC(_net_inet_tcp_syncache, OID_AUTO, count, (CTLTYPE_UINT|CTLFLAG_RD),
    NULL, 0, &syncache_sysctl_count, "IU",
    "Current number of entries in syncache");

SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.hashsize), 0,
    "Size of TCP syncache hashtable");

SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &VNET_NAME(tcp_syncache.rexmt_limit), 0,
    "Limit on SYN/ACK retransmissions");

VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
    CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
    "Send reset on socket allocation failure");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define	SYNCACHE_HASH(inc, mask)					\
	((V_tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define	SYNCACHE_HASH6(inc, mask)					\
	((V_tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define	ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define	ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

#define	SCH_LOCK(sch)		mtx_lock(&(sch)->sch_mtx)
#define	SCH_UNLOCK(sch)		mtx_unlock(&(sch)->sch_mtx)
#define	SCH_LOCK_ASSERT(sch)	mtx_assert(&(sch)->sch_mtx, MA_OWNED)

/*
 * Requires the syncache entry to be already removed from the bucket list.
 */
static void
syncache_free(struct syncache *sc)
{

	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);
	if (sc->sc_cred)
		crfree(sc->sc_cred);
#ifdef MAC
	mac_syncache_destroy(&sc->sc_label);
#endif

	uma_zfree(V_tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	V_tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &V_tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &V_tcp_syncache.bucket_limit);
	if (!powerof2(V_tcp_syncache.hashsize) ||
	    V_tcp_syncache.hashsize == 0) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	}
	V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;

	/* Set limits. */
	V_tcp_syncache.cache_limit =
	    V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &V_tcp_syncache.cache_limit);

	/* Allocate the hash table. */
	V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
	    sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);

#ifdef VIMAGE
	V_tcp_syncache.vnet = curvnet;
#endif

	/* Initialize the hash buckets. */
	for (i = 0; i < V_tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
		mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
		    NULL, MTX_DEF);
		callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
		    &V_tcp_syncache.hashbase[i].sch_mtx, 0);
		V_tcp_syncache.hashbase[i].sch_length = 0;
		V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
	}

	/* Create the syncache entry zone. */
	V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
	    V_tcp_syncache.cache_limit);

	/* Start the SYN cookie reseeder callout. */
	callout_init(&V_tcp_syncache.secret.reseed, 1);
	arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
	arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
	callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
	    syncookie_reseed, &V_tcp_syncache);
}

#ifdef VIMAGE
void
syncache_destroy(void)
{
	struct syncache_head *sch;
	struct syncache *sc, *nsc;
	int i;

	/* Cleanup hash buckets: stop timers, free entries, destroy locks. */
	for (i = 0; i < V_tcp_syncache.hashsize; i++) {

		sch = &V_tcp_syncache.hashbase[i];
		callout_drain(&sch->sch_timer);

		SCH_LOCK(sch);
		TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
			syncache_drop(sc, sch);
		SCH_UNLOCK(sch);
		KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
		    ("%s: sch->sch_bucket not empty", __func__));
		KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
		    __func__, sch->sch_length));
		mtx_destroy(&sch->sch_mtx);
	}

	KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
	    ("%s: cache_count not 0", __func__));

	/* Free the allocated global resources. */
	uma_zdestroy(V_tcp_syncache.zone);
	free(V_tcp_syncache.hashbase, M_SYNCACHE);

	callout_drain(&V_tcp_syncache.secret.reseed);
}
#endif

static int
syncache_sysctl_count(SYSCTL_HANDLER_ARGS)
{
	int count;

	count = uma_zone_get_cur(V_tcp_syncache.zone);
	return (sysctl_handle_int(oidp, &count, 0, req));
}

/*
 * Inserts a syncache entry into the specified bucket row.
 * Locks and unlocks the syncache_head autonomously.
 */
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct syncache *sc2;

	SCH_LOCK(sch);

	/*
	 * Make sure that we don't overflow the per-bucket limit.
	 * If the bucket is full, toss the oldest element.
	 */
	if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
		KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
		    ("sch->sch_length incorrect"));
		sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
		syncache_drop(sc2, sch);
		TCPSTAT_INC(tcps_sc_bucketoverflow);
	}

	/* Put it into the bucket. */
	TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;

#ifdef TCP_OFFLOAD
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_syncache_added(tod, sc->sc_todctx);
	}
#endif

	/* Reinitialize the bucket row's timer. */
	if (sch->sch_length == 1)
		sch->sch_nextc = ticks + INT_MAX;
	syncache_timeout(sc, sch, 1);

	SCH_UNLOCK(sch);

	TCPSTAT_INC(tcps_sc_added);
}

/*
 * Remove and free entry from syncache bucket row.
 * Expects locked syncache head.
 */
static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{

	SCH_LOCK_ASSERT(sch);

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;

#ifdef TCP_OFFLOAD
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_syncache_removed(tod, sc->sc_todctx);
	}
#endif

	syncache_free(sc);
}

/*
 * Engage/reengage timer on bucket row.
 */
static void
syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
{
	sc->sc_rxttime = ticks +
	    TCPTV_RTOBASE * (tcp_syn_backoff[sc->sc_rxmits]);
	sc->sc_rxmits++;
	if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
		sch->sch_nextc = sc->sc_rxttime;
		if (docallout)
			callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
			    syncache_timer, (void *)sch);
	}
}
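
/*
 * Example schedule: assuming the backoff factors cited above
 * SYNCACHE_MAXREXMTS (1, 2, 4 and 8) with a TCPTV_RTOBASE of 3 seconds,
 * the SYN|ACK is (re)sent at roughly t = 0, 3, 9 and 21 seconds and the
 * entry is given up at t = 45 seconds.
 */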

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 * One separate timer for each bucket row.
 */
static void
syncache_timer(void *xsch)
{
	struct syncache_head *sch = (struct syncache_head *)xsch;
	struct syncache *sc, *nsc;
	int tick = ticks;
	char *s;

	CURVNET_SET(sch->sch_sc->vnet);

	/* NB: syncache_head has already been locked by the callout. */
	SCH_LOCK_ASSERT(sch);

	/*
	 * In the following cycle we may remove some entries and/or
	 * advance some timeouts, so re-initialize the bucket timer.
	 */
	sch->sch_nextc = tick + INT_MAX;

	TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
		/*
		 * We do not check if the listen socket still exists
		 * and accept the case where the listen socket may be
		 * gone by the time we resend the SYN/ACK.  We do
		 * not expect this to happen often.  If it does,
		 * then the RST will be sent by the time the remote
		 * host does the SYN/ACK->ACK.
		 */
		if (TSTMP_GT(sc->sc_rxttime, tick)) {
			if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
				sch->sch_nextc = sc->sc_rxttime;
			continue;
		}
		if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
				    "giving up and removing syncache entry\n",
				    s, __func__);
				free(s, M_TCPLOG);
			}
			syncache_drop(sc, sch);
			TCPSTAT_INC(tcps_sc_stale);
			continue;
		}
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Response timeout, "
			    "retransmitting (%u) SYN|ACK\n",
			    s, __func__, sc->sc_rxmits);
			free(s, M_TCPLOG);
		}

		(void) syncache_respond(sc);
		TCPSTAT_INC(tcps_sc_retransmitted);
		syncache_timeout(sc, sch, 0);
	}
	if (!TAILQ_EMPTY(&(sch)->sch_bucket))
		callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
		    syncache_timer, (void *)(sch));
	CURVNET_RESTORE();
}

/*
 * Find an entry in the syncache.
 * Always returns with a locked syncache_head, plus a matching entry or NULL.
 */
static struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct syncache *sc;
	struct syncache_head *sch;

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		sch = &V_tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, V_tcp_syncache.hashmask)];
		*schp = sch;

		SCH_LOCK(sch);

		/* Circle through bucket row to find matching entry. */
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	} else
#endif
	{
		sch = &V_tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, V_tcp_syncache.hashmask)];
		*schp = sch;

		SCH_LOCK(sch);

		/* Circle through bucket row to find matching entry. */
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_flags & INC_ISIPV6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	SCH_LOCK_ASSERT(*schp);
	return (NULL);			/* always returns with locked sch */
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;
	char *s = NULL;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);

	/*
	 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
	 * See RFC 793 page 65, section SEGMENT ARRIVES.
	 */
	if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
			    "FIN flag set, segment ignored\n", s, __func__);
		TCPSTAT_INC(tcps_badrst);
		goto done;
	}

	/*
	 * No corresponding connection was found in syncache.
	 * If syncookies are enabled and possibly exclusively
	 * used, or we are under memory pressure, a valid RST
	 * may not find a syncache entry.  In that case we're
	 * done and no SYN|ACK retransmissions will happen.
	 * Otherwise the RST was misdirected or spoofed.
	 */
	if (sc == NULL) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
			    "syncache entry (possibly syncookie only), "
			    "segment ignored\n", s, __func__);
		TCPSTAT_INC(tcps_badrst);
		goto done;
	}

	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
			    "connection attempt aborted by remote endpoint\n",
			    s, __func__);
		TCPSTAT_INC(tcps_sc_reset);
	} else {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
			    "IRS %u (+WND %u), segment ignored\n",
			    s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
		TCPSTAT_INC(tcps_badrst);
	}

done:
	if (s != NULL)
		free(s, M_TCPLOG);
	SCH_UNLOCK(sch);
}

void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		TCPSTAT_INC(tcps_sc_badack);
	}
	SCH_UNLOCK(sch);
}

void
syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL)
		goto done;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		goto done;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
		sc->sc_flags |= SCF_UNREACH;
		goto done;
	}
	syncache_drop(sc, sch);
	TCPSTAT_INC(tcps_sc_unreach);
done:
	SCH_UNLOCK(sch);
}

/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;
	int error;
	char *s;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will either send a RST or
		 * have the peer retransmit its SYN again after its
		 * RTO and try again.
		 */
		TCPSTAT_INC(tcps_listendrop);
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Socket create failed "
			    "due to limits or memory shortage\n",
			    s, __func__);
			free(s, M_TCPLOG);
		}
		goto abort2;
	}
#ifdef MAC
	mac_socketpeer_set_from_mbuf(m, so);
#endif

	inp = sotoinpcb(so);
	inp->inp_inc.inc_fibnum = so->so_fibnum;
	INP_WLOCK(inp);
	INP_HASH_WLOCK(&V_tcbinfo);

	/* Insert new socket into PCB hash list. */
	inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif

	/*
	 * If there's an mbuf and it has a flowid, then let's initialise the
	 * inp with that particular flowid.
	 */
	if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}

	/*
	 * Install in the reservation hash table for now, but don't yet
	 * install a connection group since the full 4-tuple isn't yet
	 * configured.
	 */
	inp->inp_lport = sc->sc_inc.inc_lport;
	if ((error = in_pcbinshash_nopcbgroup(inp)) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_flags & INC_ISIPV6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
			    "with error %i\n",
			    s, __func__, error);
			free(s, M_TCPLOG);
		}
		INP_HASH_WUNLOCK(&V_tcbinfo);
		goto abort;
	}
#ifdef IPSEC
	/* Copy old policy into new socket's. */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_socket: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred, m)) != 0) {
			inp->in6p_laddr = laddr6;
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
				    "with error %i\n",
				    s, __func__, error);
				free(s, M_TCPLOG);
			}
			INP_HASH_WUNLOCK(&V_tcbinfo);
			goto abort;
		}
		/* Override flowlabel from in6_pcbconnect. */
		inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
		inp->inp_flow |= sc->sc_flowlabel;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = (m) ? ip_srcroute(m) : NULL;

		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred, m)) != 0) {
			inp->inp_laddr = laddr;
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
				    "with error %i\n",
				    s, __func__, error);
				free(s, M_TCPLOG);
			}
			INP_HASH_WUNLOCK(&V_tcbinfo);
			goto abort;
		}
	}
#endif /* INET */
	INP_HASH_WUNLOCK(&V_tcbinfo);
	tp = intotcpcb(inp);
	tcp_state_change(tp, TCPS_SYN_RECEIVED);
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->snd_max = tp->iss + 1;
	tp->snd_nxt = tp->iss + 1;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	else {
		if (sc->sc_flags & SCF_WINSCALE) {
			tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
			tp->snd_scale = sc->sc_requested_s_scale;
			tp->request_r_scale = sc->sc_requested_r_scale;
		}
		if (sc->sc_flags & SCF_TIMESTAMP) {
			tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
			tp->ts_recent = sc->sc_tsreflect;
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_offset = sc->sc_tsoff;
		}
#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			tp->t_flags |= TF_SIGNATURE;
#endif
		if (sc->sc_flags & SCF_SACK)
			tp->t_flags |= TF_SACK_PERMIT;
	}

	if (sc->sc_flags & SCF_ECN)
		tp->t_flags |= TF_ECN_PERMIT;

	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, arrange for the CWND to be
	 * limited to one segment in cc_conn_init().
	 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
	 */
	if (sc->sc_rxmits > 1)
		tp->snd_cwnd = 1;

#ifdef TCP_OFFLOAD
	/*
	 * Allow a TOE driver to install its hooks.  Note that we hold the
	 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
	 * new connection before the TOE driver has done its thing.
	 */
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_offload_socket(tod, sc->sc_todctx, so);
	}
#endif
	/*
	 * Copy and activate timers.
	 */
	tp->t_keepinit = sototcpcb(lso)->t_keepinit;
	tp->t_keepidle = sototcpcb(lso)->t_keepidle;
	tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
	tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));

	INP_WUNLOCK(inp);

	TCPSTAT_INC(tcps_accepts);
	return (so);

abort:
	INP_WUNLOCK(inp);
abort2:
	if (so != NULL)
		soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct socket **lsop, struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct syncache scs;
	char *s;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
	    ("%s: can handle only ACK", __func__));

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);

#ifdef INVARIANTS
	/*
	 * Test code for syncookies comparing the syncache stored
	 * values with the reconstructed values from the cookie.
	 */
	if (sc != NULL)
		syncookie_cmp(inc, sch, sc, th, to, *lsop);
#endif

	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!V_tcp_syncookies) {
			SCH_UNLOCK(sch);
			if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Spurious ACK, "
				    "segment rejected (syncookies disabled)\n",
				    s, __func__);
			goto failed;
		}
		bzero(&scs, sizeof(scs));
		sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
		SCH_UNLOCK(sch);
		if (sc == NULL) {
			if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Segment failed "
				    "SYNCOOKIE authentication, segment rejected "
				    "(probably spoofed)\n", s, __func__);
			goto failed;
		}
	} else {
		/* Pull out the entry to unlock the bucket row. */
		TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
		sch->sch_length--;
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			tod->tod_syncache_removed(tod, sc->sc_todctx);
		}
#endif
		SCH_UNLOCK(sch);
	}

	/*
	 * Segment validation:
	 * ACK must match our initial sequence number + 1 (the SYN|ACK).
	 */
	if (th->th_ack != sc->sc_iss + 1) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
			    "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
		goto failed;
	}

	/*
	 * The SEQ must fall in the window starting at the received
	 * initial receive sequence number + 1 (the SYN).
	 */
	if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
	    SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
			    "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
		goto failed;
	}

	/*
	 * If timestamps were not negotiated during SYN/ACK they
	 * must not appear on any segment during this session.
	 */
	if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "segment rejected\n", s, __func__);
		goto failed;
	}

	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session.
	 * XXXAO: This is only informal as there have been unverified
	 * reports of non-compliant stacks.
	 */
	if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
			s = NULL;
		}
	}

	/*
	 * If timestamps were negotiated the reflected timestamp
	 * must be equal to what we actually sent in the SYN|ACK.
	 */
	if ((to->to_flags & TOF_TS) && to->to_tsecr != sc->sc_ts) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
			    "segment rejected\n",
			    s, __func__, to->to_tsecr, sc->sc_ts);
		goto failed;
	}

	*lsop = syncache_socket(sc, *lsop, m);

	if (*lsop == NULL)
		TCPSTAT_INC(tcps_sc_aborted);
	else
		TCPSTAT_INC(tcps_sc_completed);

	/* how do we find the inp for the new socket? */
	if (sc != &scs)
		syncache_free(sc);
	return (1);
failed:
	if (sc != NULL && sc != &scs)
		syncache_free(sc);
	if (s != NULL)
		free(s, M_TCPLOG);
	*lsop = NULL;
	return (0);
}

#ifdef TCP_RFC7413
static void
syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
    uint64_t response_cookie)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	unsigned int *pending_counter;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	pending_counter = intotcpcb(sotoinpcb(*lsop))->t_tfo_pending;
	*lsop = syncache_socket(sc, *lsop, m);
	if (*lsop == NULL) {
		TCPSTAT_INC(tcps_sc_aborted);
		atomic_subtract_int(pending_counter, 1);
	} else {
		inp = sotoinpcb(*lsop);
		tp = intotcpcb(inp);
		tp->t_flags |= TF_FASTOPEN;
		tp->t_tfo_cookie = response_cookie;
		tp->snd_max = tp->iss;
		tp->snd_nxt = tp->iss;
		tp->t_tfo_pending = pending_counter;
		TCPSTAT_INC(tcps_sc_completed);
	}
}
#endif /* TCP_RFC7413 */

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 *
 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
 * cookie is processed, V_tcp_fastopen_enabled is set to true, and the
 * TCP_FASTOPEN socket option is set.  In this case, a new socket is created
 * and returned via lsop, the mbuf is not freed so that tcp_input() can
 * queue its data to the socket, and 1 is returned to indicate the
 * TFO-socket-creation path was taken.
 */
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
    void *todctx)
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	u_int ltflags;
	int win, sb_hiwat, ip_ttl, ip_tos;
	char *s;
	int rv = 0;
#ifdef INET6
	int autoflowlabel = 0;
#endif
#ifdef MAC
	struct label *maclabel;
#endif
	struct syncache scs;
	struct ucred *cred;
#ifdef TCP_RFC7413
	uint64_t tfo_response_cookie;
	int tfo_cookie_valid = 0;
	int tfo_response_cookie_valid = 0;
#endif

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);			/* listen socket */
	KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
	    ("%s: unexpected tcp flags", __func__));

	/*
	 * Combine all so/tp operations very early to drop the INP lock as
	 * soon as possible.
	 */
	so = *lsop;
	tp = sototcpcb(so);
	cred = crhold(so->so_cred);

#ifdef INET6
	if ((inc->inc_flags & INC_ISIPV6) &&
	    (inp->inp_flags & IN6P_AUTOFLOWLABEL))
		autoflowlabel = 1;
#endif
	ip_ttl = inp->inp_ip_ttl;
	ip_tos = inp->inp_ip_tos;
	win = sbspace(&so->so_rcv);
	sb_hiwat = so->so_rcv.sb_hiwat;
	ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));

#ifdef TCP_RFC7413
	if (V_tcp_fastopen_enabled && (tp->t_flags & TF_FASTOPEN) &&
	    (tp->t_tfo_pending != NULL) && (to->to_flags & TOF_FASTOPEN)) {
		/*
		 * Limit the number of pending TFO connections to
		 * approximately half of the queue limit.  This prevents TFO
		 * SYN floods from starving the service by filling the
		 * listen queue with bogus TFO connections.
		 */
		if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
		    (so->so_qlimit / 2)) {
			int result;

			result = tcp_fastopen_check_cookie(inc,
			    to->to_tfo_cookie, to->to_tfo_len,
			    &tfo_response_cookie);
			tfo_cookie_valid = (result > 0);
			tfo_response_cookie_valid = (result >= 0);
		} else
			atomic_subtract_int(tp->t_tfo_pending, 1);
	}
#endif

	/* By the time we drop the lock these should no longer be used. */
	so = NULL;
	tp = NULL;

#ifdef MAC
	if (mac_syncache_init(&maclabel) != 0) {
		INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);
		goto done;
	} else
		mac_syncache_create(maclabel, inp);
#endif
#ifdef TCP_RFC7413
	if (!tfo_cookie_valid) {
		INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);
	}
#else
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
#endif

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!(inc->inc_flags & INC_ISIPV6))
#endif
#ifdef INET
		ipopts = (m) ? ip_srcroute(m) : NULL;
#else
		ipopts = NULL;
#endif

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX: should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 *
	 * XXX: We do not check the sequence number to see if this is a
	 * real retransmit or a new connection attempt.  The question is
	 * how to handle such a case; either ignore it as spoofed, or
	 * drop the current entry and create a new one?
	 */
	sc = syncache_lookup(inc, &sch);	/* returns locked entry */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
#ifdef TCP_RFC7413
		if (tfo_cookie_valid) {
			INP_WUNLOCK(inp);
			INP_INFO_WUNLOCK(&V_tcbinfo);
		}
#endif
		TCPSTAT_INC(tcps_sc_dupsyn);
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
			sc->sc_tsreflect = to->to_tsval;
		else
			sc->sc_flags &= ~SCF_TIMESTAMP;
#ifdef MAC
		/*
		 * Since we have already unconditionally allocated label
		 * storage, free it up.  The syncache entry will already
		 * have an initialized label we can use.
		 */
		mac_syncache_destroy(&maclabel);
#endif
		/* Retransmit SYN|ACK and reset retransmit count. */
		if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
			    "resetting timer and retransmitting SYN|ACK\n",
			    s, __func__);
			free(s, M_TCPLOG);
		}
		if (syncache_respond(sc) == 0) {
			sc->sc_rxmits = 0;
			syncache_timeout(sc, sch, 1);
			TCPSTAT_INC(tcps_sndacks);
			TCPSTAT_INC(tcps_sndtotal);
		}
		SCH_UNLOCK(sch);
		goto done;
	}

#ifdef TCP_RFC7413
	if (tfo_cookie_valid) {
		bzero(&scs, sizeof(scs));
		sc = &scs;
		goto skip_alloc;
	}
#endif

	sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		TCPSTAT_INC(tcps_sc_zonefail);
		if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL)
			syncache_drop(sc, sch);
		sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
		if (sc == NULL) {
			if (V_tcp_syncookies) {
				bzero(&scs, sizeof(scs));
				sc = &scs;
			} else {
				SCH_UNLOCK(sch);
				if (ipopts)
					(void) m_free(ipopts);
				goto done;
			}
		}
	}

#ifdef TCP_RFC7413
skip_alloc:
	if (!tfo_cookie_valid && tfo_response_cookie_valid)
		sc->sc_tfo_cookie = &tfo_response_cookie;
#endif

	/*
	 * Fill in the syncache values.
	 */
#ifdef MAC
	sc->sc_label = maclabel;
#endif
	sc->sc_cred = cred;
	cred = NULL;
	sc->sc_ipopts = ipopts;
	bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
#ifdef INET6
	if (!(inc->inc_flags & INC_ISIPV6))
#endif
	{
		sc->sc_ip_tos = ip_tos;
		sc->sc_ip_ttl = ip_ttl;
	}
#ifdef TCP_OFFLOAD
	sc->sc_tod = tod;
	sc->sc_todctx = todctx;
#endif
	sc->sc_irs = th->th_seq;
	sc->sc_iss = arc4random();
	sc->sc_flags = 0;
	sc->sc_flowlabel = 0;

	/*
	 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
	 * win was derived from socket earlier in the function.
	 */
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (V_tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsreflect = to->to_tsval;
			sc->sc_ts = tcp_ts_getticks();
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/*
			 * Pick the smallest possible scaling factor that
			 * will still allow us to scale up to sb_max, aka
			 * kern.ipc.maxsockbuf.
			 *
			 * We do this because there are broken firewalls that
			 * will corrupt the window scale option, leading to
			 * the other endpoint believing that our advertised
			 * window is unscaled.  At scale factors larger than
			 * 5 the unscaled window will drop below 1500 bytes,
			 * leading to serious problems when traversing these
			 * broken firewalls.
			 *
			 * With the default maxsockbuf of 256K, a scale factor
			 * of 3 will be chosen by this algorithm.  Those who
			 * choose a larger maxsockbuf should watch out
			 * for the compatibility problems mentioned above.
			 *
			 * RFC1323: The Window field in a SYN (i.e., a <SYN>
			 * or <SYN,ACK>) segment itself is never scaled.
			 */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < sb_max)
				wscale++;
			sc->sc_requested_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_wscale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
#ifdef TCP_SIGNATURE
	/*
	 * If listening socket requested TCP digests, and received SYN
	 * contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX: Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE || ltflags & TF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif
	if (to->to_flags & TOF_SACKPERM)
		sc->sc_flags |= SCF_SACK;
	if (to->to_flags & TOF_MSS)
		sc->sc_peer_mss = to->to_mss;	/* peer mss may be zero */
	if (ltflags & TF_NOOPT)
		sc->sc_flags |= SCF_NOOPT;
	if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
		sc->sc_flags |= SCF_ECN;

	if (V_tcp_syncookies)
		sc->sc_iss = syncookie_generate(sch, sc);
#ifdef INET6
	if (autoflowlabel) {
		if (V_tcp_syncookies)
			sc->sc_flowlabel = sc->sc_iss;
		else
			sc->sc_flowlabel = ip6_randomflowlabel();
		sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
	}
#endif
	SCH_UNLOCK(sch);

#ifdef TCP_RFC7413
	if (tfo_cookie_valid) {
		syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
		/* INP_WUNLOCK(inp) will be performed by the called function */
		rv = 1;
		goto tfo_done;
	}
#endif

	/*
	 * Do a standard 3-way handshake.
	 */
	if (syncache_respond(sc) == 0) {
		if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
			syncache_free(sc);
		else if (sc != &scs)
			syncache_insert(sc, sch);	/* locks and unlocks sch */
		TCPSTAT_INC(tcps_sndacks);
		TCPSTAT_INC(tcps_sndtotal);
	} else {
		if (sc != &scs)
			syncache_free(sc);
		TCPSTAT_INC(tcps_sc_dropped);
	}

done:
	if (m) {
		*lsop = NULL;
		m_freem(m);
	}
#ifdef TCP_RFC7413
tfo_done:
#endif
	if (cred != NULL)
		crfree(cred);
#ifdef MAC
	if (sc == &scs)
		mac_syncache_destroy(&maclabel);
#endif
	return (rv);
}

static int
syncache_respond(struct syncache *sc)
{
	struct ip *ip = NULL;
	struct mbuf *m;
	struct tcphdr *th = NULL;
	int optlen, error = 0;			/* Make compiler happy */
	u_int16_t hlen, tlen, mssopt;
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

	hlen =
#ifdef INET6
	       (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
#endif
		sizeof(struct ip);
	tlen = hlen + sizeof(struct tcphdr);

	/* Determine MSS we advertise to other end of connection. */
	mssopt = tcp_mssopt(&sc->sc_inc);
	if (sc->sc_peer_mss)
		mssopt = max(min(sc->sc_peer_mss, mssopt), V_tcp_minmss);

	/* XXX: Assume that the entire packet will fit in a header mbuf. */
	KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
	    ("syncache: mbuf too small"));

	/* Create the IP+TCP header from scratch. */
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
#ifdef MAC
	mac_syncache_create_mbuf(sc->sc_label, m);
#endif
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
		ip6->ip6_flow |= sc->sc_flowlabel;

		th = (struct tcphdr *)(ip6 + 1);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = htons(tlen);
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_ip_ttl;
		ip->ip_tos = sc->sc_ip_tos;

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if either:
		 *
		 *	1) path_mtu_discovery is disabled, or
		 *	2) the SCF_UNREACH flag has been set.
		 */
		if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
			ip->ip_off |= htons(IP_DF);

		th = (struct tcphdr *)(ip + 1);
	}
#endif /* INET */
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = sizeof(struct tcphdr) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	if (sc->sc_flags & SCF_ECN) {
		th->th_flags |= TH_ECE;
		TCPSTAT_INC(tcps_ecn_shs);
	}

	/* Tack on the TCP options. */
	if ((sc->sc_flags & SCF_NOOPT) == 0) {
		to.to_flags = 0;

		to.to_mss = mssopt;
		to.to_flags = TOF_MSS;
		if (sc->sc_flags & SCF_WINSCALE) {
			to.to_wscale = sc->sc_requested_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		if (sc->sc_flags & SCF_TIMESTAMP) {
			/* Virgin timestamp or TCP cookie enhanced one. */
			to.to_tsval = sc->sc_ts;
			to.to_tsecr = sc->sc_tsreflect;
			to.to_flags |= TOF_TS;
		}
		if (sc->sc_flags & SCF_SACK)
			to.to_flags |= TOF_SACKPERM;
#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif

#ifdef TCP_RFC7413
		if (sc->sc_tfo_cookie) {
			to.to_flags |= TOF_FASTOPEN;
			to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
			to.to_tfo_cookie = sc->sc_tfo_cookie;
			/* don't send cookie again when retransmitting response */
			sc->sc_tfo_cookie = NULL;
		}
#endif
		optlen = tcp_addoptions(&to, (u_char *)(th + 1));

		/* Adjust headers by option size. */
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
		m->m_len += optlen;
		m->m_pkthdr.len += optlen;

#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			tcp_signature_compute(m, 0, 0, optlen,
			    to.to_signature, IPSEC_DIR_OUTBOUND);
#endif
#ifdef INET6
		if (sc->sc_inc.inc_flags & INC_ISIPV6)
			ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
		else
#endif
			ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
	} else
		optlen = 0;

	M_SETFIB(m, sc->sc_inc.inc_fibnum);
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
		    IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);

			return (error);
		}
#endif
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen + optlen - hlen + IPPROTO_TCP));
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);

			return (error);
		}
#endif
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
	}
#endif
	return (error);
}

/*
 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
 * that exceed the capacity of the syncache by avoiding the storage of any
 * of the SYNs we receive.  Syncookies defend against blind SYN flooding
 * attacks where the attacker does not have access to our responses.
 *
 * Syncookies encode and include all necessary information about the
 * connection setup within the SYN|ACK that we send back.  That way we
 * can avoid keeping any local state until the ACK to our SYN|ACK returns
 * (if ever).  Normally the syncache and syncookies run in parallel, with
 * the latter taking over when the former is exhausted.  When a matching
 * syncache entry is found the syncookie is ignored.
 *
 * The only reliable information persisting through the 3WHS is our initial
 * sequence number ISS of 32 bits.  Syncookies embed a cryptographically
 * sufficiently strong hash (MAC) value and a few bits of TCP SYN options
 * in the ISS
 * of our SYN|ACK.  The MAC can be recomputed when the ACK to our SYN|ACK
 * returns and signifies a legitimate connection if it matches the ACK.
 *
 * The available space of 32 bits to store the hash and to encode the SYN
 * option information is very tight and we should have at least 24 bits for
 * the MAC to keep the number of guesses by blind spoofing reasonably high.
 *
 * SYN option information we have to encode to fully restore a connection:
 * MSS: is important to choose an optimal segment size to avoid IP level
 *   fragmentation along the path.  The common MSS values can be encoded
 *   in a 3-bit table.  Uncommon values are captured by the next lower value
 *   in the table leading to a slight increase in packetization overhead.
 * WSCALE: is necessary to allow large windows to be used for high delay-
 *   bandwidth product links.  Not scaling the window when it was initially
 *   negotiated is bad for performance as lack of scaling further decreases
 *   the apparent available send window.  We only need to encode the WSCALE
 *   we received from the remote end.  Our end can be recalculated at any
 *   time.  The common WSCALE values can be encoded in a 3-bit table.
 *   Uncommon values are captured by the next lower value in the table
 *   making us under-estimate the available window size halving our
 *   theoretically possible maximum throughput for that connection.
 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
 * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
 *   that are included in all segments on a connection.  We enable them when
 *   the ACK has them.
 *
 * Security of syncookies and attack vectors:
 *
 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
 * together with the global secret to make it unique per connection attempt.
 * Thus any change of any of those parameters results in a different MAC output
 * in an unpredictable way unless a collision is encountered.  24 bits of the
 * MAC are embedded into the ISS.
 *
 * To prevent replay attacks two rotating global secrets are updated with a
 * new random value every 15 seconds.  The life-time of a syncookie is thus
 * 15-30 seconds.
 *
 * Vector 1: Attacking the secret.  This requires finding a weakness in the
 * MAC itself or the way it is used here.  The attacker can do a chosen plain
 * text attack by varying and testing all the parameters under his control.
 * The strength depends on the size and randomness of the secret, and the
 * cryptographic security of the MAC function.  Due to the constant updating
 * of the secret the attacker has at most 29.999 seconds to find the secret
 * and launch spoofed connections.  After that he has to start all over again.
 *
 * Vector 2: Collision attack on the MAC of a single ACK.  With a 24 bit MAC
 * size an average of 4,823 attempts are required for a 50% chance of success
 * to spoof a single syncookie (birthday collision paradox).  However the
 * attacker is blind and doesn't know if one of his attempts succeeded unless
 * he has a side channel to infer success from.  A single connection setup
 * success average of 90% requires 8,790 packets, 99.99% requires 17,578 packets.
 * This many attempts are required for each blind spoofed connection.  For
 * every additional spoofed connection he has to launch another N attempts.
 * Thus for a sustained rate of 100 spoofed connections per second approximately
1779 * 1,800,000 packets per second would have to be sent.
 *
 * NB: The MAC function should be fast so that it doesn't become a CPU
 * exhaustion attack vector itself.
 *
 * References:
 *  RFC4987 TCP SYN Flooding Attacks and Common Mitigations
 *  SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
 *  http://cr.yp.to/syncookies.html    (overview)
 *  http://cr.yp.to/syncookies/archive (details)
 *
 *
 * Schematic construction of a syncookie enabled Initial Sequence Number:
 *  0        1         2         3
 *  12345678901234567890123456789012
 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
 *
 *  x 24 MAC (truncated)
 *  W  3 Send Window Scale index
 *  M  3 MSS index
 *  S  1 SACK permitted
 *  P  1 Odd/even secret
 */

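/*
 * Below is an illustrative sketch, compiled out with #if 0, of how the
 * ISS layout above packs and unpacks.  It mirrors the logic found in
 * syncookie_generate() and syncookie_lookup() further down; the
 * example_* names are hypothetical and not part of this file.
 */
#if 0
#include <stdint.h>

/*
 * Pack: the top 24 bits carry the truncated MAC, the low 8 bits the
 * XOR-masked cookie flags |WWW|MMM|S|P|.
 */
static uint32_t
example_pack_iss(uint32_t mac, uint8_t flags8)
{
	uint32_t iss;

	iss = mac & ~(uint32_t)0xff;	/* truncated 24-bit MAC */
	iss |= flags8 ^ (mac >> 24);	/* mask flags with MAC bits 31-24 */
	return (iss);
}

/*
 * Unpack from the echoed ACK (th_ack - 1): for a genuine cookie the top
 * 8 bits of the ACK equal the top 8 bits of the MAC, so the XOR mask
 * can be stripped without recomputing the MAC first.
 */
static uint8_t
example_unpack_flags(uint32_t ack)
{

	return ((ack & 0xff) ^ (ack >> 24));
}

/*
 * Validate: recompute the MAC over the connection parameters and
 * compare the top 24 bits with those of the ACK.
 */
static int
example_cookie_valid(uint32_t ack, uint32_t recomputed_mac)
{

	return ((ack & ~(uint32_t)0xff) == (recomputed_mac & ~(uint32_t)0xff));
}
#endif
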
/*
 * Distribution and probability of certain MSS values.  Those in between are
 * rounded down to the next lower one.
 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R. Nelson, 2011]
 *                            .2%  .3%   5%    7%    7%    20%   15%   45%
 */
static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 };
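
/*
 * Illustrative example: a peer advertising MSS 1300 has no exact match in
 * tcp_sc_msstab and is encoded as index 2 (1200), trading a slightly
 * smaller segment size for the compact 3-bit encoding.
 */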

/*
 * Distribution and probability of certain WSCALE values.  We have to map the
 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
 * bits based on prevalence of certain values.  Values for which we don't have
 * an exact match are rounded down to the next lower one, letting us
 * under-estimate the true available window.  At the moment this would happen
 * only for the very uncommon values 3, 5 and those above 8 (more than 16MB
 * socket buffer and window size).  The absence of the WSCALE option (no
 * scaling in either direction) is encoded with index zero.
 * [WSCALE values histograms, Allman, 2012]
 *                            X 10 10 35  5  6 14 10%   by host
 *                            X 11  4  5  5 18 49  3%   by connections
 */
static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };
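
/*
 * Illustrative example: a received wscale of 5 has no exact entry in
 * tcp_sc_wstab and is encoded as index 4 (wscale 4), so a window the
 * peer advertises is interpreted at half its true scaled value.
 */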

/*
 * Compute the MAC for the SYN cookie.  SIPHASH-2-4 is chosen for its speed
 * and good cryptographic properties.
 */
static uint32_t
syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
    uint8_t *secbits, uintptr_t secmod)
{
	SIPHASH_CTX ctx;
	uint32_t siphash[2];

	SipHash24_Init(&ctx);
	SipHash_SetKey(&ctx, secbits);
	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
		SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
		SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
		break;
#endif
	}
	SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
	SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
	SipHash_Update(&ctx, &irs, sizeof(irs));
	SipHash_Update(&ctx, &flags, sizeof(flags));
	SipHash_Update(&ctx, &secmod, sizeof(secmod));
	SipHash_Final((u_int8_t *)&siphash, &ctx);

	/* Fold the 64-bit SipHash output into a 32-bit MAC. */
	return (siphash[0] ^ siphash[1]);
}

static tcp_seq
syncookie_generate(struct syncache_head *sch, struct syncache *sc)
{
	u_int i, mss, secbit, wscale;
	uint32_t iss, hash;
	uint8_t *secbits;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	cookie.cookie = 0;

	/* Map our computed MSS into the 3-bit index. */
	mss = min(tcp_mssopt(&sc->sc_inc), max(sc->sc_peer_mss, V_tcp_minmss));
	for (i = sizeof(tcp_sc_msstab) / sizeof(*tcp_sc_msstab) - 1;
	    tcp_sc_msstab[i] > mss && i > 0;
	    i--)
		;
	cookie.flags.mss_idx = i;

	/*
	 * Map the send window scale into the 3-bit index but only if
	 * the wscale option was received.
	 */
	if (sc->sc_flags & SCF_WINSCALE) {
		wscale = sc->sc_requested_s_scale;
		for (i = sizeof(tcp_sc_wstab) / sizeof(*tcp_sc_wstab) - 1;
		    tcp_sc_wstab[i] > wscale && i > 0;
		    i--)
			;
		cookie.flags.wscale_idx = i;
	}

	/* Can we do SACK? */
	if (sc->sc_flags & SCF_SACK)
		cookie.flags.sack_ok = 1;

	/* Which of the two secrets to use. */
	secbit = sch->sch_sc->secret.oddeven & 0x1;
	cookie.flags.odd_even = secbit;

	secbits = sch->sch_sc->secret.key[secbit];
	/* The bucket address serves as the 'secmod' MAC input. */
	hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
	    (uintptr_t)sch);

	/*
	 * Put the flags into the hash and XOR them to get better ISS number
	 * variance.  This doesn't enhance the cryptographic strength and is
	 * done to prevent the 8 cookie bits from showing up directly on the
	 * wire.
	 */
	iss = hash & ~0xff;
	iss |= cookie.cookie ^ (hash >> 24);

	/* Randomize the timestamp. */
	if (sc->sc_flags & SCF_TIMESTAMP) {
		sc->sc_ts = arc4random();
		sc->sc_tsoff = sc->sc_ts - tcp_ts_getticks();
	}

	TCPSTAT_INC(tcps_sc_sendcookie);
	return (iss);
}

static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	uint32_t hash;
	uint8_t *secbits;
	tcp_seq ack, seq;
	int wnd, wscale = 0;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	/*
	 * Pull information out of SYN-ACK/ACK and revert sequence number
	 * advances.
	 */
	ack = th->th_ack - 1;
	seq = th->th_seq - 1;

	/*
	 * Unpack the flags containing enough information to restore the
	 * connection.  The XOR mask applied at generation time was the
	 * top 8 bits of the hash; for a genuine cookie those equal the
	 * top 8 bits of the ACK, so the mask can be removed without
	 * recomputing the hash first.
	 */
	cookie.cookie = (ack & 0xff) ^ (ack >> 24);

	/* Which of the two secrets to use. */
	secbits = sch->sch_sc->secret.key[cookie.flags.odd_even];

	hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);

	/* The recomputed hash matches the ACK if this was a genuine cookie. */
	if ((ack & ~0xff) != (hash & ~0xff))
		return (NULL);

	/* Fill in the syncache values. */
	sc->sc_flags = 0;
	bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
	sc->sc_ipopts = NULL;

	sc->sc_irs = seq;
	sc->sc_iss = ack;

	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
		sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = sc->sc_iss & IPV6_FLOWLABEL_MASK;
		break;
#endif
	}

	sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];

	/* We can simply recompute the receive window scale we sent earlier. */
	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
		wscale++;

	/* Only use wscale if it was enabled in the original SYN. */
	if (cookie.flags.wscale_idx > 0) {
		sc->sc_requested_r_scale = wscale;
		sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
		sc->sc_flags |= SCF_WINSCALE;
	}

	wnd = sbspace(&lso->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;

	if (cookie.flags.sack_ok)
		sc->sc_flags |= SCF_SACK;

	if (to->to_flags & TOF_TS) {
		sc->sc_flags |= SCF_TIMESTAMP;
		sc->sc_tsreflect = to->to_tsval;
		sc->sc_ts = to->to_tsecr;
		sc->sc_tsoff = to->to_tsecr - tcp_ts_getticks();
	}

	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;

	sc->sc_rxmits = 0;

	TCPSTAT_INC(tcps_sc_recvcookie);
	return (sc);
}

#ifdef INVARIANTS
static int
syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	struct syncache scs, *scx;
	char *s;

	bzero(&scs, sizeof(scs));
	scx = syncookie_lookup(inc, sch, &scs, th, to, lso);

	if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
		return (0);

	if (scx != NULL) {
		if (sc->sc_peer_mss != scx->sc_peer_mss)
			log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
			    s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);

		if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
			log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_r_scale,
			    scx->sc_requested_r_scale);

		if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
			log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_s_scale,
			    scx->sc_requested_s_scale);

		if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
			log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
	}

	if (s != NULL)
		free(s, M_TCPLOG);
	return (0);
}
#endif /* INVARIANTS */

static void
syncookie_reseed(void *arg)
{
	struct tcp_syncache *sc = arg;
	uint8_t *secbits;
	int secbit;

	/*
	 * Reseeding the secret doesn't have to be protected by a lock.
	 * It only must be ensured that the new random values are visible
	 * to all CPUs in an SMP environment.  The atomic with release
	 * semantics ensures that.
	 */
	secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
	secbits = sc->secret.key[secbit];
	arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
	atomic_add_rel_int(&sc->secret.oddeven, 1);

	/* Reschedule ourselves. */
	callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
}
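
/*
 * Illustrative timeline (not normative) of the two-secret rotation,
 * assuming SYNCOOKIE_LIFETIME is 15 seconds:
 *
 *  t=0   oddeven=0: cookies are minted with key[0], parity bit 0.
 *  t=15  reseed overwrites key[1] and bumps oddeven to 1: new cookies
 *        use key[1]; outstanding key[0] cookies still validate because
 *        their parity bit selects key[0] on lookup.
 *  t=30  reseed overwrites key[0]: cookies minted before t=15 now fail,
 *        bounding a cookie's life-time to between 15 and 30 seconds.
 */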

/*
 * Returns the current number of syncache entries.  This number
 * will probably change before you get around to calling
 * syncache_pcblist.
 */
int
syncache_pcbcount(void)
{
	struct syncache_head *sch;
	int count, i;

	for (count = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
		/* No need to lock for a read. */
		sch = &V_tcp_syncache.hashbase[i];
		count += sch->sch_length;
	}
	return (count);
}

/*
 * Exports the syncache entries to userland so that netstat can display
 * them alongside the other sockets.  This function is intended to be
 * called only from tcp_pcblist.
 *
 * Due to concurrency on an active system, the number of pcbs exported
 * may have no relation to max_pcbs.  max_pcbs merely indicates the
 * amount of space the caller allocated for this function to use.
 */
int
syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
{
	struct xtcpcb xt;
	struct syncache *sc;
	struct syncache_head *sch;
	int count, error, i;

	for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
		sch = &V_tcp_syncache.hashbase[i];
		SCH_LOCK(sch);
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (count >= max_pcbs) {
				SCH_UNLOCK(sch);
				goto exit;
			}
			if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
				continue;
			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof(xt);
			if (sc->sc_inc.inc_flags & INC_ISIPV6)
				xt.xt_inp.inp_vflag = INP_IPV6;
			else
				xt.xt_inp.inp_vflag = INP_IPV4;
			bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
			    sizeof (struct in_conninfo));
			xt.xt_tp.t_inpcb = &xt.xt_inp;
			xt.xt_tp.t_state = TCPS_SYN_RECEIVED;
			xt.xt_socket.xso_protocol = IPPROTO_TCP;
			xt.xt_socket.xso_len = sizeof (struct xsocket);
			xt.xt_socket.so_type = SOCK_STREAM;
			xt.xt_socket.so_state = SS_ISCONNECTING;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
			if (error) {
				SCH_UNLOCK(sch);
				goto exit;
			}
			count++;
		}
		SCH_UNLOCK(sch);
	}
exit:
	*pcbs_exported = count;
	return (error);
}