1 /*-
2 * Copyright (c) 2001 McAfee, Inc.
3 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Jonathan Lemon
7 * and McAfee Research, the Security Research Division of McAfee, Inc. under
8 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program. [2001 McAfee, Inc.]
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 #include "opt_pcbgroup.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/sysctl.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/proc.h> /* for proc0 declaration */
51 #include <sys/random.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/syslog.h>
55 #include <sys/ucred.h>
56
57 #include <sys/md5.h>
58 #include <crypto/siphash/siphash.h>
59
60 #include <vm/uma.h>
61
62 #include <net/if.h>
63 #include <net/route.h>
64 #include <net/vnet.h>
65
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/ip.h>
69 #include <netinet/in_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/ip_options.h>
73 #ifdef INET6
74 #include <netinet/ip6.h>
75 #include <netinet/icmp6.h>
76 #include <netinet6/nd6.h>
77 #include <netinet6/ip6_var.h>
78 #include <netinet6/in6_pcb.h>
79 #endif
80 #include <netinet/tcp.h>
81 #ifdef TCP_RFC7413
82 #include <netinet/tcp_fastopen.h>
83 #endif
84 #include <netinet/tcp_fsm.h>
85 #include <netinet/tcp_seq.h>
86 #include <netinet/tcp_timer.h>
87 #include <netinet/tcp_var.h>
88 #include <netinet/tcp_syncache.h>
89 #ifdef INET6
90 #include <netinet6/tcp6_var.h>
91 #endif
92 #ifdef TCP_OFFLOAD
93 #include <netinet/toecore.h>
94 #endif
95
96 #ifdef IPSEC
97 #include <netipsec/ipsec.h>
98 #ifdef INET6
99 #include <netipsec/ipsec6.h>
100 #endif
101 #include <netipsec/key.h>
102 #endif /*IPSEC*/
103
104 #include <machine/in_cksum.h>
105
106 #include <security/mac/mac_framework.h>
107
108 static VNET_DEFINE(int, tcp_syncookies) = 1;
109 #define V_tcp_syncookies VNET(tcp_syncookies)
110 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
111 &VNET_NAME(tcp_syncookies), 0,
112 "Use TCP SYN cookies if the syncache overflows");
113
114 static VNET_DEFINE(int, tcp_syncookiesonly) = 0;
115 #define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
116 SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_RW,
117 &VNET_NAME(tcp_syncookiesonly), 0,
118 "Use only TCP SYN cookies");
119
120 #ifdef TCP_OFFLOAD
121 #define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
122 #endif
123
124 static void syncache_drop(struct syncache *, struct syncache_head *);
125 static void syncache_free(struct syncache *);
126 static void syncache_insert(struct syncache *, struct syncache_head *);
127 static int syncache_respond(struct syncache *, const struct mbuf *);
128 static struct socket *syncache_socket(struct syncache *, struct socket *,
129 struct mbuf *m);
130 static int syncache_sysctl_count(SYSCTL_HANDLER_ARGS);
131 static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
132 int docallout);
133 static void syncache_timer(void *);
134
135 static uint32_t syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
136 uint8_t *, uintptr_t);
137 static tcp_seq syncookie_generate(struct syncache_head *, struct syncache *);
138 static struct syncache
139 *syncookie_lookup(struct in_conninfo *, struct syncache_head *,
140 struct syncache *, struct tcphdr *, struct tcpopt *,
141 struct socket *);
142 static void syncookie_reseed(void *);
143 #ifdef INVARIANTS
144 static int syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
145 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
146 struct socket *lso);
147 #endif
148
149 /*
150 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
151  * 3 retransmits correspond to a timeout of 3 * (1 + 2 + 4 + 8) == 45 seconds;
152  * the odds are that the user has given up attempting to connect by then.
153 */
154 #define SYNCACHE_MAXREXMTS 3
155
156 /* Arbitrary values */
157 #define TCP_SYNCACHE_HASHSIZE 512
158 #define TCP_SYNCACHE_BUCKETLIMIT 30
159
160 static VNET_DEFINE(struct tcp_syncache, tcp_syncache);
161 #define V_tcp_syncache VNET(tcp_syncache)
162
163 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
164 "TCP SYN cache");
165
166 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
167 &VNET_NAME(tcp_syncache.bucket_limit), 0,
168 "Per-bucket hash limit for syncache");
169
170 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
171 &VNET_NAME(tcp_syncache.cache_limit), 0,
172 "Overall entry limit for syncache");
173
174 SYSCTL_VNET_PROC(_net_inet_tcp_syncache, OID_AUTO, count, (CTLTYPE_UINT|CTLFLAG_RD),
175 NULL, 0, &syncache_sysctl_count, "IU",
176 "Current number of entries in syncache");
177
178 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
179 &VNET_NAME(tcp_syncache.hashsize), 0,
180 "Size of TCP syncache hashtable");
181
182 SYSCTL_VNET_UINT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
183 &VNET_NAME(tcp_syncache.rexmt_limit), 0,
184 "Limit on SYN/ACK retransmissions");
185
186 VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
187 SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
188 CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
189 "Send reset on socket allocation failure");
190
191 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
192
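/*
 * Bucket selection folds the foreign address and both ports together with
 * hash_secret, a per-instance value initialized from arc4random() in
 * syncache_init().  Mixing in the secret keeps a remote sender from
 * predicting which bucket a given 4-tuple hashes to and deliberately
 * overflowing a single row.
 */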
193 #define SYNCACHE_HASH(inc, mask) \
194 ((V_tcp_syncache.hash_secret ^ \
195 (inc)->inc_faddr.s_addr ^ \
196 ((inc)->inc_faddr.s_addr >> 16) ^ \
197 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
198
199 #define SYNCACHE_HASH6(inc, mask) \
200 ((V_tcp_syncache.hash_secret ^ \
201 (inc)->inc6_faddr.s6_addr32[0] ^ \
202 (inc)->inc6_faddr.s6_addr32[3] ^ \
203 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
204
205 #define ENDPTS_EQ(a, b) ( \
206 (a)->ie_fport == (b)->ie_fport && \
207 (a)->ie_lport == (b)->ie_lport && \
208 (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \
209 (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \
210 )
211
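/*
 * For IPv6 the whole in_endpoints structure (both ports and both
 * addresses) is compared at once with memcmp().
 */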
212 #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
213
214 #define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx)
215 #define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx)
216 #define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED)
217
218 /*
219 * Requires the syncache entry to be already removed from the bucket list.
220 */
221 static void
222 syncache_free(struct syncache *sc)
223 {
224
225 if (sc->sc_ipopts)
226 (void) m_free(sc->sc_ipopts);
227 if (sc->sc_cred)
228 crfree(sc->sc_cred);
229 #ifdef MAC
230 mac_syncache_destroy(&sc->sc_label);
231 #endif
232
233 uma_zfree(V_tcp_syncache.zone, sc);
234 }
235
236 void
237 syncache_init(void)
238 {
239 int i;
240
241 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
242 V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
243 V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
244 V_tcp_syncache.hash_secret = arc4random();
245
246 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
247 &V_tcp_syncache.hashsize);
248 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
249 &V_tcp_syncache.bucket_limit);
250 if (!powerof2(V_tcp_syncache.hashsize) ||
251 V_tcp_syncache.hashsize == 0) {
252 printf("WARNING: syncache hash size is not a power of 2.\n");
253 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
254 }
255 V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
256
257 /* Set limits. */
258 V_tcp_syncache.cache_limit =
259 V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
260 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
261 &V_tcp_syncache.cache_limit);
262
263 /* Allocate the hash table. */
264 V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
265 sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);
266
267 #ifdef VIMAGE
268 V_tcp_syncache.vnet = curvnet;
269 #endif
270
271 /* Initialize the hash buckets. */
272 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
273 TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
274 mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
275 NULL, MTX_DEF);
276 callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
277 &V_tcp_syncache.hashbase[i].sch_mtx, 0);
278 V_tcp_syncache.hashbase[i].sch_length = 0;
279 V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
280 V_tcp_syncache.hashbase[i].sch_last_overflow =
281 -(SYNCOOKIE_LIFETIME + 1);
282 }
283
284 /* Create the syncache entry zone. */
285 V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
286 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
287 V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
288 V_tcp_syncache.cache_limit);
289
290 /* Start the SYN cookie reseeder callout. */
291 callout_init(&V_tcp_syncache.secret.reseed, 1);
292 arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
293 arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
294 callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
295 syncookie_reseed, &V_tcp_syncache);
296 }
297
298 #ifdef VIMAGE
299 void
300 syncache_destroy(void)
301 {
302 struct syncache_head *sch;
303 struct syncache *sc, *nsc;
304 int i;
305
306 /* Cleanup hash buckets: stop timers, free entries, destroy locks. */
307 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
308
309 sch = &V_tcp_syncache.hashbase[i];
310 callout_drain(&sch->sch_timer);
311
312 SCH_LOCK(sch);
313 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
314 syncache_drop(sc, sch);
315 SCH_UNLOCK(sch);
316 KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
317 ("%s: sch->sch_bucket not empty", __func__));
318 KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
319 __func__, sch->sch_length));
320 mtx_destroy(&sch->sch_mtx);
321 }
322
323 KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
324 ("%s: cache_count not 0", __func__));
325
326 /* Free the allocated global resources. */
327 uma_zdestroy(V_tcp_syncache.zone);
328 free(V_tcp_syncache.hashbase, M_SYNCACHE);
329
330 callout_drain(&V_tcp_syncache.secret.reseed);
331 }
332 #endif
333
334 static int
335 syncache_sysctl_count(SYSCTL_HANDLER_ARGS)
336 {
337 int count;
338
339 count = uma_zone_get_cur(V_tcp_syncache.zone);
340 return (sysctl_handle_int(oidp, &count, 0, req));
341 }
342
343 /*
344 * Inserts a syncache entry into the specified bucket row.
345 * Locks and unlocks the syncache_head autonomously.
346 */
347 static void
348 syncache_insert(struct syncache *sc, struct syncache_head *sch)
349 {
350 struct syncache *sc2;
351
352 SCH_LOCK(sch);
353
354 /*
355 * Make sure that we don't overflow the per-bucket limit.
356 * If the bucket is full, toss the oldest element.
357 */
358 if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
359 KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
360 ("sch->sch_length incorrect"));
361 sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
362 sch->sch_last_overflow = time_uptime;
363 syncache_drop(sc2, sch);
364 TCPSTAT_INC(tcps_sc_bucketoverflow);
365 }
366
367 /* Put it into the bucket. */
368 TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
369 sch->sch_length++;
370
371 #ifdef TCP_OFFLOAD
372 if (ADDED_BY_TOE(sc)) {
373 struct toedev *tod = sc->sc_tod;
374
375 tod->tod_syncache_added(tod, sc->sc_todctx);
376 }
377 #endif
378
379 /* Reinitialize the bucket row's timer. */
380 if (sch->sch_length == 1)
381 sch->sch_nextc = ticks + INT_MAX;
382 syncache_timeout(sc, sch, 1);
383
384 SCH_UNLOCK(sch);
385
386 TCPSTAT_INC(tcps_sc_added);
387 }
388
389 /*
390 * Remove and free entry from syncache bucket row.
391 * Expects locked syncache head.
392 */
393 static void
394 syncache_drop(struct syncache *sc, struct syncache_head *sch)
395 {
396
397 SCH_LOCK_ASSERT(sch);
398
399 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
400 sch->sch_length--;
401
402 #ifdef TCP_OFFLOAD
403 if (ADDED_BY_TOE(sc)) {
404 struct toedev *tod = sc->sc_tod;
405
406 tod->tod_syncache_removed(tod, sc->sc_todctx);
407 }
408 #endif
409
410 syncache_free(sc);
411 }
412
413 /*
414  * Engage/reengage timer on bucket row.
415 */
416 static void
417 syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
418 {
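	/*
	 * Schedule the next (re)transmit of the SYN|ACK: the base RTO is
	 * scaled by the per-attempt entry of the SYN backoff table, so
	 * successive attempts back off progressively.  If this entry now
	 * expires earlier than the bucket's pending callout, pull the
	 * callout forward.
	 */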
419 sc->sc_rxttime = ticks +
420 TCPTV_RTOBASE * (tcp_syn_backoff[sc->sc_rxmits]);
421 sc->sc_rxmits++;
422 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
423 sch->sch_nextc = sc->sc_rxttime;
424 if (docallout)
425 callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
426 syncache_timer, (void *)sch);
427 }
428 }
429
430 /*
431 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
432 * If we have retransmitted an entry the maximum number of times, expire it.
433 * One separate timer for each bucket row.
434 */
435 static void
436 syncache_timer(void *xsch)
437 {
438 struct syncache_head *sch = (struct syncache_head *)xsch;
439 struct syncache *sc, *nsc;
440 int tick = ticks;
441 char *s;
442
443 CURVNET_SET(sch->sch_sc->vnet);
444
445 /* NB: syncache_head has already been locked by the callout. */
446 SCH_LOCK_ASSERT(sch);
447
448 /*
449 * In the following cycle we may remove some entries and/or
450 * advance some timeouts, so re-initialize the bucket timer.
451 */
452 sch->sch_nextc = tick + INT_MAX;
453
454 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
455 /*
456  * We do not check whether the listen socket still exists,
457  * and accept the case where it may be gone by the time we
458  * resend the SYN|ACK.  We do not expect this to happen
459  * often.  If it does, then the RST will be sent by the
460  * time the remote host completes the SYN|ACK->ACK
461  * exchange.
462  */
463 if (TSTMP_GT(sc->sc_rxttime, tick)) {
464 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
465 sch->sch_nextc = sc->sc_rxttime;
466 continue;
467 }
468 if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
469 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
470 log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
471 "giving up and removing syncache entry\n",
472 s, __func__);
473 free(s, M_TCPLOG);
474 }
475 syncache_drop(sc, sch);
476 TCPSTAT_INC(tcps_sc_stale);
477 continue;
478 }
479 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
480 log(LOG_DEBUG, "%s; %s: Response timeout, "
481 "retransmitting (%u) SYN|ACK\n",
482 s, __func__, sc->sc_rxmits);
483 free(s, M_TCPLOG);
484 }
485
486 (void) syncache_respond(sc, NULL);
487 TCPSTAT_INC(tcps_sc_retransmitted);
488 syncache_timeout(sc, sch, 0);
489 }
490 if (!TAILQ_EMPTY(&(sch)->sch_bucket))
491 callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
492 syncache_timer, (void *)(sch));
493 CURVNET_RESTORE();
494 }
495
496 /*
497 * Find an entry in the syncache.
498  * Always returns with the syncache_head locked, plus a matching entry or NULL.
499 */
500 static struct syncache *
501 syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
502 {
503 struct syncache *sc;
504 struct syncache_head *sch;
505
506 #ifdef INET6
507 if (inc->inc_flags & INC_ISIPV6) {
508 sch = &V_tcp_syncache.hashbase[
509 SYNCACHE_HASH6(inc, V_tcp_syncache.hashmask)];
510 *schp = sch;
511
512 SCH_LOCK(sch);
513
514 /* Circle through bucket row to find matching entry. */
515 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
516 if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
517 return (sc);
518 }
519 } else
520 #endif
521 {
522 sch = &V_tcp_syncache.hashbase[
523 SYNCACHE_HASH(inc, V_tcp_syncache.hashmask)];
524 *schp = sch;
525
526 SCH_LOCK(sch);
527
528 /* Circle through bucket row to find matching entry. */
529 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
530 #ifdef INET6
531 if (sc->sc_inc.inc_flags & INC_ISIPV6)
532 continue;
533 #endif
534 if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
535 return (sc);
536 }
537 }
538 SCH_LOCK_ASSERT(*schp);
539 return (NULL); /* always returns with locked sch */
540 }
541
542 /*
543 * This function is called when we get a RST for a
544 * non-existent connection, so that we can see if the
545 * connection is in the syn cache. If it is, zap it.
546 */
547 void
548 syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
549 {
550 struct syncache *sc;
551 struct syncache_head *sch;
552 char *s = NULL;
553
554 sc = syncache_lookup(inc, &sch); /* returns locked sch */
555 SCH_LOCK_ASSERT(sch);
556
557 /*
558 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
559 * See RFC 793 page 65, section SEGMENT ARRIVES.
560 */
561 if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
562 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
563 log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
564 "FIN flag set, segment ignored\n", s, __func__);
565 TCPSTAT_INC(tcps_badrst);
566 goto done;
567 }
568
569 /*
570 * No corresponding connection was found in syncache.
571 * If syncookies are enabled and possibly exclusively
572 * used, or we are under memory pressure, a valid RST
573 * may not find a syncache entry. In that case we're
574 * done and no SYN|ACK retransmissions will happen.
575 * Otherwise the RST was misdirected or spoofed.
576 */
577 if (sc == NULL) {
578 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
579 log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
580 "syncache entry (possibly syncookie only), "
581 "segment ignored\n", s, __func__);
582 TCPSTAT_INC(tcps_badrst);
583 goto done;
584 }
585
586 /*
587 * If the RST bit is set, check the sequence number to see
588 * if this is a valid reset segment.
589 * RFC 793 page 37:
590 * In all states except SYN-SENT, all reset (RST) segments
591 * are validated by checking their SEQ-fields. A reset is
592 * valid if its sequence number is in the window.
593 *
594 * The sequence number in the reset segment is normally an
595  * echo of our outgoing acknowledgement numbers, but some hosts
596 * send a reset with the sequence number at the rightmost edge
597 * of our receive window, and we have to handle this case.
598 */
599 if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
600 SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
601 syncache_drop(sc, sch);
602 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
603 log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
604 "connection attempt aborted by remote endpoint\n",
605 s, __func__);
606 TCPSTAT_INC(tcps_sc_reset);
607 } else {
608 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
609 log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
610 "IRS %u (+WND %u), segment ignored\n",
611 s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
612 TCPSTAT_INC(tcps_badrst);
613 }
614
615 done:
616 if (s != NULL)
617 free(s, M_TCPLOG);
618 SCH_UNLOCK(sch);
619 }
620
621 void
622 syncache_badack(struct in_conninfo *inc)
623 {
624 struct syncache *sc;
625 struct syncache_head *sch;
626
627 sc = syncache_lookup(inc, &sch); /* returns locked sch */
628 SCH_LOCK_ASSERT(sch);
629 if (sc != NULL) {
630 syncache_drop(sc, sch);
631 TCPSTAT_INC(tcps_sc_badack);
632 }
633 SCH_UNLOCK(sch);
634 }
635
636 void
637 syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
638 {
639 struct syncache *sc;
640 struct syncache_head *sch;
641
642 sc = syncache_lookup(inc, &sch); /* returns locked sch */
643 SCH_LOCK_ASSERT(sch);
644 if (sc == NULL)
645 goto done;
646
647 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
648 if (ntohl(th->th_seq) != sc->sc_iss)
649 goto done;
650
651 /*
652  * If we've retransmitted 3 times and this is our second error,
653 * we remove the entry. Otherwise, we allow it to continue on.
654 * This prevents us from incorrectly nuking an entry during a
655 * spurious network outage.
656 *
657 * See tcp_notify().
658 */
659 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
660 sc->sc_flags |= SCF_UNREACH;
661 goto done;
662 }
663 syncache_drop(sc, sch);
664 TCPSTAT_INC(tcps_sc_unreach);
665 done:
666 SCH_UNLOCK(sch);
667 }
668
669 /*
670 * Build a new TCP socket structure from a syncache entry.
671 *
672 * On success return the newly created socket with its underlying inp locked.
673 */
674 static struct socket *
675 syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
676 {
677 struct inpcb *inp = NULL;
678 struct socket *so;
679 struct tcpcb *tp;
680 int error;
681 char *s;
682
683 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
684
685 /*
686 * Ok, create the full blown connection, and set things up
687 * as they would have been set up if we had created the
688 * connection when the SYN arrived. If we can't create
689 * the connection, abort it.
690 */
691 so = sonewconn(lso, 0);
692 if (so == NULL) {
693 /*
694 * Drop the connection; we will either send a RST or
695 * have the peer retransmit its SYN again after its
696 * RTO and try again.
697 */
698 TCPSTAT_INC(tcps_listendrop);
699 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
700 log(LOG_DEBUG, "%s; %s: Socket create failed "
701 "due to limits or memory shortage\n",
702 s, __func__);
703 free(s, M_TCPLOG);
704 }
705 goto abort2;
706 }
707 #ifdef MAC
708 mac_socketpeer_set_from_mbuf(m, so);
709 #endif
710
711 inp = sotoinpcb(so);
712 inp->inp_inc.inc_fibnum = so->so_fibnum;
713 INP_WLOCK(inp);
714 /*
715 * Exclusive pcbinfo lock is not required in syncache socket case even
716 * if two inpcb locks can be acquired simultaneously:
717 * - the inpcb in LISTEN state,
718 * - the newly created inp.
719 *
720  * In this case, an inp cannot be at the same time in LISTEN state and
721 * just created by an accept() call.
722 */
723 INP_HASH_WLOCK(&V_tcbinfo);
724
725 /* Insert new socket into PCB hash list. */
726 inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
727 #ifdef INET6
728 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
729 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
730 } else {
731 inp->inp_vflag &= ~INP_IPV6;
732 inp->inp_vflag |= INP_IPV4;
733 #endif
734 inp->inp_laddr = sc->sc_inc.inc_laddr;
735 #ifdef INET6
736 }
737 #endif
738
739 /*
740 * If there's an mbuf and it has a flowid, then let's initialise the
741 * inp with that particular flowid.
742 */
743 if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
744 inp->inp_flowid = m->m_pkthdr.flowid;
745 inp->inp_flowtype = M_HASHTYPE_GET(m);
746 }
747
748 /*
749 * Install in the reservation hash table for now, but don't yet
750 * install a connection group since the full 4-tuple isn't yet
751 * configured.
752 */
753 inp->inp_lport = sc->sc_inc.inc_lport;
754 if ((error = in_pcbinshash_nopcbgroup(inp)) != 0) {
755 /*
756 * Undo the assignments above if we failed to
757 * put the PCB on the hash lists.
758 */
759 #ifdef INET6
760 if (sc->sc_inc.inc_flags & INC_ISIPV6)
761 inp->in6p_laddr = in6addr_any;
762 else
763 #endif
764 inp->inp_laddr.s_addr = INADDR_ANY;
765 inp->inp_lport = 0;
766 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
767 log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
768 "with error %i\n",
769 s, __func__, error);
770 free(s, M_TCPLOG);
771 }
772 INP_HASH_WUNLOCK(&V_tcbinfo);
773 goto abort;
774 }
775 #ifdef IPSEC
776 /* Copy old policy into new socket's. */
777 if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
778 printf("syncache_socket: could not copy policy\n");
779 #endif
780 #ifdef INET6
781 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
782 struct inpcb *oinp = sotoinpcb(lso);
783 struct in6_addr laddr6;
784 struct sockaddr_in6 sin6;
785 /*
786 * Inherit socket options from the listening socket.
787 * Note that in6p_inputopts are not (and should not be)
788 * copied, since it stores previously received options and is
789 * used to detect if each new option is different than the
790 * previous one and hence should be passed to a user.
791 * If we copied in6p_inputopts, a user would not be able to
792 * receive options just after calling the accept system call.
793 */
794 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
795 if (oinp->in6p_outputopts)
796 inp->in6p_outputopts =
797 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
798
799 sin6.sin6_family = AF_INET6;
800 sin6.sin6_len = sizeof(sin6);
801 sin6.sin6_addr = sc->sc_inc.inc6_faddr;
802 sin6.sin6_port = sc->sc_inc.inc_fport;
803 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
804 laddr6 = inp->in6p_laddr;
805 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
806 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
807 if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
808 thread0.td_ucred, m)) != 0) {
809 inp->in6p_laddr = laddr6;
810 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
811 log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
812 "with error %i\n",
813 s, __func__, error);
814 free(s, M_TCPLOG);
815 }
816 INP_HASH_WUNLOCK(&V_tcbinfo);
817 goto abort;
818 }
819 /* Override flowlabel from in6_pcbconnect. */
820 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
821 inp->inp_flow |= sc->sc_flowlabel;
822 }
823 #endif /* INET6 */
824 #if defined(INET) && defined(INET6)
825 else
826 #endif
827 #ifdef INET
828 {
829 struct in_addr laddr;
830 struct sockaddr_in sin;
831
832 inp->inp_options = (m) ? ip_srcroute(m) : NULL;
833
834 if (inp->inp_options == NULL) {
835 inp->inp_options = sc->sc_ipopts;
836 sc->sc_ipopts = NULL;
837 }
838
839 sin.sin_family = AF_INET;
840 sin.sin_len = sizeof(sin);
841 sin.sin_addr = sc->sc_inc.inc_faddr;
842 sin.sin_port = sc->sc_inc.inc_fport;
843 bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
844 laddr = inp->inp_laddr;
845 if (inp->inp_laddr.s_addr == INADDR_ANY)
846 inp->inp_laddr = sc->sc_inc.inc_laddr;
847 if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
848 thread0.td_ucred, m)) != 0) {
849 inp->inp_laddr = laddr;
850 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
851 log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
852 "with error %i\n",
853 s, __func__, error);
854 free(s, M_TCPLOG);
855 }
856 INP_HASH_WUNLOCK(&V_tcbinfo);
857 goto abort;
858 }
859 }
860 #endif /* INET */
861 INP_HASH_WUNLOCK(&V_tcbinfo);
862 tp = intotcpcb(inp);
863 tcp_state_change(tp, TCPS_SYN_RECEIVED);
864 tp->iss = sc->sc_iss;
865 tp->irs = sc->sc_irs;
866 tcp_rcvseqinit(tp);
867 tcp_sendseqinit(tp);
868 tp->snd_wl1 = sc->sc_irs;
869 tp->snd_max = tp->iss + 1;
870 tp->snd_nxt = tp->iss + 1;
871 tp->rcv_up = sc->sc_irs + 1;
872 tp->rcv_wnd = sc->sc_wnd;
873 tp->rcv_adv += tp->rcv_wnd;
874 tp->last_ack_sent = tp->rcv_nxt;
875
876 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
877 if (sc->sc_flags & SCF_NOOPT)
878 tp->t_flags |= TF_NOOPT;
879 else {
880 if (sc->sc_flags & SCF_WINSCALE) {
881 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
882 tp->snd_scale = sc->sc_requested_s_scale;
883 tp->request_r_scale = sc->sc_requested_r_scale;
884 }
885 if (sc->sc_flags & SCF_TIMESTAMP) {
886 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
887 tp->ts_recent = sc->sc_tsreflect;
888 tp->ts_recent_age = tcp_ts_getticks();
889 tp->ts_offset = sc->sc_tsoff;
890 }
891 #ifdef TCP_SIGNATURE
892 if (sc->sc_flags & SCF_SIGNATURE)
893 tp->t_flags |= TF_SIGNATURE;
894 #endif
895 if (sc->sc_flags & SCF_SACK)
896 tp->t_flags |= TF_SACK_PERMIT;
897 }
898
899 if (sc->sc_flags & SCF_ECN)
900 tp->t_flags |= TF_ECN_PERMIT;
901
902 /*
903 * Set up MSS and get cached values from tcp_hostcache.
904 * This might overwrite some of the defaults we just set.
905 */
906 tcp_mss(tp, sc->sc_peer_mss);
907
908 /*
909	 * If the SYN,ACK was retransmitted, indicate that the CWND should
910	 * be limited to one segment in cc_conn_init().
911 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
912 */
913 if (sc->sc_rxmits > 1)
914 tp->snd_cwnd = 1;
915
916 #ifdef TCP_OFFLOAD
917 /*
918 * Allow a TOE driver to install its hooks. Note that we hold the
919 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
920 * new connection before the TOE driver has done its thing.
921 */
922 if (ADDED_BY_TOE(sc)) {
923 struct toedev *tod = sc->sc_tod;
924
925 tod->tod_offload_socket(tod, sc->sc_todctx, so);
926 }
927 #endif
928 /*
929 * Copy and activate timers.
930 */
931 tp->t_keepinit = sototcpcb(lso)->t_keepinit;
932 tp->t_keepidle = sototcpcb(lso)->t_keepidle;
933 tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
934 tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
935 tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
936
937 TCPSTAT_INC(tcps_accepts);
938 return (so);
939
940 abort:
941 INP_WUNLOCK(inp);
942 abort2:
943 if (so != NULL)
944 soabort(so);
945 return (NULL);
946 }
947
948 /*
949 * This function gets called when we receive an ACK for a
950 * socket in the LISTEN state. We look up the connection
951  * in the syncache, and if it's there, we pull it out of
952 * the cache and turn it into a full-blown connection in
953 * the SYN-RECEIVED state.
954 *
955 * On syncache_socket() success the newly created socket
956 * has its underlying inp locked.
957 */
958 int
959 syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
960 struct socket **lsop, struct mbuf *m)
961 {
962 struct syncache *sc;
963 struct syncache_head *sch;
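	/*
	 * scs is an on-stack syncache entry, used when a connection is
	 * reconstructed from a returning syncookie so that no zone
	 * allocation is needed; see the sc != &scs checks below.
	 */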
964 struct syncache scs;
965 char *s;
966
967 /*
968 * Global TCP locks are held because we manipulate the PCB lists
969 * and create a new socket.
970 */
971 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
972 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
973 ("%s: can handle only ACK", __func__));
974
975 sc = syncache_lookup(inc, &sch); /* returns locked sch */
976 SCH_LOCK_ASSERT(sch);
977
978 #ifdef INVARIANTS
979 /*
980 * Test code for syncookies comparing the syncache stored
981 * values with the reconstructed values from the cookie.
982 */
983 if (sc != NULL)
984 syncookie_cmp(inc, sch, sc, th, to, *lsop);
985 #endif
986
987 if (sc == NULL) {
988 /*
989 * There is no syncache entry, so see if this ACK is
990 * a returning syncookie. To do this, first:
991 * A. Check if syncookies are used in case of syncache
992 * overflows
993 * B. See if this socket has had a syncache entry dropped in
994 * the recent past. We don't want to accept a bogus
995 * syncookie if we've never received a SYN or accept it
996 * twice.
997  *   C. Check that the syncookie is valid.  If it is, then
998 * cobble up a fake syncache entry, and return.
999 */
1000 if (!V_tcp_syncookies) {
1001 SCH_UNLOCK(sch);
1002 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1003 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1004 "segment rejected (syncookies disabled)\n",
1005 s, __func__);
1006 goto failed;
1007 }
1008 if (!V_tcp_syncookiesonly &&
1009 sch->sch_last_overflow < time_uptime - SYNCOOKIE_LIFETIME) {
1010 SCH_UNLOCK(sch);
1011 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1012 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1013 "segment rejected (no syncache entry)\n",
1014 s, __func__);
1015 goto failed;
1016 }
1017 bzero(&scs, sizeof(scs));
1018 sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
1019 SCH_UNLOCK(sch);
1020 if (sc == NULL) {
1021 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1022 log(LOG_DEBUG, "%s; %s: Segment failed "
1023 "SYNCOOKIE authentication, segment rejected "
1024 "(probably spoofed)\n", s, __func__);
1025 goto failed;
1026 }
1027 } else {
1028 /* Pull out the entry to unlock the bucket row. */
1029 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
1030 sch->sch_length--;
1031 #ifdef TCP_OFFLOAD
1032 if (ADDED_BY_TOE(sc)) {
1033 struct toedev *tod = sc->sc_tod;
1034
1035 tod->tod_syncache_removed(tod, sc->sc_todctx);
1036 }
1037 #endif
1038 SCH_UNLOCK(sch);
1039 }
1040
1041 /*
1042 * Segment validation:
1043 * ACK must match our initial sequence number + 1 (the SYN|ACK).
1044 */
1045 if (th->th_ack != sc->sc_iss + 1) {
1046 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1047 log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
1048 "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
1049 goto failed;
1050 }
1051
1052 /*
1053 * The SEQ must fall in the window starting at the received
1054 * initial receive sequence number + 1 (the SYN).
1055 */
1056 if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
1057 SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
1058 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1059 log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
1060 "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
1061 goto failed;
1062 }
1063
1064 /*
1065 * If timestamps were not negotiated during SYN/ACK they
1066 * must not appear on any segment during this session.
1067 */
1068 if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
1069 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1070 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1071 "segment rejected\n", s, __func__);
1072 goto failed;
1073 }
1074
1075 /*
1076 * If timestamps were negotiated during SYN/ACK they should
1077 * appear on every segment during this session.
1078	 * XXXAO: This is only informational as there have been unverified
1079	 * reports of non-compliant stacks.
1080 */
1081 if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) {
1082 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1083 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1084 "no action\n", s, __func__);
1085 free(s, M_TCPLOG);
1086 s = NULL;
1087 }
1088 }
1089
1090 /*
1091 * If timestamps were negotiated the reflected timestamp
1092 * must be equal to what we actually sent in the SYN|ACK.
1093 */
1094 if ((to->to_flags & TOF_TS) && to->to_tsecr != sc->sc_ts) {
1095 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1096 log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
1097 "segment rejected\n",
1098 s, __func__, to->to_tsecr, sc->sc_ts);
1099 goto failed;
1100 }
1101
1102 *lsop = syncache_socket(sc, *lsop, m);
1103
1104 if (*lsop == NULL)
1105 TCPSTAT_INC(tcps_sc_aborted);
1106 else
1107 TCPSTAT_INC(tcps_sc_completed);
1108
1109 /* how do we find the inp for the new socket? */
1110 if (sc != &scs)
1111 syncache_free(sc);
1112 return (1);
1113 failed:
1114 if (sc != NULL && sc != &scs)
1115 syncache_free(sc);
1116 if (s != NULL)
1117 free(s, M_TCPLOG);
1118 *lsop = NULL;
1119 return (0);
1120 }
1121
1122 #ifdef TCP_RFC7413
1123 static void
1124 syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
1125 uint64_t response_cookie)
1126 {
1127 struct inpcb *inp;
1128 struct tcpcb *tp;
1129 unsigned int *pending_counter;
1130
1131 /*
1132 * Global TCP locks are held because we manipulate the PCB lists
1133 * and create a new socket.
1134 */
1135 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1136
1137 pending_counter = intotcpcb(sotoinpcb(*lsop))->t_tfo_pending;
1138 *lsop = syncache_socket(sc, *lsop, m);
1139 if (*lsop == NULL) {
1140 TCPSTAT_INC(tcps_sc_aborted);
1141 atomic_subtract_int(pending_counter, 1);
1142 } else {
1143 inp = sotoinpcb(*lsop);
1144 tp = intotcpcb(inp);
1145 tp->t_flags |= TF_FASTOPEN;
1146 tp->t_tfo_cookie = response_cookie;
1147 tp->snd_max = tp->iss;
1148 tp->snd_nxt = tp->iss;
1149 tp->t_tfo_pending = pending_counter;
1150 TCPSTAT_INC(tcps_sc_completed);
1151 }
1152 }
1153 #endif /* TCP_RFC7413 */
1154
1155 /*
1156 * Given a LISTEN socket and an inbound SYN request, add
1157 * this to the syn cache, and send back a segment:
1158 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
1159 * to the source.
1160 *
1161 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
1162 * Doing so would require that we hold onto the data and deliver it
1163 * to the application. However, if we are the target of a SYN-flood
1164 * DoS attack, an attacker could send data which would eventually
1165 * consume all available buffer space if it were ACKed. By not ACKing
1166 * the data, we avoid this DoS scenario.
1167 *
1168 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
1169  * cookie is processed, V_tcp_fastopen_enabled is set to true, and the
1170 * TCP_FASTOPEN socket option is set. In this case, a new socket is created
1171 * and returned via lsop, the mbuf is not freed so that tcp_input() can
1172 * queue its data to the socket, and 1 is returned to indicate the
1173 * TFO-socket-creation path was taken.
1174 */
1175 int
1176 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1177 struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
1178 void *todctx)
1179 {
1180 struct tcpcb *tp;
1181 struct socket *so;
1182 struct syncache *sc = NULL;
1183 struct syncache_head *sch;
1184 struct mbuf *ipopts = NULL;
1185 u_int ltflags;
1186 int win, sb_hiwat, ip_ttl, ip_tos;
1187 char *s;
1188 int rv = 0;
1189 #ifdef INET6
1190 int autoflowlabel = 0;
1191 #endif
1192 #ifdef MAC
1193 struct label *maclabel;
1194 #endif
1195 struct syncache scs;
1196 struct ucred *cred;
1197 #ifdef TCP_RFC7413
1198 uint64_t tfo_response_cookie;
1199 int tfo_cookie_valid = 0;
1200 int tfo_response_cookie_valid = 0;
1201 #endif
1202
1203 INP_WLOCK_ASSERT(inp); /* listen socket */
1204 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
1205 ("%s: unexpected tcp flags", __func__));
1206
1207 /*
1208 * Combine all so/tp operations very early to drop the INP lock as
1209 * soon as possible.
1210 */
1211 so = *lsop;
1212 tp = sototcpcb(so);
1213 cred = crhold(so->so_cred);
1214
1215 #ifdef INET6
1216 if ((inc->inc_flags & INC_ISIPV6) &&
1217 (inp->inp_flags & IN6P_AUTOFLOWLABEL))
1218 autoflowlabel = 1;
1219 #endif
1220 ip_ttl = inp->inp_ip_ttl;
1221 ip_tos = inp->inp_ip_tos;
1222 win = sbspace(&so->so_rcv);
1223 sb_hiwat = so->so_rcv.sb_hiwat;
1224 ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));
1225
1226 #ifdef TCP_RFC7413
1227 if (V_tcp_fastopen_enabled && (tp->t_flags & TF_FASTOPEN) &&
1228 (tp->t_tfo_pending != NULL) && (to->to_flags & TOF_FASTOPEN)) {
1229 /*
1230 * Limit the number of pending TFO connections to
1231 * approximately half of the queue limit. This prevents TFO
1232 * SYN floods from starving the service by filling the
1233 * listen queue with bogus TFO connections.
1234 */
1235 if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
1236 (so->so_qlimit / 2)) {
1237 int result;
1238
1239 result = tcp_fastopen_check_cookie(inc,
1240 to->to_tfo_cookie, to->to_tfo_len,
1241 &tfo_response_cookie);
1242 tfo_cookie_valid = (result > 0);
1243 tfo_response_cookie_valid = (result >= 0);
1244 } else
1245 atomic_subtract_int(tp->t_tfo_pending, 1);
1246 }
1247 #endif
1248
1249 /* By the time we drop the lock these should no longer be used. */
1250 so = NULL;
1251 tp = NULL;
1252
1253 #ifdef MAC
1254 if (mac_syncache_init(&maclabel) != 0) {
1255 INP_WUNLOCK(inp);
1256 goto done;
1257 } else
1258 mac_syncache_create(maclabel, inp);
1259 #endif
1260 #ifdef TCP_RFC7413
1261 if (!tfo_cookie_valid)
1262 #endif
1263 INP_WUNLOCK(inp);
1264
1265 /*
1266 * Remember the IP options, if any.
1267 */
1268 #ifdef INET6
1269 if (!(inc->inc_flags & INC_ISIPV6))
1270 #endif
1271 #ifdef INET
1272 ipopts = (m) ? ip_srcroute(m) : NULL;
1273 #else
1274 ipopts = NULL;
1275 #endif
1276
1277 /*
1278 * See if we already have an entry for this connection.
1279 * If we do, resend the SYN,ACK, and reset the retransmit timer.
1280 *
1281 * XXX: should the syncache be re-initialized with the contents
1282 * of the new SYN here (which may have different options?)
1283 *
1284 * XXX: We do not check the sequence number to see if this is a
1285 * real retransmit or a new connection attempt. The question is
1286 * how to handle such a case; either ignore it as spoofed, or
1287 * drop the current entry and create a new one?
1288 */
1289 sc = syncache_lookup(inc, &sch); /* returns locked entry */
1290 SCH_LOCK_ASSERT(sch);
1291 if (sc != NULL) {
1292 #ifdef TCP_RFC7413
1293 if (tfo_cookie_valid)
1294 INP_WUNLOCK(inp);
1295 #endif
1296 TCPSTAT_INC(tcps_sc_dupsyn);
1297 if (ipopts) {
1298 /*
1299 * If we were remembering a previous source route,
1300 * forget it and use the new one we've been given.
1301 */
1302 if (sc->sc_ipopts)
1303 (void) m_free(sc->sc_ipopts);
1304 sc->sc_ipopts = ipopts;
1305 }
1306 /*
1307 * Update timestamp if present.
1308 */
1309 if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
1310 sc->sc_tsreflect = to->to_tsval;
1311 else
1312 sc->sc_flags &= ~SCF_TIMESTAMP;
1313 #ifdef MAC
1314 /*
1315 * Since we have already unconditionally allocated label
1316 * storage, free it up. The syncache entry will already
1317 * have an initialized label we can use.
1318 */
1319 mac_syncache_destroy(&maclabel);
1320 #endif
1321 /* Retransmit SYN|ACK and reset retransmit count. */
1322 if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
1323 log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
1324 "resetting timer and retransmitting SYN|ACK\n",
1325 s, __func__);
1326 free(s, M_TCPLOG);
1327 }
1328 if (syncache_respond(sc, m) == 0) {
1329 sc->sc_rxmits = 0;
1330 syncache_timeout(sc, sch, 1);
1331 TCPSTAT_INC(tcps_sndacks);
1332 TCPSTAT_INC(tcps_sndtotal);
1333 }
1334 SCH_UNLOCK(sch);
1335 goto done;
1336 }
1337
1338 #ifdef TCP_RFC7413
1339 if (tfo_cookie_valid) {
1340 bzero(&scs, sizeof(scs));
1341 sc = &scs;
1342 goto skip_alloc;
1343 }
1344 #endif
1345
1346 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1347 if (sc == NULL) {
1348 /*
1349 * The zone allocator couldn't provide more entries.
1350 * Treat this as if the cache was full; drop the oldest
1351 * entry and insert the new one.
1352 */
1353 TCPSTAT_INC(tcps_sc_zonefail);
1354 if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) {
1355 sch->sch_last_overflow = time_uptime;
1356 syncache_drop(sc, sch);
1357 }
1358 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1359 if (sc == NULL) {
1360 if (V_tcp_syncookies) {
1361 bzero(&scs, sizeof(scs));
1362 sc = &scs;
1363 } else {
1364 SCH_UNLOCK(sch);
1365 if (ipopts)
1366 (void) m_free(ipopts);
1367 goto done;
1368 }
1369 }
1370 }
1371
1372 #ifdef TCP_RFC7413
1373 skip_alloc:
1374 if (!tfo_cookie_valid && tfo_response_cookie_valid)
1375 sc->sc_tfo_cookie = &tfo_response_cookie;
1376 #endif
1377
1378 /*
1379 * Fill in the syncache values.
1380 */
1381 #ifdef MAC
1382 sc->sc_label = maclabel;
1383 #endif
1384 sc->sc_cred = cred;
1385 cred = NULL;
1386 sc->sc_ipopts = ipopts;
1387 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
1388 #ifdef INET6
1389 if (!(inc->inc_flags & INC_ISIPV6))
1390 #endif
1391 {
1392 sc->sc_ip_tos = ip_tos;
1393 sc->sc_ip_ttl = ip_ttl;
1394 }
1395 #ifdef TCP_OFFLOAD
1396 sc->sc_tod = tod;
1397 sc->sc_todctx = todctx;
1398 #endif
1399 sc->sc_irs = th->th_seq;
1400 sc->sc_iss = arc4random();
1401 sc->sc_flags = 0;
1402 sc->sc_flowlabel = 0;
1403
1404 /*
1405 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
1406	 * win was derived from the socket earlier in the function.
1407 */
1408 win = imax(win, 0);
1409 win = imin(win, TCP_MAXWIN);
1410 sc->sc_wnd = win;
1411
1412 if (V_tcp_do_rfc1323) {
1413 /*
1414 * A timestamp received in a SYN makes
1415 * it ok to send timestamp requests and replies.
1416 */
1417 if (to->to_flags & TOF_TS) {
1418 sc->sc_tsreflect = to->to_tsval;
1419 sc->sc_ts = tcp_ts_getticks();
1420 sc->sc_flags |= SCF_TIMESTAMP;
1421 }
1422 if (to->to_flags & TOF_SCALE) {
1423 int wscale = 0;
1424
1425 /*
1426 * Pick the smallest possible scaling factor that
1427 * will still allow us to scale up to sb_max, aka
1428 * kern.ipc.maxsockbuf.
1429 *
1430 * We do this because there are broken firewalls that
1431 * will corrupt the window scale option, leading to
1432 * the other endpoint believing that our advertised
1433 * window is unscaled. At scale factors larger than
1434 * 5 the unscaled window will drop below 1500 bytes,
1435 * leading to serious problems when traversing these
1436 * broken firewalls.
1437 *
1438 * With the default maxsockbuf of 256K, a scale factor
1439 * of 3 will be chosen by this algorithm. Those who
1440 * choose a larger maxsockbuf should watch out
1441			 * for the compatibility problems mentioned above.
1442 *
1443 * RFC1323: The Window field in a SYN (i.e., a <SYN>
1444 * or <SYN,ACK>) segment itself is never scaled.
1445 */
1446 while (wscale < TCP_MAX_WINSHIFT &&
1447 (TCP_MAXWIN << wscale) < sb_max)
1448 wscale++;
1449 sc->sc_requested_r_scale = wscale;
1450 sc->sc_requested_s_scale = to->to_wscale;
1451 sc->sc_flags |= SCF_WINSCALE;
1452 }
1453 }
1454 #ifdef TCP_SIGNATURE
1455 /*
1456	 * If the listening socket requested TCP digests, and the received SYN
1457 * contains the option, flag this in the syncache so that
1458 * syncache_respond() will do the right thing with the SYN+ACK.
1459 * XXX: Currently we always record the option by default and will
1460 * attempt to use it in syncache_respond().
1461 */
1462 if (to->to_flags & TOF_SIGNATURE || ltflags & TF_SIGNATURE)
1463 sc->sc_flags |= SCF_SIGNATURE;
1464 #endif
1465 if (to->to_flags & TOF_SACKPERM)
1466 sc->sc_flags |= SCF_SACK;
1467 if (to->to_flags & TOF_MSS)
1468 sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
1469 if (ltflags & TF_NOOPT)
1470 sc->sc_flags |= SCF_NOOPT;
1471 if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
1472 sc->sc_flags |= SCF_ECN;
1473
1474 if (V_tcp_syncookies)
1475 sc->sc_iss = syncookie_generate(sch, sc);
1476 #ifdef INET6
1477 if (autoflowlabel) {
1478 if (V_tcp_syncookies)
1479 sc->sc_flowlabel = sc->sc_iss;
1480 else
1481 sc->sc_flowlabel = ip6_randomflowlabel();
1482 sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
1483 }
1484 #endif
1485 SCH_UNLOCK(sch);
1486
1487 #ifdef TCP_RFC7413
1488 if (tfo_cookie_valid) {
1489 syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
1490		/* INP_WUNLOCK(inp) will be performed by the callee */
1491 rv = 1;
1492 goto tfo_done;
1493 }
1494 #endif
1495
1496 /*
1497 * Do a standard 3-way handshake.
1498 */
1499 if (syncache_respond(sc, m) == 0) {
1500 if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
1501 syncache_free(sc);
1502 else if (sc != &scs)
1503 syncache_insert(sc, sch); /* locks and unlocks sch */
1504 TCPSTAT_INC(tcps_sndacks);
1505 TCPSTAT_INC(tcps_sndtotal);
1506 } else {
1507 if (sc != &scs)
1508 syncache_free(sc);
1509 TCPSTAT_INC(tcps_sc_dropped);
1510 }
1511
1512 done:
1513 if (m) {
1514 *lsop = NULL;
1515 m_freem(m);
1516 }
1517 #ifdef TCP_RFC7413
1518 tfo_done:
1519 #endif
1520 if (cred != NULL)
1521 crfree(cred);
1522 #ifdef MAC
1523 if (sc == &scs)
1524 mac_syncache_destroy(&maclabel);
1525 #endif
1526 return (rv);
1527 }
1528
1529 /*
1530 * Send SYN|ACK to the peer. Either in response to the peer's SYN,
1531 * i.e. m0 != NULL, or upon 3WHS ACK timeout, i.e. m0 == NULL.
1532 */
1533 static int
1534 syncache_respond(struct syncache *sc, const struct mbuf *m0)
1535 {
1536 struct ip *ip = NULL;
1537 struct mbuf *m;
1538 struct tcphdr *th = NULL;
1539 int optlen, error = 0; /* Make compiler happy */
1540 u_int16_t hlen, tlen, mssopt;
1541 struct tcpopt to;
1542 #ifdef INET6
1543 struct ip6_hdr *ip6 = NULL;
1544 #endif
1545
1546 hlen =
1547 #ifdef INET6
1548 (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
1549 #endif
1550 sizeof(struct ip);
1551 tlen = hlen + sizeof(struct tcphdr);
1552
1553	/* Determine MSS we advertise to the other end of the connection. */
1554 mssopt = max(tcp_mssopt(&sc->sc_inc), V_tcp_minmss);
1555
1556 /* XXX: Assume that the entire packet will fit in a header mbuf. */
1557 KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
1558 ("syncache: mbuf too small"));
1559
1560 /* Create the IP+TCP header from scratch. */
1561 m = m_gethdr(M_NOWAIT, MT_DATA);
1562 if (m == NULL)
1563 return (ENOBUFS);
1564 #ifdef MAC
1565 mac_syncache_create_mbuf(sc->sc_label, m);
1566 #endif
1567 m->m_data += max_linkhdr;
1568 m->m_len = tlen;
1569 m->m_pkthdr.len = tlen;
1570 m->m_pkthdr.rcvif = NULL;
1571
1572 #ifdef INET6
1573 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1574 ip6 = mtod(m, struct ip6_hdr *);
1575 ip6->ip6_vfc = IPV6_VERSION;
1576 ip6->ip6_nxt = IPPROTO_TCP;
1577 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1578 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1579 ip6->ip6_plen = htons(tlen - hlen);
1580 /* ip6_hlim is set after checksum */
1581 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
1582 ip6->ip6_flow |= sc->sc_flowlabel;
1583
1584 th = (struct tcphdr *)(ip6 + 1);
1585 }
1586 #endif
1587 #if defined(INET6) && defined(INET)
1588 else
1589 #endif
1590 #ifdef INET
1591 {
1592 ip = mtod(m, struct ip *);
1593 ip->ip_v = IPVERSION;
1594 ip->ip_hl = sizeof(struct ip) >> 2;
1595 ip->ip_len = htons(tlen);
1596 ip->ip_id = 0;
1597 ip->ip_off = 0;
1598 ip->ip_sum = 0;
1599 ip->ip_p = IPPROTO_TCP;
1600 ip->ip_src = sc->sc_inc.inc_laddr;
1601 ip->ip_dst = sc->sc_inc.inc_faddr;
1602 ip->ip_ttl = sc->sc_ip_ttl;
1603 ip->ip_tos = sc->sc_ip_tos;
1604
1605 /*
1606 * See if we should do MTU discovery. Route lookups are
1607 * expensive, so we will only unset the DF bit if:
1608 *
1609		 *	1) path_mtu_discovery is disabled, or
1610		 *	2) the SCF_UNREACH flag has been set.
1611 */
1612 if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1613 ip->ip_off |= htons(IP_DF);
1614
1615 th = (struct tcphdr *)(ip + 1);
1616 }
1617 #endif /* INET */
1618 th->th_sport = sc->sc_inc.inc_lport;
1619 th->th_dport = sc->sc_inc.inc_fport;
1620
1621 th->th_seq = htonl(sc->sc_iss);
1622 th->th_ack = htonl(sc->sc_irs + 1);
1623 th->th_off = sizeof(struct tcphdr) >> 2;
1624 th->th_x2 = 0;
1625 th->th_flags = TH_SYN|TH_ACK;
1626 th->th_win = htons(sc->sc_wnd);
1627 th->th_urp = 0;
1628
1629 if (sc->sc_flags & SCF_ECN) {
1630 th->th_flags |= TH_ECE;
1631 TCPSTAT_INC(tcps_ecn_shs);
1632 }
1633
1634 /* Tack on the TCP options. */
1635 if ((sc->sc_flags & SCF_NOOPT) == 0) {
1636 to.to_flags = 0;
1637
1638 to.to_mss = mssopt;
1639 to.to_flags = TOF_MSS;
1640 if (sc->sc_flags & SCF_WINSCALE) {
1641 to.to_wscale = sc->sc_requested_r_scale;
1642 to.to_flags |= TOF_SCALE;
1643 }
1644 if (sc->sc_flags & SCF_TIMESTAMP) {
1645 /* Virgin timestamp or TCP cookie enhanced one. */
1646 to.to_tsval = sc->sc_ts;
1647 to.to_tsecr = sc->sc_tsreflect;
1648 to.to_flags |= TOF_TS;
1649 }
1650 if (sc->sc_flags & SCF_SACK)
1651 to.to_flags |= TOF_SACKPERM;
1652 #ifdef TCP_SIGNATURE
1653 if (sc->sc_flags & SCF_SIGNATURE)
1654 to.to_flags |= TOF_SIGNATURE;
1655 #endif
1656
1657 #ifdef TCP_RFC7413
1658 if (sc->sc_tfo_cookie) {
1659 to.to_flags |= TOF_FASTOPEN;
1660 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
1661 to.to_tfo_cookie = sc->sc_tfo_cookie;
1662 /* don't send cookie again when retransmitting response */
1663 sc->sc_tfo_cookie = NULL;
1664 }
1665 #endif
1666 optlen = tcp_addoptions(&to, (u_char *)(th + 1));
1667
1668 /* Adjust headers by option size. */
1669 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1670 m->m_len += optlen;
1671 m->m_pkthdr.len += optlen;
1672
1673 #ifdef TCP_SIGNATURE
1674 if (sc->sc_flags & SCF_SIGNATURE)
1675 tcp_signature_compute(m, 0, 0, optlen,
1676 to.to_signature, IPSEC_DIR_OUTBOUND);
1677 #endif
1678 #ifdef INET6
1679 if (sc->sc_inc.inc_flags & INC_ISIPV6)
1680 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
1681 else
1682 #endif
1683 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1684 } else
1685 optlen = 0;
1686
1687 M_SETFIB(m, sc->sc_inc.inc_fibnum);
1688 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1689 /*
1690	 * If we have the peer's SYN and it has a flowid, then let's assign it to
1691 * our SYN|ACK. ip6_output() and ip_output() will not assign flowid
1692 * to SYN|ACK due to lack of inp here.
1693 */
1694 if (m0 != NULL && M_HASHTYPE_GET(m0) != M_HASHTYPE_NONE) {
1695 m->m_pkthdr.flowid = m0->m_pkthdr.flowid;
1696 M_HASHTYPE_SET(m, M_HASHTYPE_GET(m0));
1697 }
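	/*
	 * Note: only the pseudo-header checksum is stored in th_sum below.
	 * The CSUM_TCP/CSUM_TCP_IPV6 flags ask for the remainder to be
	 * filled in by checksum offload hardware, or by the delayed
	 * checksum routines on the output path when the interface lacks
	 * that capability.
	 */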
1698 #ifdef INET6
1699 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1700 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
1701 th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
1702 IPPROTO_TCP, 0);
1703 ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
1704 #ifdef TCP_OFFLOAD
1705 if (ADDED_BY_TOE(sc)) {
1706 struct toedev *tod = sc->sc_tod;
1707
1708 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1709
1710 return (error);
1711 }
1712 #endif
1713 error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1714 }
1715 #endif
1716 #if defined(INET6) && defined(INET)
1717 else
1718 #endif
1719 #ifdef INET
1720 {
1721 m->m_pkthdr.csum_flags = CSUM_TCP;
1722 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1723 htons(tlen + optlen - hlen + IPPROTO_TCP));
1724 #ifdef TCP_OFFLOAD
1725 if (ADDED_BY_TOE(sc)) {
1726 struct toedev *tod = sc->sc_tod;
1727
1728 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1729
1730 return (error);
1731 }
1732 #endif
1733 error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
1734 }
1735 #endif
1736 return (error);
1737 }
1738
1739 /*
1740 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
1741 * that exceed the capacity of the syncache by avoiding the storage of any
1742 * of the SYNs we receive. Syncookies defend against blind SYN flooding
1743 * attacks where the attacker does not have access to our responses.
1744 *
1745 * Syncookies encode and include all necessary information about the
1746 * connection setup within the SYN|ACK that we send back. That way we
1747 * can avoid keeping any local state until the ACK to our SYN|ACK returns
1748 * (if ever). Normally the syncache and syncookies are running in parallel
1749  * with the latter taking over when the former is exhausted.  When a matching
1750 * syncache entry is found the syncookie is ignored.
1751 *
1752  * The only reliable information persisting through the 3WHS is our initial
1753  * sequence number ISS of 32 bits.  Syncookies embed a cryptographically
1754  * sufficiently strong hash (MAC) value and a few bits of TCP SYN options
1755  * in the ISS of our SYN|ACK.  The MAC can be recomputed when the ACK to our
1756  * SYN|ACK returns and signifies a legitimate connection if it matches the ACK.
1757 *
1758  * The available space of 32 bits to store the hash and to encode the SYN
1759  * option information is very tight, and we should have at least 24 bits for
1760  * the MAC to keep the number of guesses required for blind spoofing reasonably high.
1761 *
1762 * SYN option information we have to encode to fully restore a connection:
1763  * MSS: important for choosing an optimal segment size to avoid IP level
1764 * fragmentation along the path. The common MSS values can be encoded
1765 * in a 3-bit table. Uncommon values are captured by the next lower value
1766 * in the table leading to a slight increase in packetization overhead.
1767 * WSCALE: is necessary to allow large windows to be used for high delay-
1768 * bandwidth product links. Not scaling the window when it was initially
1769 * negotiated is bad for performance as lack of scaling further decreases
1770 * the apparent available send window. We only need to encode the WSCALE
1771 * we received from the remote end. Our end can be recalculated at any
1772 * time. The common WSCALE values can be encoded in a 3-bit table.
1773 * Uncommon values are captured by the next lower value in the table
1774  *   making us under-estimate the available window size, halving our
1775 * theoretically possible maximum throughput for that connection.
1776 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
1777  * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
1778 * that are included in all segments on a connection. We enable them when
1779 * the ACK has them.
1780 *
1781 * Security of syncookies and attack vectors:
1782 *
1783 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
1784  * together with the global secret to make it unique per connection attempt.
1785  * Thus any change to any of those parameters results in a different MAC output
1786 * in an unpredictable way unless a collision is encountered. 24 bits of the
1787 * MAC are embedded into the ISS.
 *
 * To prevent replay attacks two rotating global secrets are updated with a
 * new random value every 15 seconds.  The life-time of a syncookie is thus
 * 15-30 seconds.
 *
 * Vector 1: Attacking the secret.  This requires finding a weakness in the
 * MAC itself or the way it is used here.  The attacker can do a chosen plain
 * text attack by varying and testing all the parameters under his control.
 * The strength depends on the size and randomness of the secret, and the
 * cryptographic security of the MAC function.  Due to the constant updating
 * of the secret the attacker has at most 29.999 seconds to find the secret
 * and launch spoofed connections.  After that he has to start all over again.
 *
 * Vector 2: Collision attack on the MAC of a single ACK.  With a 24 bit MAC
 * size an average of 4,823 attempts are required for a 50% chance of success
 * to spoof a single syncookie (birthday collision paradox).  However the
 * attacker is blind and doesn't know if one of his attempts succeeded unless
 * he has a side channel to infer success from.  A single connection setup
 * success average of 90% requires 8,790 packets, 99.99% requires 17,578
 * packets.  This many attempts are required for each blind spoofed
 * connection; for every additional spoofed connection he has to launch
 * another N attempts.  Thus for a sustained rate of 100 spoofed connections
 * per second approximately 1,800,000 packets per second would have to be
 * sent (100 x 17,578 ~= 1.76M).
 *
 * NB: The MAC function should be fast so that it doesn't become a CPU
 * exhaustion attack vector itself.
 *
 * References:
 *  RFC4987 TCP SYN Flooding Attacks and Common Mitigations
 *  SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
 *  http://cr.yp.to/syncookies.html    (overview)
 *  http://cr.yp.to/syncookies/archive (details)
 *
 * Schematic construction of a syncookie enabled Initial Sequence Number:
 *  0        1         2         3
 *  12345678901234567890123456789012
 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
 *
 *  x 24 MAC (truncated)
 *  W  3 Send Window Scale index
 *  M  3 MSS index
 *  S  1 SACK permitted
 *  P  1 Odd/even secret
 */
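
/*
 * Illustrative sketch only (not used by the code): how the truncated MAC
 * and the 8 cookie flag bits combine into an ISS per the schematic above.
 * The helper names below are hypothetical; the authoritative encoding and
 * decoding live in syncookie_generate() and syncookie_lookup() further down.
 */
#if 0
static uint32_t
syncookie_example_pack(uint32_t mac, uint8_t flagbits)
{
	uint32_t iss;

	iss = mac & ~0xff;		/* Upper 24 bits: truncated MAC. */
	iss |= flagbits ^ (mac >> 24);	/* Lower 8 bits: masked flag bits. */
	return (iss);
}

static uint8_t
syncookie_example_unpack(uint32_t iss)
{

	/* XORing with the top MAC byte reverses the masking done above. */
	return ((iss & 0xff) ^ (iss >> 24));
}
#endif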

/*
 * Distribution and probability of certain MSS values.  Those in between are
 * rounded down to the next lower one.
 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R. Nelson, 2011]
 *                             .2%  .3%   5%    7%    7%    20%   15%   45%
 */
static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 };
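
/*
 * Worked example (illustrative): a peer MSS of 1430 has no exact entry
 * and is captured by the next lower table value, 1400 (index 4).
 */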

/*
 * Distribution and probability of certain WSCALE values.  We have to map the
 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
 * bits based on prevalence of certain values.  Values for which we have no
 * exact match are rounded down to the next lower one, letting us
 * under-estimate the true available window.  At the moment this would happen
 * only for the very uncommon values 3, 5 and those above 8 (more than 16MB
 * socket buffer and window size).  The absence of the WSCALE option (no
 * scaling in either direction) is encoded with index zero.
 * [WSCALE values histograms, Allman, 2012]
 *                            X 10 10 35  5  6 14 10%   by host
 *                            X 11  4  5  5 18 49  3%   by connections
 */
static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };
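
/*
 * Worked example (illustrative): an uncommon received WSCALE of 5 rounds
 * down to index 4 (shift 4), under-estimating the peer's window by a
 * factor of two.
 */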

/*
 * Compute the MAC for the SYN cookie.  SIPHASH-2-4 is chosen for its speed
 * and good cryptographic properties.
 */
static uint32_t
syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
    uint8_t *secbits, uintptr_t secmod)
{
	SIPHASH_CTX ctx;
	uint32_t siphash[2];

	SipHash24_Init(&ctx);
	SipHash_SetKey(&ctx, secbits);
	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
		SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
		SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
		break;
#endif
	}
	SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
	SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
	SipHash_Update(&ctx, &irs, sizeof(irs));
	SipHash_Update(&ctx, &flags, sizeof(flags));
	SipHash_Update(&ctx, &secmod, sizeof(secmod));
	SipHash_Final((u_int8_t *)&siphash, &ctx);

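	/* Fold the 64-bit SipHash output into the 32 bits we can embed. */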
	return (siphash[0] ^ siphash[1]);
}

static tcp_seq
syncookie_generate(struct syncache_head *sch, struct syncache *sc)
{
	u_int i, secbit, wscale;
	uint32_t iss, hash;
	uint8_t *secbits;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	cookie.cookie = 0;

	/* Map our computed MSS into the 3-bit index. */
	for (i = sizeof(tcp_sc_msstab) / sizeof(*tcp_sc_msstab) - 1;
	     tcp_sc_msstab[i] > sc->sc_peer_mss && i > 0;
	     i--)
		;
	cookie.flags.mss_idx = i;

	/*
	 * Map the send window scale into the 3-bit index but only if
	 * the wscale option was received.
	 */
	if (sc->sc_flags & SCF_WINSCALE) {
		wscale = sc->sc_requested_s_scale;
		for (i = sizeof(tcp_sc_wstab) / sizeof(*tcp_sc_wstab) - 1;
		     tcp_sc_wstab[i] > wscale && i > 0;
		     i--)
			;
		cookie.flags.wscale_idx = i;
	}

	/* Can we do SACK? */
	if (sc->sc_flags & SCF_SACK)
		cookie.flags.sack_ok = 1;

	/* Which of the two secrets to use. */
	secbit = sch->sch_sc->secret.oddeven & 0x1;
	cookie.flags.odd_even = secbit;

	secbits = sch->sch_sc->secret.key[secbit];
	hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
	    (uintptr_t)sch);

	/*
	 * Put the flags into the low byte of the ISS, XORed with the top
	 * byte of the hash for better ISS number variance.  This doesn't
	 * enhance the cryptographic strength; it is done to prevent the 8
	 * cookie bits from showing up directly on the wire.
	 */
	iss = hash & ~0xff;
	iss |= cookie.cookie ^ (hash >> 24);

	/* Randomize the timestamp. */
	if (sc->sc_flags & SCF_TIMESTAMP) {
		sc->sc_ts = arc4random();
		sc->sc_tsoff = sc->sc_ts - tcp_ts_getticks();
	}

	TCPSTAT_INC(tcps_sc_sendcookie);
	return (iss);
}

static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	uint32_t hash;
	uint8_t *secbits;
	tcp_seq ack, seq;
	int wnd, wscale = 0;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	/*
	 * Pull information out of SYN-ACK/ACK and revert sequence number
	 * advances.
	 */
	ack = th->th_ack - 1;
	seq = th->th_seq - 1;

	/*
	 * Unpack the flags containing enough information to restore the
	 * connection.  This reverses the XOR folding of the flag bits
	 * applied in syncookie_generate().
	 */
	cookie.cookie = (ack & 0xff) ^ (ack >> 24);

	/* Which of the two secrets to use. */
	secbits = sch->sch_sc->secret.key[cookie.flags.odd_even];

	hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);

	/* The recomputed hash matches the ACK if this was a genuine cookie. */
	if ((ack & ~0xff) != (hash & ~0xff))
		return (NULL);

	/* Fill in the syncache values. */
	sc->sc_flags = 0;
	bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
	sc->sc_ipopts = NULL;

	sc->sc_irs = seq;
	sc->sc_iss = ack;

	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
		sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = sc->sc_iss & IPV6_FLOWLABEL_MASK;
		break;
#endif
	}

	sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];

	/* We can simply recompute the receive window scale we sent earlier. */
	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
		wscale++;

	/* Only use wscale if it was enabled in the original SYN. */
	if (cookie.flags.wscale_idx > 0) {
		sc->sc_requested_r_scale = wscale;
		sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
		sc->sc_flags |= SCF_WINSCALE;
	}

	wnd = sbspace(&lso->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;

	if (cookie.flags.sack_ok)
		sc->sc_flags |= SCF_SACK;

	if (to->to_flags & TOF_TS) {
		sc->sc_flags |= SCF_TIMESTAMP;
		sc->sc_tsreflect = to->to_tsval;
		sc->sc_ts = to->to_tsecr;
		sc->sc_tsoff = to->to_tsecr - tcp_ts_getticks();
	}

	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;

	sc->sc_rxmits = 0;

	TCPSTAT_INC(tcps_sc_recvcookie);
	return (sc);
}

#ifdef INVARIANTS
static int
syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	struct syncache scs, *scx;
	char *s;

	bzero(&scs, sizeof(scs));
	scx = syncookie_lookup(inc, sch, &scs, th, to, lso);

	if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
		return (0);

	if (scx != NULL) {
		if (sc->sc_peer_mss != scx->sc_peer_mss)
			log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
			    s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);

		if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
			log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_r_scale,
			    scx->sc_requested_r_scale);

		if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
			log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_s_scale,
			    scx->sc_requested_s_scale);

		if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
			log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
	}

	if (s != NULL)
		free(s, M_TCPLOG);
	return (0);
}
#endif /* INVARIANTS */

static void
syncookie_reseed(void *arg)
{
	struct tcp_syncache *sc = arg;
	uint8_t *secbits;
	int secbit;

	/*
	 * Reseeding the secret doesn't have to be protected by a lock.
	 * It must only be ensured that the new random values are visible
	 * to all CPUs in an SMP environment.  The atomic with release
	 * semantics ensures that.
	 */
	secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
	secbits = sc->secret.key[secbit];
	arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
	atomic_add_rel_int(&sc->secret.oddeven, 1);

	/* Reschedule ourselves. */
	callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
}
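
/*
 * For illustration only: a minimal sketch of how this reseed callout could
 * be armed at syncache initialization time.  This is an assumption for
 * exposition (the actual setup is expected to live in syncache_init()),
 * not a verbatim copy of it.
 */
#if 0
	arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
	arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
	callout_init(&V_tcp_syncache.secret.reseed, 1);
	callout_reset(&V_tcp_syncache.secret.reseed,
	    SYNCOOKIE_LIFETIME * hz, syncookie_reseed, &V_tcp_syncache);
#endif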

/*
 * Returns the current number of syncache entries.  This number
 * will probably change before you get around to calling
 * syncache_pcblist.
 */
int
syncache_pcbcount(void)
{
	struct syncache_head *sch;
	int count, i;

	for (count = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
		/* No need to lock for a read. */
		sch = &V_tcp_syncache.hashbase[i];
		count += sch->sch_length;
	}
	return (count);
}

/*
 * Exports the syncache entries to userland so that netstat can display
 * them alongside the other sockets.  This function is intended to be
 * called only from tcp_pcblist.
 *
 * Due to concurrency on an active system, the number of pcbs exported
 * may have no relation to max_pcbs.  max_pcbs merely indicates the
 * amount of space the caller allocated for this function to use.
 */
int
syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
{
	struct xtcpcb xt;
	struct syncache *sc;
	struct syncache_head *sch;
	int count, error, i;

	for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
		sch = &V_tcp_syncache.hashbase[i];
		SCH_LOCK(sch);
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (count >= max_pcbs) {
				SCH_UNLOCK(sch);
				goto exit;
			}
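			/* Skip entries the caller's credential may not see. */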
			if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
				continue;
			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof(xt);
			if (sc->sc_inc.inc_flags & INC_ISIPV6)
				xt.xt_inp.inp_vflag = INP_IPV6;
			else
				xt.xt_inp.inp_vflag = INP_IPV4;
			bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
			    sizeof (struct in_conninfo));
			xt.xt_tp.t_inpcb = &xt.xt_inp;
			xt.xt_tp.t_state = TCPS_SYN_RECEIVED;
			xt.xt_socket.xso_protocol = IPPROTO_TCP;
			xt.xt_socket.xso_len = sizeof (struct xsocket);
			xt.xt_socket.so_type = SOCK_STREAM;
			xt.xt_socket.so_state = SS_ISCONNECTING;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
			if (error) {
				SCH_UNLOCK(sch);
				goto exit;
			}
			count++;
		}
		SCH_UNLOCK(sch);
	}
exit:
	*pcbs_exported = count;
	return (error);
}