1 /*-
2 * Copyright (c) 2001 McAfee, Inc.
3 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Jonathan Lemon
7 * and McAfee Research, the Security Research Division of McAfee, Inc. under
8 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program. [2001 McAfee, Inc.]
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/11.2/sys/netinet/tcp_syncache.c 334740 2018-06-06 22:02:44Z tuexen $");
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 #include "opt_pcbgroup.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/hash.h>
44 #include <sys/refcount.h>
45 #include <sys/kernel.h>
46 #include <sys/sysctl.h>
47 #include <sys/limits.h>
48 #include <sys/lock.h>
49 #include <sys/mutex.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/proc.h> /* for proc0 declaration */
53 #include <sys/random.h>
54 #include <sys/socket.h>
55 #include <sys/socketvar.h>
56 #include <sys/syslog.h>
57 #include <sys/ucred.h>
58
59 #include <sys/md5.h>
60 #include <crypto/siphash/siphash.h>
61
62 #include <vm/uma.h>
63
64 #include <net/if.h>
65 #include <net/if_var.h>
66 #include <net/route.h>
67 #include <net/vnet.h>
68
69 #include <netinet/in.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/ip.h>
72 #include <netinet/in_var.h>
73 #include <netinet/in_pcb.h>
74 #include <netinet/ip_var.h>
75 #include <netinet/ip_options.h>
76 #ifdef INET6
77 #include <netinet/ip6.h>
78 #include <netinet/icmp6.h>
79 #include <netinet6/nd6.h>
80 #include <netinet6/ip6_var.h>
81 #include <netinet6/in6_pcb.h>
82 #endif
83 #include <netinet/tcp.h>
84 #ifdef TCP_RFC7413
85 #include <netinet/tcp_fastopen.h>
86 #endif
87 #include <netinet/tcp_fsm.h>
88 #include <netinet/tcp_seq.h>
89 #include <netinet/tcp_timer.h>
90 #include <netinet/tcp_var.h>
91 #include <netinet/tcp_syncache.h>
92 #ifdef INET6
93 #include <netinet6/tcp6_var.h>
94 #endif
95 #ifdef TCP_OFFLOAD
96 #include <netinet/toecore.h>
97 #endif
98
99 #include <netipsec/ipsec_support.h>
100
101 #include <machine/in_cksum.h>
102
103 #include <security/mac/mac_framework.h>
104
105 static VNET_DEFINE(int, tcp_syncookies) = 1;
106 #define V_tcp_syncookies VNET(tcp_syncookies)
107 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
108 &VNET_NAME(tcp_syncookies), 0,
109 "Use TCP SYN cookies if the syncache overflows");
110
111 static VNET_DEFINE(int, tcp_syncookiesonly) = 0;
112 #define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
113 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
114 &VNET_NAME(tcp_syncookiesonly), 0,
115 "Use only TCP SYN cookies");
116
117 #ifdef TCP_OFFLOAD
118 #define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
119 #endif
120
121 static void syncache_drop(struct syncache *, struct syncache_head *);
122 static void syncache_free(struct syncache *);
123 static void syncache_insert(struct syncache *, struct syncache_head *);
124 static int syncache_respond(struct syncache *, struct syncache_head *, int,
125 const struct mbuf *);
126 static struct socket *syncache_socket(struct syncache *, struct socket *,
127 struct mbuf *m);
128 static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
129 int docallout);
130 static void syncache_timer(void *);
131
132 static uint32_t syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
133 uint8_t *, uintptr_t);
134 static tcp_seq syncookie_generate(struct syncache_head *, struct syncache *);
135 static struct syncache
136 *syncookie_lookup(struct in_conninfo *, struct syncache_head *,
137 struct syncache *, struct tcphdr *, struct tcpopt *,
138 struct socket *);
139 static void syncookie_reseed(void *);
140 #ifdef INVARIANTS
141 static int syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
142 struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
143 struct socket *lso);
144 #endif
145
/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits correspond to a timeout of 3 * (1 + 2 + 4 + 8) == 45 seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
151 #define SYNCACHE_MAXREXMTS 3
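/*
 * Worked schedule for the arithmetic above, assuming the 3 second base
 * retransmit timeout implied by it and a doubling backoff:
 *
 *	transmit:	1st	2nd	3rd	4th (last)
 *	wait after:	3 s	6 s	12 s	24 s
 *
 * for a total of 3 * (1 + 2 + 4 + 8) == 45 seconds before the entry
 * is dropped.
 */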
152
153 /* Arbitrary values */
154 #define TCP_SYNCACHE_HASHSIZE 512
155 #define TCP_SYNCACHE_BUCKETLIMIT 30
156
157 static VNET_DEFINE(struct tcp_syncache, tcp_syncache);
158 #define V_tcp_syncache VNET(tcp_syncache)
159
160 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
161 "TCP SYN cache");
162
163 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
164 &VNET_NAME(tcp_syncache.bucket_limit), 0,
165 "Per-bucket hash limit for syncache");
166
167 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
168 &VNET_NAME(tcp_syncache.cache_limit), 0,
169 "Overall entry limit for syncache");
170
171 SYSCTL_UMA_CUR(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_VNET,
172 &VNET_NAME(tcp_syncache.zone), "Current number of entries in syncache");
173
174 SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
175 &VNET_NAME(tcp_syncache.hashsize), 0,
176 "Size of TCP syncache hashtable");
177
178 static int
179 sysctl_net_inet_tcp_syncache_rexmtlimit_check(SYSCTL_HANDLER_ARGS)
180 {
181 int error;
182 u_int new;
183
184 new = V_tcp_syncache.rexmt_limit;
185 error = sysctl_handle_int(oidp, &new, 0, req);
186 if ((error == 0) && (req->newptr != NULL)) {
187 if (new > TCP_MAXRXTSHIFT)
188 error = EINVAL;
189 else
190 V_tcp_syncache.rexmt_limit = new;
191 }
192 return (error);
193 }
194
195 SYSCTL_PROC(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit,
196 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW,
197 &VNET_NAME(tcp_syncache.rexmt_limit), 0,
198 sysctl_net_inet_tcp_syncache_rexmtlimit_check, "UI",
199 "Limit on SYN/ACK retransmissions");
200
201 VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
202 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
203 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
204 "Send reset on socket allocation failure");
205
206 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
207
208 #define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx)
209 #define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx)
210 #define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED)
211
212 /*
213 * Requires the syncache entry to be already removed from the bucket list.
214 */
215 static void
216 syncache_free(struct syncache *sc)
217 {
218
219 if (sc->sc_ipopts)
220 (void) m_free(sc->sc_ipopts);
221 if (sc->sc_cred)
222 crfree(sc->sc_cred);
223 #ifdef MAC
224 mac_syncache_destroy(&sc->sc_label);
225 #endif
226
227 uma_zfree(V_tcp_syncache.zone, sc);
228 }
229
230 void
231 syncache_init(void)
232 {
233 int i;
234
235 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
236 V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
237 V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
238 V_tcp_syncache.hash_secret = arc4random();
239
240 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
241 &V_tcp_syncache.hashsize);
242 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
243 &V_tcp_syncache.bucket_limit);
244 if (!powerof2(V_tcp_syncache.hashsize) ||
245 V_tcp_syncache.hashsize == 0) {
246 printf("WARNING: syncache hash size is not a power of 2.\n");
247 V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
248 }
249 V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
250
251 /* Set limits. */
252 V_tcp_syncache.cache_limit =
253 V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
254 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
255 &V_tcp_syncache.cache_limit);
256
257 /* Allocate the hash table. */
258 V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
259 sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);
260
261 #ifdef VIMAGE
262 V_tcp_syncache.vnet = curvnet;
263 #endif
264
265 /* Initialize the hash buckets. */
266 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
267 TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
268 mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
269 NULL, MTX_DEF);
270 callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
271 &V_tcp_syncache.hashbase[i].sch_mtx, 0);
272 V_tcp_syncache.hashbase[i].sch_length = 0;
273 V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
274 V_tcp_syncache.hashbase[i].sch_last_overflow =
275 -(SYNCOOKIE_LIFETIME + 1);
276 }
277
278 /* Create the syncache entry zone. */
279 V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
280 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
281 V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
282 V_tcp_syncache.cache_limit);
283
284 /* Start the SYN cookie reseeder callout. */
285 callout_init(&V_tcp_syncache.secret.reseed, 1);
286 arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
287 arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
288 callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
289 syncookie_reseed, &V_tcp_syncache);
290 }
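/*
 * The syncache geometry can only be changed at boot time, since the
 * sysctls above are RDTUN. An illustrative /boot/loader.conf sketch
 * (example values, not recommendations):
 *
 *	net.inet.tcp.syncache.hashsize="1024"
 *	net.inet.tcp.syncache.bucketlimit="30"
 *	net.inet.tcp.syncache.cachelimit="30720"
 *
 * hashsize must be a power of 2; cachelimit defaults to
 * hashsize * bucketlimit when not set.
 */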
291
292 #ifdef VIMAGE
293 void
294 syncache_destroy(void)
295 {
296 struct syncache_head *sch;
297 struct syncache *sc, *nsc;
298 int i;
299
300 /*
301 * Stop the re-seed timer before freeing resources. No need to
302 * possibly schedule it another time.
303 */
304 callout_drain(&V_tcp_syncache.secret.reseed);
305
306 /* Cleanup hash buckets: stop timers, free entries, destroy locks. */
307 for (i = 0; i < V_tcp_syncache.hashsize; i++) {
308
309 sch = &V_tcp_syncache.hashbase[i];
310 callout_drain(&sch->sch_timer);
311
312 SCH_LOCK(sch);
313 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
314 syncache_drop(sc, sch);
315 SCH_UNLOCK(sch);
316 KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
317 ("%s: sch->sch_bucket not empty", __func__));
318 KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
319 __func__, sch->sch_length));
320 mtx_destroy(&sch->sch_mtx);
321 }
322
323 KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
324 ("%s: cache_count not 0", __func__));
325
326 /* Free the allocated global resources. */
327 uma_zdestroy(V_tcp_syncache.zone);
328 free(V_tcp_syncache.hashbase, M_SYNCACHE);
329 }
330 #endif
331
332 /*
333 * Inserts a syncache entry into the specified bucket row.
334 * Locks and unlocks the syncache_head autonomously.
335 */
336 static void
337 syncache_insert(struct syncache *sc, struct syncache_head *sch)
338 {
339 struct syncache *sc2;
340
341 SCH_LOCK(sch);
342
343 /*
344 * Make sure that we don't overflow the per-bucket limit.
345 * If the bucket is full, toss the oldest element.
346 */
347 if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
348 KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
349 ("sch->sch_length incorrect"));
350 sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
351 sch->sch_last_overflow = time_uptime;
352 syncache_drop(sc2, sch);
353 TCPSTAT_INC(tcps_sc_bucketoverflow);
354 }
355
356 /* Put it into the bucket. */
357 TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
358 sch->sch_length++;
359
360 #ifdef TCP_OFFLOAD
361 if (ADDED_BY_TOE(sc)) {
362 struct toedev *tod = sc->sc_tod;
363
364 tod->tod_syncache_added(tod, sc->sc_todctx);
365 }
366 #endif
367
368 /* Reinitialize the bucket row's timer. */
369 if (sch->sch_length == 1)
370 sch->sch_nextc = ticks + INT_MAX;
371 syncache_timeout(sc, sch, 1);
372
373 SCH_UNLOCK(sch);
374
375 TCPSTATES_INC(TCPS_SYN_RECEIVED);
376 TCPSTAT_INC(tcps_sc_added);
377 }
378
379 /*
380 * Remove and free entry from syncache bucket row.
381 * Expects locked syncache head.
382 */
383 static void
384 syncache_drop(struct syncache *sc, struct syncache_head *sch)
385 {
386
387 SCH_LOCK_ASSERT(sch);
388
389 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
390 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
391 sch->sch_length--;
392
393 #ifdef TCP_OFFLOAD
394 if (ADDED_BY_TOE(sc)) {
395 struct toedev *tod = sc->sc_tod;
396
397 tod->tod_syncache_removed(tod, sc->sc_todctx);
398 }
399 #endif
400
401 syncache_free(sc);
402 }
403
/*
 * Engage/reengage the timer on a bucket row.
 */
407 static void
408 syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
409 {
410 int rexmt;
411
412 if (sc->sc_rxmits == 0)
413 rexmt = TCPTV_RTOBASE;
414 else
415 TCPT_RANGESET(rexmt, TCPTV_RTOBASE * tcp_syn_backoff[sc->sc_rxmits],
416 tcp_rexmit_min, TCPTV_REXMTMAX);
417 sc->sc_rxttime = ticks + rexmt;
418 sc->sc_rxmits++;
419 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
420 sch->sch_nextc = sc->sc_rxttime;
421 if (docallout)
422 callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
423 syncache_timer, (void *)sch);
424 }
425 }
426
427 /*
428 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
429 * If we have retransmitted an entry the maximum number of times, expire it.
430 * One separate timer for each bucket row.
431 */
432 static void
433 syncache_timer(void *xsch)
434 {
435 struct syncache_head *sch = (struct syncache_head *)xsch;
436 struct syncache *sc, *nsc;
437 int tick = ticks;
438 char *s;
439
440 CURVNET_SET(sch->sch_sc->vnet);
441
442 /* NB: syncache_head has already been locked by the callout. */
443 SCH_LOCK_ASSERT(sch);
444
445 /*
446 * In the following cycle we may remove some entries and/or
447 * advance some timeouts, so re-initialize the bucket timer.
448 */
449 sch->sch_nextc = tick + INT_MAX;
450
451 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
		/*
		 * We do not check if the listen socket still exists
		 * and accept the case where the listen socket may be
		 * gone by the time we resend the SYN/ACK. We do
		 * not expect this to happen often. If it does,
		 * then the RST will be sent by the time the remote
		 * host does the SYN/ACK->ACK.
		 */
460 if (TSTMP_GT(sc->sc_rxttime, tick)) {
461 if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
462 sch->sch_nextc = sc->sc_rxttime;
463 continue;
464 }
465 if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
466 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
467 log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
468 "giving up and removing syncache entry\n",
469 s, __func__);
470 free(s, M_TCPLOG);
471 }
472 syncache_drop(sc, sch);
473 TCPSTAT_INC(tcps_sc_stale);
474 continue;
475 }
476 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
477 log(LOG_DEBUG, "%s; %s: Response timeout, "
478 "retransmitting (%u) SYN|ACK\n",
479 s, __func__, sc->sc_rxmits);
480 free(s, M_TCPLOG);
481 }
482
483 syncache_respond(sc, sch, 1, NULL);
484 TCPSTAT_INC(tcps_sc_retransmitted);
485 syncache_timeout(sc, sch, 0);
486 }
487 if (!TAILQ_EMPTY(&(sch)->sch_bucket))
488 callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
489 syncache_timer, (void *)(sch));
490 CURVNET_RESTORE();
491 }
492
/*
 * Find an entry in the syncache.
 * Always returns with a locked syncache_head, plus a matching entry or NULL.
 */
497 static struct syncache *
498 syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
499 {
500 struct syncache *sc;
501 struct syncache_head *sch;
502 uint32_t hash;
503
	/*
	 * The hash is built on foreign port + local port + foreign address.
	 * We rely on the fact that struct in_conninfo starts with 16 bits
	 * of foreign port, followed by 16 bits of local port and then 128
	 * bits of foreign address. In the case of an IPv4 address, the
	 * first 3 32-bit words of the address are always zero.
	 */
511 hash = jenkins_hash32((uint32_t *)&inc->inc_ie, 5,
512 V_tcp_syncache.hash_secret) & V_tcp_syncache.hashmask;
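	/*
	 * A minimal sketch of how that layout assumption could be made
	 * explicit at compile time (illustrative only; assumes the
	 * in_pcb.h member names ie_fport, ie_lport and ie_dependfaddr):
	 *
	 *	CTASSERT(offsetof(struct in_endpoints, ie_fport) == 0);
	 *	CTASSERT(offsetof(struct in_endpoints, ie_lport) == 2);
	 *	CTASSERT(offsetof(struct in_endpoints, ie_dependfaddr) == 4);
	 */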
513
514 sch = &V_tcp_syncache.hashbase[hash];
515 *schp = sch;
516 SCH_LOCK(sch);
517
518 /* Circle through bucket row to find matching entry. */
519 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
520 if (bcmp(&inc->inc_ie, &sc->sc_inc.inc_ie,
521 sizeof(struct in_endpoints)) == 0)
522 break;
523
524 return (sc); /* Always returns with locked sch. */
525 }
526
527 /*
528 * This function is called when we get a RST for a
529 * non-existent connection, so that we can see if the
530 * connection is in the syn cache. If it is, zap it.
531 */
532 void
533 syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
534 {
535 struct syncache *sc;
536 struct syncache_head *sch;
537 char *s = NULL;
538
539 sc = syncache_lookup(inc, &sch); /* returns locked sch */
540 SCH_LOCK_ASSERT(sch);
541
542 /*
543 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
544 * See RFC 793 page 65, section SEGMENT ARRIVES.
545 */
546 if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
547 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
548 log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
549 "FIN flag set, segment ignored\n", s, __func__);
550 TCPSTAT_INC(tcps_badrst);
551 goto done;
552 }
553
554 /*
555 * No corresponding connection was found in syncache.
556 * If syncookies are enabled and possibly exclusively
557 * used, or we are under memory pressure, a valid RST
558 * may not find a syncache entry. In that case we're
559 * done and no SYN|ACK retransmissions will happen.
560 * Otherwise the RST was misdirected or spoofed.
561 */
562 if (sc == NULL) {
563 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
564 log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
565 "syncache entry (possibly syncookie only), "
566 "segment ignored\n", s, __func__);
567 TCPSTAT_INC(tcps_badrst);
568 goto done;
569 }
570
571 /*
572 * If the RST bit is set, check the sequence number to see
573 * if this is a valid reset segment.
574 * RFC 793 page 37:
575 * In all states except SYN-SENT, all reset (RST) segments
576 * are validated by checking their SEQ-fields. A reset is
577 * valid if its sequence number is in the window.
578 *
579 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
581 * send a reset with the sequence number at the rightmost edge
582 * of our receive window, and we have to handle this case.
583 */
584 if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
585 SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
586 syncache_drop(sc, sch);
587 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
588 log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
589 "connection attempt aborted by remote endpoint\n",
590 s, __func__);
591 TCPSTAT_INC(tcps_sc_reset);
592 } else {
593 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
594 log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
595 "IRS %u (+WND %u), segment ignored\n",
596 s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
597 TCPSTAT_INC(tcps_badrst);
598 }
599
600 done:
601 if (s != NULL)
602 free(s, M_TCPLOG);
603 SCH_UNLOCK(sch);
604 }
605
606 void
607 syncache_badack(struct in_conninfo *inc)
608 {
609 struct syncache *sc;
610 struct syncache_head *sch;
611
612 sc = syncache_lookup(inc, &sch); /* returns locked sch */
613 SCH_LOCK_ASSERT(sch);
614 if (sc != NULL) {
615 syncache_drop(sc, sch);
616 TCPSTAT_INC(tcps_sc_badack);
617 }
618 SCH_UNLOCK(sch);
619 }
620
621 void
622 syncache_unreach(struct in_conninfo *inc, tcp_seq th_seq)
623 {
624 struct syncache *sc;
625 struct syncache_head *sch;
626
627 sc = syncache_lookup(inc, &sch); /* returns locked sch */
628 SCH_LOCK_ASSERT(sch);
629 if (sc == NULL)
630 goto done;
631
632 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
633 if (ntohl(th_seq) != sc->sc_iss)
634 goto done;
635
636 /*
	 * If we've retransmitted 3 times and this is our second error,
638 * we remove the entry. Otherwise, we allow it to continue on.
639 * This prevents us from incorrectly nuking an entry during a
640 * spurious network outage.
641 *
642 * See tcp_notify().
643 */
644 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
645 sc->sc_flags |= SCF_UNREACH;
646 goto done;
647 }
648 syncache_drop(sc, sch);
649 TCPSTAT_INC(tcps_sc_unreach);
650 done:
651 SCH_UNLOCK(sch);
652 }
653
654 /*
655 * Build a new TCP socket structure from a syncache entry.
656 *
657 * On success return the newly created socket with its underlying inp locked.
658 */
659 static struct socket *
660 syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
661 {
662 struct tcp_function_block *blk;
663 struct inpcb *inp = NULL;
664 struct socket *so;
665 struct tcpcb *tp;
666 int error;
667 char *s;
668
669 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
670
671 /*
672 * Ok, create the full blown connection, and set things up
673 * as they would have been set up if we had created the
674 * connection when the SYN arrived. If we can't create
675 * the connection, abort it.
676 */
677 so = sonewconn(lso, 0);
678 if (so == NULL) {
679 /*
680 * Drop the connection; we will either send a RST or
681 * have the peer retransmit its SYN again after its
682 * RTO and try again.
683 */
684 TCPSTAT_INC(tcps_listendrop);
685 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
686 log(LOG_DEBUG, "%s; %s: Socket create failed "
687 "due to limits or memory shortage\n",
688 s, __func__);
689 free(s, M_TCPLOG);
690 }
691 goto abort2;
692 }
693 #ifdef MAC
694 mac_socketpeer_set_from_mbuf(m, so);
695 #endif
696
697 inp = sotoinpcb(so);
698 inp->inp_inc.inc_fibnum = so->so_fibnum;
699 INP_WLOCK(inp);
	/*
	 * An exclusive pcbinfo lock is not required in the syncache socket
	 * case, even though two inpcb locks can be acquired simultaneously:
	 * - the inpcb in LISTEN state,
	 * - the newly created inp.
	 *
	 * In this case, an inp cannot at the same time be in the LISTEN
	 * state and just created by an accept() call.
	 */
709 INP_HASH_WLOCK(&V_tcbinfo);
710
711 /* Insert new socket into PCB hash list. */
712 inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
713 #ifdef INET6
714 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
715 inp->inp_vflag &= ~INP_IPV4;
716 inp->inp_vflag |= INP_IPV6;
717 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
718 } else {
719 inp->inp_vflag &= ~INP_IPV6;
720 inp->inp_vflag |= INP_IPV4;
721 #endif
722 inp->inp_laddr = sc->sc_inc.inc_laddr;
723 #ifdef INET6
724 }
725 #endif
726
727 /*
728 * If there's an mbuf and it has a flowid, then let's initialise the
729 * inp with that particular flowid.
730 */
731 if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
732 inp->inp_flowid = m->m_pkthdr.flowid;
733 inp->inp_flowtype = M_HASHTYPE_GET(m);
734 }
735
736 /*
737 * Install in the reservation hash table for now, but don't yet
738 * install a connection group since the full 4-tuple isn't yet
739 * configured.
740 */
741 inp->inp_lport = sc->sc_inc.inc_lport;
742 if ((error = in_pcbinshash_nopcbgroup(inp)) != 0) {
743 /*
744 * Undo the assignments above if we failed to
745 * put the PCB on the hash lists.
746 */
747 #ifdef INET6
748 if (sc->sc_inc.inc_flags & INC_ISIPV6)
749 inp->in6p_laddr = in6addr_any;
750 else
751 #endif
752 inp->inp_laddr.s_addr = INADDR_ANY;
753 inp->inp_lport = 0;
754 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
755 log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
756 "with error %i\n",
757 s, __func__, error);
758 free(s, M_TCPLOG);
759 }
760 INP_HASH_WUNLOCK(&V_tcbinfo);
761 goto abort;
762 }
763 #ifdef INET6
764 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
765 struct inpcb *oinp = sotoinpcb(lso);
766 struct in6_addr laddr6;
767 struct sockaddr_in6 sin6;
768 /*
769 * Inherit socket options from the listening socket.
770 * Note that in6p_inputopts are not (and should not be)
771 * copied, since it stores previously received options and is
772 * used to detect if each new option is different than the
773 * previous one and hence should be passed to a user.
774 * If we copied in6p_inputopts, a user would not be able to
775 * receive options just after calling the accept system call.
776 */
777 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
778 if (oinp->in6p_outputopts)
779 inp->in6p_outputopts =
780 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
781
782 sin6.sin6_family = AF_INET6;
783 sin6.sin6_len = sizeof(sin6);
784 sin6.sin6_addr = sc->sc_inc.inc6_faddr;
785 sin6.sin6_port = sc->sc_inc.inc_fport;
786 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
787 laddr6 = inp->in6p_laddr;
788 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
789 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
790 if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
791 thread0.td_ucred, m)) != 0) {
792 inp->in6p_laddr = laddr6;
793 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
794 log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
795 "with error %i\n",
796 s, __func__, error);
797 free(s, M_TCPLOG);
798 }
799 INP_HASH_WUNLOCK(&V_tcbinfo);
800 goto abort;
801 }
802 /* Override flowlabel from in6_pcbconnect. */
803 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
804 inp->inp_flow |= sc->sc_flowlabel;
805 }
806 #endif /* INET6 */
807 #if defined(INET) && defined(INET6)
808 else
809 #endif
810 #ifdef INET
811 {
812 struct in_addr laddr;
813 struct sockaddr_in sin;
814
815 inp->inp_options = (m) ? ip_srcroute(m) : NULL;
816
817 if (inp->inp_options == NULL) {
818 inp->inp_options = sc->sc_ipopts;
819 sc->sc_ipopts = NULL;
820 }
821
822 sin.sin_family = AF_INET;
823 sin.sin_len = sizeof(sin);
824 sin.sin_addr = sc->sc_inc.inc_faddr;
825 sin.sin_port = sc->sc_inc.inc_fport;
826 bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
827 laddr = inp->inp_laddr;
828 if (inp->inp_laddr.s_addr == INADDR_ANY)
829 inp->inp_laddr = sc->sc_inc.inc_laddr;
830 if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
831 thread0.td_ucred, m)) != 0) {
832 inp->inp_laddr = laddr;
833 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
834 log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
835 "with error %i\n",
836 s, __func__, error);
837 free(s, M_TCPLOG);
838 }
839 INP_HASH_WUNLOCK(&V_tcbinfo);
840 goto abort;
841 }
842 }
843 #endif /* INET */
844 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
845 /* Copy old policy into new socket's. */
846 if (ipsec_copy_pcbpolicy(sotoinpcb(lso), inp) != 0)
847 printf("syncache_socket: could not copy policy\n");
848 #endif
849 INP_HASH_WUNLOCK(&V_tcbinfo);
850 tp = intotcpcb(inp);
851 tcp_state_change(tp, TCPS_SYN_RECEIVED);
852 tp->iss = sc->sc_iss;
853 tp->irs = sc->sc_irs;
854 tcp_rcvseqinit(tp);
855 tcp_sendseqinit(tp);
856 blk = sototcpcb(lso)->t_fb;
857 if (blk != tp->t_fb) {
		/*
		 * Our parent's t_fb was not the default, so we need to
		 * release our reference on tp->t_fb and pick up one on
		 * the new entry.
		 */
863 struct tcp_function_block *rblk;
864
865 rblk = find_and_ref_tcp_fb(blk);
866 KASSERT(rblk != NULL,
867 ("cannot find blk %p out of syncache?", blk));
868 if (tp->t_fb->tfb_tcp_fb_fini)
869 (*tp->t_fb->tfb_tcp_fb_fini)(tp);
870 refcount_release(&tp->t_fb->tfb_refcnt);
871 tp->t_fb = rblk;
872 if (tp->t_fb->tfb_tcp_fb_init) {
873 (*tp->t_fb->tfb_tcp_fb_init)(tp);
874 }
875 }
876 tp->snd_wl1 = sc->sc_irs;
877 tp->snd_max = tp->iss + 1;
878 tp->snd_nxt = tp->iss + 1;
879 tp->rcv_up = sc->sc_irs + 1;
880 tp->rcv_wnd = sc->sc_wnd;
881 tp->rcv_adv += tp->rcv_wnd;
882 tp->last_ack_sent = tp->rcv_nxt;
883
884 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
885 if (sc->sc_flags & SCF_NOOPT)
886 tp->t_flags |= TF_NOOPT;
887 else {
888 if (sc->sc_flags & SCF_WINSCALE) {
889 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
890 tp->snd_scale = sc->sc_requested_s_scale;
891 tp->request_r_scale = sc->sc_requested_r_scale;
892 }
893 if (sc->sc_flags & SCF_TIMESTAMP) {
894 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
895 tp->ts_recent = sc->sc_tsreflect;
896 tp->ts_recent_age = tcp_ts_getticks();
897 tp->ts_offset = sc->sc_tsoff;
898 }
899 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
900 if (sc->sc_flags & SCF_SIGNATURE)
901 tp->t_flags |= TF_SIGNATURE;
902 #endif
903 if (sc->sc_flags & SCF_SACK)
904 tp->t_flags |= TF_SACK_PERMIT;
905 }
906
907 if (sc->sc_flags & SCF_ECN)
908 tp->t_flags |= TF_ECN_PERMIT;
909
910 /*
911 * Set up MSS and get cached values from tcp_hostcache.
912 * This might overwrite some of the defaults we just set.
913 */
914 tcp_mss(tp, sc->sc_peer_mss);
915
	/*
	 * If the SYN,ACK was retransmitted, indicate that the CWND should
	 * be limited to one segment in cc_conn_init().
	 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
	 */
921 if (sc->sc_rxmits > 1)
922 tp->snd_cwnd = 1;
923
924 #ifdef TCP_OFFLOAD
925 /*
926 * Allow a TOE driver to install its hooks. Note that we hold the
927 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
928 * new connection before the TOE driver has done its thing.
929 */
930 if (ADDED_BY_TOE(sc)) {
931 struct toedev *tod = sc->sc_tod;
932
933 tod->tod_offload_socket(tod, sc->sc_todctx, so);
934 }
935 #endif
936 /*
937 * Copy and activate timers.
938 */
939 tp->t_keepinit = sototcpcb(lso)->t_keepinit;
940 tp->t_keepidle = sototcpcb(lso)->t_keepidle;
941 tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
942 tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
943 tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
944
945 TCPSTAT_INC(tcps_accepts);
946 return (so);
947
948 abort:
949 INP_WUNLOCK(inp);
950 abort2:
951 if (so != NULL)
952 soabort(so);
953 return (NULL);
954 }
955
956 /*
957 * This function gets called when we receive an ACK for a
958 * socket in the LISTEN state. We look up the connection
 * in the syncache, and if it's there, we pull it out of
960 * the cache and turn it into a full-blown connection in
961 * the SYN-RECEIVED state.
962 *
963 * On syncache_socket() success the newly created socket
964 * has its underlying inp locked.
965 */
966 int
967 syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
968 struct socket **lsop, struct mbuf *m)
969 {
970 struct syncache *sc;
971 struct syncache_head *sch;
972 struct syncache scs;
973 char *s;
974
975 /*
976 * Global TCP locks are held because we manipulate the PCB lists
977 * and create a new socket.
978 */
979 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
980 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
981 ("%s: can handle only ACK", __func__));
982
983 sc = syncache_lookup(inc, &sch); /* returns locked sch */
984 SCH_LOCK_ASSERT(sch);
985
986 #ifdef INVARIANTS
987 /*
988 * Test code for syncookies comparing the syncache stored
989 * values with the reconstructed values from the cookie.
990 */
991 if (sc != NULL)
992 syncookie_cmp(inc, sch, sc, th, to, *lsop);
993 #endif
994
995 if (sc == NULL) {
996 /*
997 * There is no syncache entry, so see if this ACK is
998 * a returning syncookie. To do this, first:
999 * A. Check if syncookies are used in case of syncache
1000 * overflows
1001 * B. See if this socket has had a syncache entry dropped in
1002 * the recent past. We don't want to accept a bogus
1003 * syncookie if we've never received a SYN or accept it
1004 * twice.
		 * C. Check that the syncookie is valid. If it is, then
1006 * cobble up a fake syncache entry, and return.
1007 */
1008 if (!V_tcp_syncookies) {
1009 SCH_UNLOCK(sch);
1010 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1011 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1012 "segment rejected (syncookies disabled)\n",
1013 s, __func__);
1014 goto failed;
1015 }
1016 if (!V_tcp_syncookiesonly &&
1017 sch->sch_last_overflow < time_uptime - SYNCOOKIE_LIFETIME) {
1018 SCH_UNLOCK(sch);
1019 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1020 log(LOG_DEBUG, "%s; %s: Spurious ACK, "
1021 "segment rejected (no syncache entry)\n",
1022 s, __func__);
1023 goto failed;
1024 }
1025 bzero(&scs, sizeof(scs));
1026 sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
1027 SCH_UNLOCK(sch);
1028 if (sc == NULL) {
1029 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1030 log(LOG_DEBUG, "%s; %s: Segment failed "
1031 "SYNCOOKIE authentication, segment rejected "
1032 "(probably spoofed)\n", s, __func__);
1033 goto failed;
1034 }
1035 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1036 /* If received ACK has MD5 signature, check it. */
1037 if ((to->to_flags & TOF_SIGNATURE) != 0 &&
1038 (!TCPMD5_ENABLED() ||
1039 TCPMD5_INPUT(m, th, to->to_signature) != 0)) {
1040 /* Drop the ACK. */
1041 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1042 log(LOG_DEBUG, "%s; %s: Segment rejected, "
1043 "MD5 signature doesn't match.\n",
1044 s, __func__);
1045 free(s, M_TCPLOG);
1046 }
1047 TCPSTAT_INC(tcps_sig_err_sigopt);
1048 return (-1); /* Do not send RST */
1049 }
1050 #endif /* TCP_SIGNATURE */
1051 } else {
1052 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/*
		 * If the listening socket requested TCP digests, check that
		 * the received ACK has a signature and that it is correct.
		 * If not, drop the ACK and leave the sc entry in the cache,
		 * because the SYN was received with a correct signature.
		 */
1059 if (sc->sc_flags & SCF_SIGNATURE) {
1060 if ((to->to_flags & TOF_SIGNATURE) == 0) {
1061 /* No signature */
1062 TCPSTAT_INC(tcps_sig_err_nosigopt);
1063 SCH_UNLOCK(sch);
1064 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1065 log(LOG_DEBUG, "%s; %s: Segment "
1066 "rejected, MD5 signature wasn't "
1067 "provided.\n", s, __func__);
1068 free(s, M_TCPLOG);
1069 }
1070 return (-1); /* Do not send RST */
1071 }
1072 if (!TCPMD5_ENABLED() ||
1073 TCPMD5_INPUT(m, th, to->to_signature) != 0) {
1074 /* Doesn't match or no SA */
1075 SCH_UNLOCK(sch);
1076 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1077 log(LOG_DEBUG, "%s; %s: Segment "
1078 "rejected, MD5 signature doesn't "
1079 "match.\n", s, __func__);
1080 free(s, M_TCPLOG);
1081 }
1082 return (-1); /* Do not send RST */
1083 }
1084 }
1085 #endif /* TCP_SIGNATURE */
1086 /*
1087 * Pull out the entry to unlock the bucket row.
1088 *
1089 * NOTE: We must decrease TCPS_SYN_RECEIVED count here, not
		 * tcp_state_change(). The tcpcb does not exist at this
		 * moment. A new one will be allocated via syncache_socket->
1092 * sonewconn->tcp_usr_attach in TCPS_CLOSED state, then
1093 * syncache_socket() will change it to TCPS_SYN_RECEIVED.
1094 */
1095 TCPSTATES_DEC(TCPS_SYN_RECEIVED);
1096 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
1097 sch->sch_length--;
1098 #ifdef TCP_OFFLOAD
1099 if (ADDED_BY_TOE(sc)) {
1100 struct toedev *tod = sc->sc_tod;
1101
1102 tod->tod_syncache_removed(tod, sc->sc_todctx);
1103 }
1104 #endif
1105 SCH_UNLOCK(sch);
1106 }
1107
1108 /*
1109 * Segment validation:
1110 * ACK must match our initial sequence number + 1 (the SYN|ACK).
1111 */
1112 if (th->th_ack != sc->sc_iss + 1) {
1113 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1114 log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
1115 "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
1116 goto failed;
1117 }
1118
1119 /*
	 * The SEQ must fall in the window starting at the initial
	 * receive sequence number (IRS) + 1 (the SYN).
1122 */
1123 if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
1124 SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
1125 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1126 log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
1127 "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
1128 goto failed;
1129 }
1130
1131 /*
1132 * If timestamps were not negotiated during SYN/ACK they
1133 * must not appear on any segment during this session.
1134 */
1135 if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
1136 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1137 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1138 "segment rejected\n", s, __func__);
1139 goto failed;
1140 }
1141
1142 /*
1143 * If timestamps were negotiated during SYN/ACK they should
1144 * appear on every segment during this session.
1145 * XXXAO: This is only informal as there have been unverified
	 * reports of non-compliant stacks.
1147 */
1148 if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) {
1149 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1150 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1151 "no action\n", s, __func__);
1152 free(s, M_TCPLOG);
1153 s = NULL;
1154 }
1155 }
1156
1157 /*
1158 * If timestamps were negotiated, the reflected timestamp
1159 * must be equal to what we actually sent in the SYN|ACK
1160 * except in the case of 0. Some boxes are known for sending
1161 * broken timestamp replies during the 3whs (and potentially
1162 * during the connection also).
1163 *
1164 * Accept the final ACK of 3whs with reflected timestamp of 0
1165 * instead of sending a RST and deleting the syncache entry.
1166 */
1167 if ((to->to_flags & TOF_TS) && to->to_tsecr &&
1168 to->to_tsecr != sc->sc_ts) {
1169 if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
1170 log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
1171 "segment rejected\n",
1172 s, __func__, to->to_tsecr, sc->sc_ts);
1173 goto failed;
1174 }
1175
1176 *lsop = syncache_socket(sc, *lsop, m);
1177
1178 if (*lsop == NULL)
1179 TCPSTAT_INC(tcps_sc_aborted);
1180 else
1181 TCPSTAT_INC(tcps_sc_completed);
1182
1183 /* how do we find the inp for the new socket? */
1184 if (sc != &scs)
1185 syncache_free(sc);
1186 return (1);
1187 failed:
1188 if (sc != NULL && sc != &scs)
1189 syncache_free(sc);
1190 if (s != NULL)
1191 free(s, M_TCPLOG);
1192 *lsop = NULL;
1193 return (0);
1194 }
1195
1196 #ifdef TCP_RFC7413
1197 static void
1198 syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
1199 uint64_t response_cookie)
1200 {
1201 struct inpcb *inp;
1202 struct tcpcb *tp;
1203 unsigned int *pending_counter;
1204
1205 /*
1206 * Global TCP locks are held because we manipulate the PCB lists
1207 * and create a new socket.
1208 */
1209 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1210
1211 pending_counter = intotcpcb(sotoinpcb(*lsop))->t_tfo_pending;
1212 *lsop = syncache_socket(sc, *lsop, m);
1213 if (*lsop == NULL) {
1214 TCPSTAT_INC(tcps_sc_aborted);
1215 atomic_subtract_int(pending_counter, 1);
1216 } else {
1217 inp = sotoinpcb(*lsop);
1218 tp = intotcpcb(inp);
1219 tp->t_flags |= TF_FASTOPEN;
1220 tp->t_tfo_cookie = response_cookie;
1221 tp->snd_max = tp->iss;
1222 tp->snd_nxt = tp->iss;
1223 tp->t_tfo_pending = pending_counter;
1224 TCPSTAT_INC(tcps_sc_completed);
1225 }
1226 }
1227 #endif /* TCP_RFC7413 */
1228
1229 /*
1230 * Given a LISTEN socket and an inbound SYN request, add
1231 * this to the syn cache, and send back a segment:
1232 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
1233 * to the source.
1234 *
1235 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
1236 * Doing so would require that we hold onto the data and deliver it
1237 * to the application. However, if we are the target of a SYN-flood
1238 * DoS attack, an attacker could send data which would eventually
1239 * consume all available buffer space if it were ACKed. By not ACKing
1240 * the data, we avoid this DoS scenario.
1241 *
1242 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
1243 * cookie is processed, V_tcp_fastopen_enabled set to true, and the
1244 * TCP_FASTOPEN socket option is set. In this case, a new socket is created
1245 * and returned via lsop, the mbuf is not freed so that tcp_input() can
1246 * queue its data to the socket, and 1 is returned to indicate the
1247 * TFO-socket-creation path was taken.
1248 */
1249 int
1250 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
1251 struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
1252 void *todctx)
1253 {
1254 struct tcpcb *tp;
1255 struct socket *so;
1256 struct syncache *sc = NULL;
1257 struct syncache_head *sch;
1258 struct mbuf *ipopts = NULL;
1259 u_int ltflags;
1260 int win, sb_hiwat, ip_ttl, ip_tos;
1261 char *s;
1262 int rv = 0;
1263 #ifdef INET6
1264 int autoflowlabel = 0;
1265 #endif
1266 #ifdef MAC
1267 struct label *maclabel;
1268 #endif
1269 struct syncache scs;
1270 struct ucred *cred;
1271 #ifdef TCP_RFC7413
1272 uint64_t tfo_response_cookie;
1273 int tfo_cookie_valid = 0;
1274 int tfo_response_cookie_valid = 0;
1275 #endif
1276
1277 INP_WLOCK_ASSERT(inp); /* listen socket */
1278 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
1279 ("%s: unexpected tcp flags", __func__));
1280
1281 /*
1282 * Combine all so/tp operations very early to drop the INP lock as
1283 * soon as possible.
1284 */
1285 so = *lsop;
1286 tp = sototcpcb(so);
1287 cred = crhold(so->so_cred);
1288
1289 #ifdef INET6
1290 if ((inc->inc_flags & INC_ISIPV6) &&
1291 (inp->inp_flags & IN6P_AUTOFLOWLABEL))
1292 autoflowlabel = 1;
1293 #endif
1294 ip_ttl = inp->inp_ip_ttl;
1295 ip_tos = inp->inp_ip_tos;
1296 win = sbspace(&so->so_rcv);
1297 sb_hiwat = so->so_rcv.sb_hiwat;
1298 ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));
1299
1300 #ifdef TCP_RFC7413
1301 if (V_tcp_fastopen_enabled && (tp->t_flags & TF_FASTOPEN) &&
1302 (tp->t_tfo_pending != NULL) && (to->to_flags & TOF_FASTOPEN)) {
1303 /*
1304 * Limit the number of pending TFO connections to
1305 * approximately half of the queue limit. This prevents TFO
1306 * SYN floods from starving the service by filling the
1307 * listen queue with bogus TFO connections.
1308 */
1309 if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
1310 (so->so_qlimit / 2)) {
1311 int result;
1312
1313 result = tcp_fastopen_check_cookie(inc,
1314 to->to_tfo_cookie, to->to_tfo_len,
1315 &tfo_response_cookie);
1316 tfo_cookie_valid = (result > 0);
1317 tfo_response_cookie_valid = (result >= 0);
1318 } else
1319 atomic_subtract_int(tp->t_tfo_pending, 1);
1320 }
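
	/*
	 * Worked example of the limit above: with a listen(2) backlog
	 * giving so_qlimit == 128, the fetchadd admits a new TFO
	 * connection only while the pending count before the increment
	 * is at most 64, i.e. roughly half the queue limit.
	 */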
1321 #endif
1322
1323 /* By the time we drop the lock these should no longer be used. */
1324 so = NULL;
1325 tp = NULL;
1326
1327 #ifdef MAC
1328 if (mac_syncache_init(&maclabel) != 0) {
1329 INP_WUNLOCK(inp);
1330 goto done;
1331 } else
1332 mac_syncache_create(maclabel, inp);
1333 #endif
1334 #ifdef TCP_RFC7413
1335 if (!tfo_cookie_valid)
1336 #endif
1337 INP_WUNLOCK(inp);
1338
1339 /*
1340 * Remember the IP options, if any.
1341 */
1342 #ifdef INET6
1343 if (!(inc->inc_flags & INC_ISIPV6))
1344 #endif
1345 #ifdef INET
1346 ipopts = (m) ? ip_srcroute(m) : NULL;
1347 #else
1348 ipopts = NULL;
1349 #endif
1350
1351 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1352 /*
1353 * If listening socket requested TCP digests, check that received
1354 * SYN has signature and it is correct. If signature doesn't match
1355 * or TCP_SIGNATURE support isn't enabled, drop the packet.
1356 */
1357 if (ltflags & TF_SIGNATURE) {
1358 if ((to->to_flags & TOF_SIGNATURE) == 0) {
1359 TCPSTAT_INC(tcps_sig_err_nosigopt);
1360 goto done;
1361 }
1362 if (!TCPMD5_ENABLED() ||
1363 TCPMD5_INPUT(m, th, to->to_signature) != 0)
1364 goto done;
1365 }
1366 #endif /* TCP_SIGNATURE */
1367 /*
1368 * See if we already have an entry for this connection.
1369 * If we do, resend the SYN,ACK, and reset the retransmit timer.
1370 *
1371 * XXX: should the syncache be re-initialized with the contents
1372 * of the new SYN here (which may have different options?)
1373 *
1374 * XXX: We do not check the sequence number to see if this is a
1375 * real retransmit or a new connection attempt. The question is
1376 * how to handle such a case; either ignore it as spoofed, or
1377 * drop the current entry and create a new one?
1378 */
1379 sc = syncache_lookup(inc, &sch); /* returns locked entry */
1380 SCH_LOCK_ASSERT(sch);
1381 if (sc != NULL) {
1382 #ifdef TCP_RFC7413
1383 if (tfo_cookie_valid)
1384 INP_WUNLOCK(inp);
1385 #endif
1386 TCPSTAT_INC(tcps_sc_dupsyn);
1387 if (ipopts) {
1388 /*
1389 * If we were remembering a previous source route,
1390 * forget it and use the new one we've been given.
1391 */
1392 if (sc->sc_ipopts)
1393 (void) m_free(sc->sc_ipopts);
1394 sc->sc_ipopts = ipopts;
1395 }
1396 /*
1397 * Update timestamp if present.
1398 */
1399 if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
1400 sc->sc_tsreflect = to->to_tsval;
1401 else
1402 sc->sc_flags &= ~SCF_TIMESTAMP;
1403 #ifdef MAC
1404 /*
1405 * Since we have already unconditionally allocated label
1406 * storage, free it up. The syncache entry will already
1407 * have an initialized label we can use.
1408 */
1409 mac_syncache_destroy(&maclabel);
1410 #endif
1411 /* Retransmit SYN|ACK and reset retransmit count. */
1412 if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
1413 log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
1414 "resetting timer and retransmitting SYN|ACK\n",
1415 s, __func__);
1416 free(s, M_TCPLOG);
1417 }
1418 if (syncache_respond(sc, sch, 1, m) == 0) {
1419 sc->sc_rxmits = 0;
1420 syncache_timeout(sc, sch, 1);
1421 TCPSTAT_INC(tcps_sndacks);
1422 TCPSTAT_INC(tcps_sndtotal);
1423 }
1424 SCH_UNLOCK(sch);
1425 goto done;
1426 }
1427
1428 #ifdef TCP_RFC7413
1429 if (tfo_cookie_valid) {
1430 bzero(&scs, sizeof(scs));
1431 sc = &scs;
1432 goto skip_alloc;
1433 }
1434 #endif
1435
1436 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1437 if (sc == NULL) {
1438 /*
1439 * The zone allocator couldn't provide more entries.
1440 * Treat this as if the cache was full; drop the oldest
1441 * entry and insert the new one.
1442 */
1443 TCPSTAT_INC(tcps_sc_zonefail);
1444 if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) {
1445 sch->sch_last_overflow = time_uptime;
1446 syncache_drop(sc, sch);
1447 }
1448 sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
1449 if (sc == NULL) {
1450 if (V_tcp_syncookies) {
1451 bzero(&scs, sizeof(scs));
1452 sc = &scs;
1453 } else {
1454 SCH_UNLOCK(sch);
1455 if (ipopts)
1456 (void) m_free(ipopts);
1457 goto done;
1458 }
1459 }
1460 }
1461
1462 #ifdef TCP_RFC7413
1463 skip_alloc:
1464 if (!tfo_cookie_valid && tfo_response_cookie_valid)
1465 sc->sc_tfo_cookie = &tfo_response_cookie;
1466 #endif
1467
1468 /*
1469 * Fill in the syncache values.
1470 */
1471 #ifdef MAC
1472 sc->sc_label = maclabel;
1473 #endif
1474 sc->sc_cred = cred;
1475 cred = NULL;
1476 sc->sc_ipopts = ipopts;
1477 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
1478 #ifdef INET6
1479 if (!(inc->inc_flags & INC_ISIPV6))
1480 #endif
1481 {
1482 sc->sc_ip_tos = ip_tos;
1483 sc->sc_ip_ttl = ip_ttl;
1484 }
1485 #ifdef TCP_OFFLOAD
1486 sc->sc_tod = tod;
1487 sc->sc_todctx = todctx;
1488 #endif
1489 sc->sc_irs = th->th_seq;
1490 sc->sc_iss = arc4random();
1491 sc->sc_flags = 0;
1492 sc->sc_flowlabel = 0;
1493
1494 /*
1495 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
	 * The win value was derived from the socket earlier in this function.
1497 */
1498 win = imax(win, 0);
1499 win = imin(win, TCP_MAXWIN);
1500 sc->sc_wnd = win;
1501
1502 if (V_tcp_do_rfc1323) {
1503 /*
1504 * A timestamp received in a SYN makes
1505 * it ok to send timestamp requests and replies.
1506 */
1507 if (to->to_flags & TOF_TS) {
1508 sc->sc_tsreflect = to->to_tsval;
1509 sc->sc_ts = tcp_ts_getticks();
1510 sc->sc_flags |= SCF_TIMESTAMP;
1511 }
1512 if (to->to_flags & TOF_SCALE) {
1513 int wscale = 0;
1514
1515 /*
1516 * Pick the smallest possible scaling factor that
1517 * will still allow us to scale up to sb_max, aka
1518 * kern.ipc.maxsockbuf.
1519 *
1520 * We do this because there are broken firewalls that
1521 * will corrupt the window scale option, leading to
1522 * the other endpoint believing that our advertised
1523 * window is unscaled. At scale factors larger than
1524 * 5 the unscaled window will drop below 1500 bytes,
1525 * leading to serious problems when traversing these
1526 * broken firewalls.
1527 *
1528 * With the default maxsockbuf of 256K, a scale factor
1529 * of 3 will be chosen by this algorithm. Those who
1530 * choose a larger maxsockbuf should watch out
1531 * for the compatibility problems mentioned above.
1532 *
1533 * RFC1323: The Window field in a SYN (i.e., a <SYN>
1534 * or <SYN,ACK>) segment itself is never scaled.
1535 */
1536 while (wscale < TCP_MAX_WINSHIFT &&
1537 (TCP_MAXWIN << wscale) < sb_max)
1538 wscale++;
1539 sc->sc_requested_r_scale = wscale;
1540 sc->sc_requested_s_scale = to->to_wscale;
1541 sc->sc_flags |= SCF_WINSCALE;
1542 }
1543 }
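
	/*
	 * Worked example of the wscale loop above, assuming the default
	 * 256K maxsockbuf mentioned there (sb_max == 262144):
	 * 65535 << 2 == 262140 is still below sb_max, while
	 * 65535 << 3 == 524280 is not, so wscale == 3 is chosen.
	 */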
1544 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1545 /*
1546 * If listening socket requested TCP digests, flag this in the
1547 * syncache so that syncache_respond() will do the right thing
1548 * with the SYN+ACK.
1549 */
1550 if (ltflags & TF_SIGNATURE)
1551 sc->sc_flags |= SCF_SIGNATURE;
1552 #endif /* TCP_SIGNATURE */
1553 if (to->to_flags & TOF_SACKPERM)
1554 sc->sc_flags |= SCF_SACK;
1555 if (to->to_flags & TOF_MSS)
1556 sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
1557 if (ltflags & TF_NOOPT)
1558 sc->sc_flags |= SCF_NOOPT;
1559 if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
1560 sc->sc_flags |= SCF_ECN;
1561
1562 if (V_tcp_syncookies)
1563 sc->sc_iss = syncookie_generate(sch, sc);
1564 #ifdef INET6
1565 if (autoflowlabel) {
1566 if (V_tcp_syncookies)
1567 sc->sc_flowlabel = sc->sc_iss;
1568 else
1569 sc->sc_flowlabel = ip6_randomflowlabel();
1570 sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
1571 }
1572 #endif
1573 SCH_UNLOCK(sch);
1574
1575 #ifdef TCP_RFC7413
1576 if (tfo_cookie_valid) {
1577 syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
		/* INP_WUNLOCK(inp) will be performed by the callee */
1579 rv = 1;
1580 goto tfo_done;
1581 }
1582 #endif
1583
1584 /*
1585 * Do a standard 3-way handshake.
1586 */
1587 if (syncache_respond(sc, sch, 0, m) == 0) {
1588 if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
1589 syncache_free(sc);
1590 else if (sc != &scs)
1591 syncache_insert(sc, sch); /* locks and unlocks sch */
1592 TCPSTAT_INC(tcps_sndacks);
1593 TCPSTAT_INC(tcps_sndtotal);
1594 } else {
1595 if (sc != &scs)
1596 syncache_free(sc);
1597 TCPSTAT_INC(tcps_sc_dropped);
1598 }
1599
1600 done:
1601 if (m) {
1602 *lsop = NULL;
1603 m_freem(m);
1604 }
1605 #ifdef TCP_RFC7413
1606 tfo_done:
1607 #endif
1608 if (cred != NULL)
1609 crfree(cred);
1610 #ifdef MAC
1611 if (sc == &scs)
1612 mac_syncache_destroy(&maclabel);
1613 #endif
1614 return (rv);
1615 }
1616
1617 /*
1618 * Send SYN|ACK to the peer. Either in response to the peer's SYN,
1619 * i.e. m0 != NULL, or upon 3WHS ACK timeout, i.e. m0 == NULL.
1620 */
1621 static int
1622 syncache_respond(struct syncache *sc, struct syncache_head *sch, int locked,
1623 const struct mbuf *m0)
1624 {
1625 struct ip *ip = NULL;
1626 struct mbuf *m;
1627 struct tcphdr *th = NULL;
1628 int optlen, error = 0; /* Make compiler happy */
1629 u_int16_t hlen, tlen, mssopt;
1630 struct tcpopt to;
1631 #ifdef INET6
1632 struct ip6_hdr *ip6 = NULL;
1633 #endif
1634 hlen =
1635 #ifdef INET6
1636 (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
1637 #endif
1638 sizeof(struct ip);
1639 tlen = hlen + sizeof(struct tcphdr);
1640
	/* Determine the MSS we advertise to the other end of the connection. */
1642 mssopt = max(tcp_mssopt(&sc->sc_inc), V_tcp_minmss);
1643
1644 /* XXX: Assume that the entire packet will fit in a header mbuf. */
1645 KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
1646 ("syncache: mbuf too small"));
1647
1648 /* Create the IP+TCP header from scratch. */
1649 m = m_gethdr(M_NOWAIT, MT_DATA);
1650 if (m == NULL)
1651 return (ENOBUFS);
1652 #ifdef MAC
1653 mac_syncache_create_mbuf(sc->sc_label, m);
1654 #endif
1655 m->m_data += max_linkhdr;
1656 m->m_len = tlen;
1657 m->m_pkthdr.len = tlen;
1658 m->m_pkthdr.rcvif = NULL;
1659
1660 #ifdef INET6
1661 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1662 ip6 = mtod(m, struct ip6_hdr *);
1663 ip6->ip6_vfc = IPV6_VERSION;
1664 ip6->ip6_nxt = IPPROTO_TCP;
1665 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1666 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1667 ip6->ip6_plen = htons(tlen - hlen);
1668 /* ip6_hlim is set after checksum */
1669 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
1670 ip6->ip6_flow |= sc->sc_flowlabel;
1671
1672 th = (struct tcphdr *)(ip6 + 1);
1673 }
1674 #endif
1675 #if defined(INET6) && defined(INET)
1676 else
1677 #endif
1678 #ifdef INET
1679 {
1680 ip = mtod(m, struct ip *);
1681 ip->ip_v = IPVERSION;
1682 ip->ip_hl = sizeof(struct ip) >> 2;
1683 ip->ip_len = htons(tlen);
1684 ip->ip_id = 0;
1685 ip->ip_off = 0;
1686 ip->ip_sum = 0;
1687 ip->ip_p = IPPROTO_TCP;
1688 ip->ip_src = sc->sc_inc.inc_laddr;
1689 ip->ip_dst = sc->sc_inc.inc_faddr;
1690 ip->ip_ttl = sc->sc_ip_ttl;
1691 ip->ip_tos = sc->sc_ip_tos;
1692
1693 /*
1694 * See if we should do MTU discovery. Route lookups are
1695 * expensive, so we will only unset the DF bit if:
1696 *
1697 * 1) path_mtu_discovery is disabled
1698 * 2) the SCF_UNREACH flag has been set
1699 */
1700 if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1701 ip->ip_off |= htons(IP_DF);
1702
1703 th = (struct tcphdr *)(ip + 1);
1704 }
1705 #endif /* INET */
1706 th->th_sport = sc->sc_inc.inc_lport;
1707 th->th_dport = sc->sc_inc.inc_fport;
1708
1709 th->th_seq = htonl(sc->sc_iss);
1710 th->th_ack = htonl(sc->sc_irs + 1);
1711 th->th_off = sizeof(struct tcphdr) >> 2;
1712 th->th_x2 = 0;
1713 th->th_flags = TH_SYN|TH_ACK;
1714 th->th_win = htons(sc->sc_wnd);
1715 th->th_urp = 0;
1716
1717 if (sc->sc_flags & SCF_ECN) {
1718 th->th_flags |= TH_ECE;
1719 TCPSTAT_INC(tcps_ecn_shs);
1720 }
1721
1722 /* Tack on the TCP options. */
1723 if ((sc->sc_flags & SCF_NOOPT) == 0) {
1724 to.to_flags = 0;
1725
1726 to.to_mss = mssopt;
1727 to.to_flags = TOF_MSS;
1728 if (sc->sc_flags & SCF_WINSCALE) {
1729 to.to_wscale = sc->sc_requested_r_scale;
1730 to.to_flags |= TOF_SCALE;
1731 }
1732 if (sc->sc_flags & SCF_TIMESTAMP) {
1733 /* Virgin timestamp or TCP cookie enhanced one. */
1734 to.to_tsval = sc->sc_ts;
1735 to.to_tsecr = sc->sc_tsreflect;
1736 to.to_flags |= TOF_TS;
1737 }
1738 if (sc->sc_flags & SCF_SACK)
1739 to.to_flags |= TOF_SACKPERM;
1740 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1741 if (sc->sc_flags & SCF_SIGNATURE)
1742 to.to_flags |= TOF_SIGNATURE;
1743 #endif
1744 #ifdef TCP_RFC7413
1745 if (sc->sc_tfo_cookie) {
1746 to.to_flags |= TOF_FASTOPEN;
1747 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
1748 to.to_tfo_cookie = sc->sc_tfo_cookie;
1749 /* don't send cookie again when retransmitting response */
1750 sc->sc_tfo_cookie = NULL;
1751 }
1752 #endif
1753 optlen = tcp_addoptions(&to, (u_char *)(th + 1));
1754
1755 /* Adjust headers by option size. */
1756 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1757 m->m_len += optlen;
1758 m->m_pkthdr.len += optlen;
1759 #ifdef INET6
1760 if (sc->sc_inc.inc_flags & INC_ISIPV6)
1761 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
1762 else
1763 #endif
1764 ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
1765 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1766 if (sc->sc_flags & SCF_SIGNATURE) {
1767 KASSERT(to.to_flags & TOF_SIGNATURE,
1768 ("tcp_addoptions() didn't set tcp_signature"));
1769
1770 /* NOTE: to.to_signature is inside of mbuf */
1771 if (!TCPMD5_ENABLED() ||
1772 TCPMD5_OUTPUT(m, th, to.to_signature) != 0) {
1773 m_freem(m);
1774 return (EACCES);
1775 }
1776 }
1777 #endif
1778 } else
1779 optlen = 0;
1780
1781 M_SETFIB(m, sc->sc_inc.inc_fibnum);
1782 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	/*
	 * If we have the peer's SYN and it has a flowid, then let's assign
	 * it to our SYN|ACK. ip6_output() and ip_output() will not assign
	 * a flowid to the SYN|ACK due to the lack of an inp here.
	 */
1788 if (m0 != NULL && M_HASHTYPE_GET(m0) != M_HASHTYPE_NONE) {
1789 m->m_pkthdr.flowid = m0->m_pkthdr.flowid;
1790 M_HASHTYPE_SET(m, M_HASHTYPE_GET(m0));
1791 }
1792 #ifdef INET6
1793 if (sc->sc_inc.inc_flags & INC_ISIPV6) {
1794 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
1795 th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
1796 IPPROTO_TCP, 0);
1797 ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
1798 #ifdef TCP_OFFLOAD
1799 if (ADDED_BY_TOE(sc)) {
1800 struct toedev *tod = sc->sc_tod;
1801
1802 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1803
1804 return (error);
1805 }
1806 #endif
1807 error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1808 }
1809 #endif
1810 #if defined(INET6) && defined(INET)
1811 else
1812 #endif
1813 #ifdef INET
1814 {
1815 m->m_pkthdr.csum_flags = CSUM_TCP;
1816 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1817 htons(tlen + optlen - hlen + IPPROTO_TCP));
1818 #ifdef TCP_OFFLOAD
1819 if (ADDED_BY_TOE(sc)) {
1820 struct toedev *tod = sc->sc_tod;
1821
1822 error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);
1823
1824 return (error);
1825 }
1826 #endif
1827 error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
1828 }
1829 #endif
1830 return (error);
1831 }
1832
/*
 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
 * that exceed the capacity of the syncache by avoiding the storage of any
 * of the SYNs we receive.  Syncookies defend against blind SYN flooding
 * attacks where the attacker does not have access to our responses.
 *
 * Syncookies encode and include all necessary information about the
 * connection setup within the SYN|ACK that we send back.  That way we
 * can avoid keeping any local state until the ACK to our SYN|ACK returns
 * (if ever).  Normally the syncache and syncookies are running in parallel
 * with the latter taking over when the former is exhausted.  When a matching
 * syncache entry is found the syncookie is ignored.
 *
 * The only reliable information persisting through the 3WHS is our initial
 * sequence number ISS of 32 bits.  Syncookies embed a cryptographically
 * sufficiently strong hash (MAC) value and a few bits of TCP SYN options in
 * the ISS of our SYN|ACK.  The MAC can be recomputed when the ACK to our
 * SYN|ACK returns and, if it matches the ACK, signifies a legitimate
 * connection.
 *
 * The available space of 32 bits to store the hash and to encode the SYN
 * option information is very tight and we should have at least 24 bits for
 * the MAC to keep the number of guesses by blind spoofing reasonably high.
 *
 * SYN option information we have to encode to fully restore a connection:
 * MSS: is important to choose an optimal segment size to avoid IP level
 *   fragmentation along the path.  The common MSS values can be encoded
 *   in a 3-bit table.  Uncommon values are captured by the next lower value
 *   in the table leading to a slight increase in packetization overhead.
 * WSCALE: is necessary to allow large windows to be used for high delay-
 *   bandwidth product links.  Not scaling the window when it was initially
 *   negotiated is bad for performance as lack of scaling further decreases
 *   the apparent available send window.  We only need to encode the WSCALE
 *   we received from the remote end.  Our end can be recalculated at any
 *   time.  The common WSCALE values can be encoded in a 3-bit table.
 *   Uncommon values are captured by the next lower value in the table,
 *   making us under-estimate the available window size and halving our
 *   theoretically possible maximum throughput for that connection.
 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
 * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
 *   that are included in all segments on a connection.  We enable them when
 *   the ACK has them.
 *
 * Security of syncookies and attack vectors:
 *
 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
 * together with the global secret to make it unique per connection attempt.
 * Thus any change of any of those parameters results in a different MAC output
 * in an unpredictable way unless a collision is encountered.  24 bits of the
 * MAC are embedded into the ISS.
 *
 * To prevent replay attacks two rotating global secrets are updated with a
 * new random value every 15 seconds.  The life-time of a syncookie is thus
 * 15-30 seconds.
 *
 * Vector 1: Attacking the secret.  This requires finding a weakness in the
 * MAC itself or the way it is used here.  The attacker can do a chosen plain
 * text attack by varying and testing all the parameters under his control.
 * The strength depends on the size and randomness of the secret, and the
 * cryptographic security of the MAC function.  Due to the constant updating
 * of the secret the attacker has at most 29.999 seconds to find the secret
 * and launch spoofed connections.  After that he has to start all over again.
 *
 * Vector 2: Collision attack on the MAC of a single ACK.  With a 24 bit MAC
 * size an average of 4,823 attempts are required for a 50% chance of success
 * to spoof a single syncookie (birthday collision paradox).  However the
 * attacker is blind and doesn't know if one of his attempts succeeded unless
 * he has a side channel to infer success from.  A single connection setup
 * success average of 90% requires 8,790 packets, 99.99% requires 17,578 packets.
 * This many attempts are required for each blind spoofed connection.  For
 * every additional spoofed connection he has to launch another N attempts.
 * Thus for a sustained rate of 100 spoofed connections per second approximately
 * 1,800,000 packets per second would have to be sent.
 *
 * NB: The MAC function should be fast so that it doesn't become a CPU
 * exhaustion attack vector itself.
 *
 * References:
 *  RFC4987 TCP SYN Flooding Attacks and Common Mitigations
 *  SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
 *   http://cr.yp.to/syncookies.html    (overview)
 *   http://cr.yp.to/syncookies/archive (details)
 *
 *
 * Schematic construction of a syncookie enabled Initial Sequence Number:
 *  0        1         2         3
 *  12345678901234567890123456789012
 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
 *
 *  x 24 MAC (truncated)
 *  W  3 Send Window Scale index
 *  M  3 MSS index
 *  S  1 SACK permitted
 *  P  1 Odd/even secret
 */
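
/*
 * For exposition only (not part of the original file): the 8 cookie bits
 * in the schematic above correspond to a bitfield union like the
 * "union syncookie" used below, whose authoritative declaration lives in
 * tcp_syncache.h.  This copy is an illustrative sketch; the exact field
 * order shown here is an assumption for illustration.
 */
#if 0
union syncookie {
	uint8_t cookie;
	struct {
		uint8_t odd_even:1,	/* P: odd/even secret in use */
			sack_ok:1,	/* S: SACK permitted */
			wscale_idx:3,	/* W: send window scale index */
			mss_idx:3;	/* M: MSS index */
	} flags;
};
#endif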

/*
 * Distribution and probability of certain MSS values.  Those in between are
 * rounded down to the next lower one.
 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R. Nelson, 2011]
 *                            .2%  .3%   5%    7%    7%    20%   15%   45%
 */
static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 };

/*
 * Distribution and probability of certain WSCALE values.  We have to map the
 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
 * bits based on prevalence of certain values.  Values for which we don't
 * have an exact match are rounded down to the next lower one, letting us
 * under-estimate the true available window.  At the moment this would happen
 * only for the very uncommon values 3, 5 and those above 8 (more than 16MB
 * socket buffer and window size).  The absence of the WSCALE option (no
 * scaling in either direction) is encoded with index zero.
 * [WSCALE values histograms, Allman, 2012]
 *                            X 10 10 35  5  6 14 10%   by host
 *                            X 11  4  5  5 18 49  3%   by connections
 */
static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };
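
/*
 * For exposition only (not part of the original file): both tables are
 * consumed with the same round-down scan used in syncookie_generate(),
 * walking from the largest entry toward index 0 and stopping at the first
 * value not exceeding the observed one.  A hypothetical peer MSS of 1300
 * therefore maps to index 2 and decodes back to 1200.
 */
#if 0
static int
sc_msstab_index(int mss)	/* hypothetical helper, mirrors the scan */
{
	int i;

	/* Stop at the largest table entry that is <= mss, or at index 0. */
	for (i = nitems(tcp_sc_msstab) - 1;
	    tcp_sc_msstab[i] > mss && i > 0;
	    i--)
		;
	return (i);
}
#endif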

/*
 * Compute the MAC for the SYN cookie.  SIPHASH-2-4 is chosen for its speed
 * and good cryptographic properties.
 */
static uint32_t
syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
    uint8_t *secbits, uintptr_t secmod)
{
	SIPHASH_CTX ctx;
	uint32_t siphash[2];

	SipHash24_Init(&ctx);
	SipHash_SetKey(&ctx, secbits);
	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
		SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
		SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
		break;
#endif
	}
	SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
	SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
	SipHash_Update(&ctx, &irs, sizeof(irs));
	SipHash_Update(&ctx, &flags, sizeof(flags));
	SipHash_Update(&ctx, &secmod, sizeof(secmod));
	SipHash_Final((u_int8_t *)&siphash, &ctx);

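	/*
	 * The two 32-bit words of the 64-bit SipHash output are folded
	 * into the single 32-bit value the cookie's MAC is taken from.
	 */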
	return (siphash[0] ^ siphash[1]);
}

static tcp_seq
syncookie_generate(struct syncache_head *sch, struct syncache *sc)
{
	u_int i, secbit, wscale;
	uint32_t iss, hash;
	uint8_t *secbits;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	cookie.cookie = 0;

	/* Map our computed MSS into the 3-bit index. */
	for (i = nitems(tcp_sc_msstab) - 1;
	    tcp_sc_msstab[i] > sc->sc_peer_mss && i > 0;
	    i--)
		;
	cookie.flags.mss_idx = i;

	/*
	 * Map the send window scale into the 3-bit index but only if
	 * the wscale option was received.
	 */
	if (sc->sc_flags & SCF_WINSCALE) {
		wscale = sc->sc_requested_s_scale;
		for (i = nitems(tcp_sc_wstab) - 1;
		    tcp_sc_wstab[i] > wscale && i > 0;
		    i--)
			;
		cookie.flags.wscale_idx = i;
	}

	/* Can we do SACK? */
	if (sc->sc_flags & SCF_SACK)
		cookie.flags.sack_ok = 1;

	/* Which of the two secrets to use. */
	secbit = sch->sch_sc->secret.oddeven & 0x1;
	cookie.flags.odd_even = secbit;

	secbits = sch->sch_sc->secret.key[secbit];
	hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
	    (uintptr_t)sch);

	/*
	 * Put the flags into the hash and XOR them to get better ISS number
	 * variance.  This doesn't enhance the cryptographic strength and is
	 * done to prevent the 8 cookie bits from showing up directly on the
	 * wire.
	 */
	iss = hash & ~0xff;
	iss |= cookie.cookie ^ (hash >> 24);

	/* Randomize the timestamp. */
	if (sc->sc_flags & SCF_TIMESTAMP) {
		sc->sc_ts = arc4random();
		sc->sc_tsoff = sc->sc_ts - tcp_ts_getticks();
	}

	TCPSTAT_INC(tcps_sc_sendcookie);
	return (iss);
}
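
/*
 * For exposition only (not part of the original file): a sketch of how the
 * packing above round-trips with the unpacking in syncookie_lookup().  The
 * returning ACK carries iss + 1 in th_ack, which syncookie_lookup() reverts
 * before unpacking; "hash" and "cookie" stand in for the values computed
 * above.
 */
#if 0
	uint32_t iss, ack;
	uint8_t decoded;

	iss = hash & ~0xff;			/* upper 24 bits carry the MAC */
	iss |= cookie.cookie ^ (hash >> 24);	/* low 8 bits carry the flags */

	ack = iss;				/* th_ack - 1 at lookup time */
	decoded = (ack & 0xff) ^ (ack >> 24);	/* recovers cookie.cookie */
#endif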

static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	uint32_t hash;
	uint8_t *secbits;
	tcp_seq ack, seq;
	int wnd, wscale = 0;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	/*
	 * Pull information out of SYN-ACK/ACK and revert sequence number
	 * advances.
	 */
	ack = th->th_ack - 1;
	seq = th->th_seq - 1;

	/*
	 * Unpack the flags containing enough information to restore the
	 * connection.
	 */
	cookie.cookie = (ack & 0xff) ^ (ack >> 24);

	/* Which of the two secrets to use. */
	secbits = sch->sch_sc->secret.key[cookie.flags.odd_even];

	hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);

	/* The recomputed hash matches the ACK if this was a genuine cookie. */
	if ((ack & ~0xff) != (hash & ~0xff))
		return (NULL);

	/* Fill in the syncache values. */
	sc->sc_flags = 0;
	bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
	sc->sc_ipopts = NULL;

	sc->sc_irs = seq;
	sc->sc_iss = ack;

	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
		sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = sc->sc_iss & IPV6_FLOWLABEL_MASK;
		break;
#endif
	}

	sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];

	/* We can simply recompute the receive window scale we sent earlier. */
	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
		wscale++;

	/* Only use wscale if it was enabled in the original SYN. */
	if (cookie.flags.wscale_idx > 0) {
		sc->sc_requested_r_scale = wscale;
		sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
		sc->sc_flags |= SCF_WINSCALE;
	}

	wnd = sbspace(&lso->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;

	if (cookie.flags.sack_ok)
		sc->sc_flags |= SCF_SACK;

	if (to->to_flags & TOF_TS) {
		sc->sc_flags |= SCF_TIMESTAMP;
		sc->sc_tsreflect = to->to_tsval;
		sc->sc_ts = to->to_tsecr;
		sc->sc_tsoff = to->to_tsecr - tcp_ts_getticks();
	}

	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;

	sc->sc_rxmits = 0;

	TCPSTAT_INC(tcps_sc_recvcookie);
	return (sc);
}

#ifdef INVARIANTS
static int
syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	struct syncache scs, *scx;
	char *s;

	bzero(&scs, sizeof(scs));
	scx = syncookie_lookup(inc, sch, &scs, th, to, lso);

	if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
		return (0);

	if (scx != NULL) {
		if (sc->sc_peer_mss != scx->sc_peer_mss)
			log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
			    s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);

		if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
			log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_r_scale,
			    scx->sc_requested_r_scale);

		if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
			log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_s_scale,
			    scx->sc_requested_s_scale);

		if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
			log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
	}

	if (s != NULL)
		free(s, M_TCPLOG);
	return (0);
}
#endif /* INVARIANTS */

static void
syncookie_reseed(void *arg)
{
	struct tcp_syncache *sc = arg;
	uint8_t *secbits;
	int secbit;

	/*
	 * Reseeding the secret doesn't have to be protected by a lock.
	 * It must only be ensured that the new random values are visible
	 * to all CPUs in an SMP environment.  The atomic with release
	 * semantics ensures that.
	 */
	secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
	secbits = sc->secret.key[secbit];
	arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
	atomic_add_rel_int(&sc->secret.oddeven, 1);

	/* Reschedule ourselves. */
	callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
}
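
/*
 * Exposition note (not part of the original file): with SYNCOOKIE_LIFETIME
 * of 15 seconds, a cookie minted right after a reseed stays verifiable for
 * almost two full periods (its secret is current for one period, then
 * survives as the odd/even alternate for another), while one minted just
 * before a reseed is verifiable for little more than one period; hence the
 * 15-30 second life-time cited above.
 */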

/*
 * Exports the syncache entries to userland so that netstat can display
 * them alongside the other sockets.  This function is intended to be
 * called only from tcp_pcblist.
 *
 * Due to concurrency on an active system, the number of pcbs exported
 * may have no relation to max_pcbs.  max_pcbs merely indicates the
 * amount of space the caller allocated for this function to use.
 */
int
syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
{
	struct xtcpcb xt;
	struct syncache *sc;
	struct syncache_head *sch;
	int count, error, i;

	for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
		sch = &V_tcp_syncache.hashbase[i];
		SCH_LOCK(sch);
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (count >= max_pcbs) {
				SCH_UNLOCK(sch);
				goto exit;
			}
			if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
				continue;
			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof(xt);
			if (sc->sc_inc.inc_flags & INC_ISIPV6)
				xt.xt_inp.inp_vflag = INP_IPV6;
			else
				xt.xt_inp.inp_vflag = INP_IPV4;
			bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
			    sizeof(struct in_conninfo));
			xt.xt_tp.t_inpcb = &xt.xt_inp;
			xt.xt_tp.t_state = TCPS_SYN_RECEIVED;
			xt.xt_socket.xso_protocol = IPPROTO_TCP;
			xt.xt_socket.xso_len = sizeof(struct xsocket);
			xt.xt_socket.so_type = SOCK_STREAM;
			xt.xt_socket.so_state = SS_ISCONNECTING;
			error = SYSCTL_OUT(req, &xt, sizeof(xt));
			if (error) {
				SCH_UNLOCK(sch);
				goto exit;
			}
			count++;
		}
		SCH_UNLOCK(sch);
	}
exit:
	*pcbs_exported = count;
	return (error);
}