1 /*-
2 * Copyright (c) 2001 Networks Associates Technology, Inc.
3 * All rights reserved.
4 *
5 * This software was developed for the FreeBSD Project by Jonathan Lemon
6 * and NAI Labs, the Security Research Division of Network Associates, Inc.
7 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
8 * DARPA CHATS research program.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote
19 * products derived from this software without specific prior written
20 * permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $FreeBSD: releng/5.1/sys/netinet/tcp_syncache.c 111405 2003-02-24 03:15:48Z silby $
35 */
36
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 #include "opt_mac.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/sysctl.h>
45 #include <sys/malloc.h>
46 #include <sys/mac.h>
47 #include <sys/mbuf.h>
48 #include <sys/md5.h>
49 #include <sys/proc.h> /* for proc0 declaration */
50 #include <sys/random.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53
54 #include <net/if.h>
55 #include <net/route.h>
56
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/ip.h>
60 #include <netinet/in_var.h>
61 #include <netinet/in_pcb.h>
62 #include <netinet/ip_var.h>
63 #ifdef INET6
64 #include <netinet/ip6.h>
65 #include <netinet/icmp6.h>
66 #include <netinet6/nd6.h>
67 #include <netinet6/ip6_var.h>
68 #include <netinet6/in6_pcb.h>
69 #endif
70 #include <netinet/tcp.h>
71 #include <netinet/tcp_fsm.h>
72 #include <netinet/tcp_seq.h>
73 #include <netinet/tcp_timer.h>
74 #include <netinet/tcp_var.h>
75 #ifdef INET6
76 #include <netinet6/tcp6_var.h>
77 #endif
78
79 #ifdef IPSEC
80 #include <netinet6/ipsec.h>
81 #ifdef INET6
82 #include <netinet6/ipsec6.h>
83 #endif
84 #endif /*IPSEC*/
85
86 #ifdef FAST_IPSEC
87 #include <netipsec/ipsec.h>
88 #ifdef INET6
89 #include <netipsec/ipsec6.h>
90 #endif
91 #include <netipsec/key.h>
92 #define IPSEC
93 #endif /*FAST_IPSEC*/
94
95 #include <machine/in_cksum.h>
96 #include <vm/uma.h>
97
98 static int tcp_syncookies = 1;
99 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
100 &tcp_syncookies, 0,
101 "Use TCP SYN cookies if the syncache overflows");
102
103 static void syncache_drop(struct syncache *, struct syncache_head *);
104 static void syncache_free(struct syncache *);
105 static void syncache_insert(struct syncache *, struct syncache_head *);
106 struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
107 static int syncache_respond(struct syncache *, struct mbuf *);
108 static struct socket *syncache_socket(struct syncache *, struct socket *,
109 struct mbuf *m);
110 static void syncache_timer(void *);
111 static u_int32_t syncookie_generate(struct syncache *);
112 static struct syncache *syncookie_lookup(struct in_conninfo *,
113 struct tcphdr *, struct socket *);
114
115 /*
116 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
117  * 3 retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
118  * the odds are that the user has given up attempting to connect by then.
119 */
120 #define SYNCACHE_MAXREXMTS 3
121
122 /* Arbitrary values */
123 #define TCP_SYNCACHE_HASHSIZE 512
124 #define TCP_SYNCACHE_BUCKETLIMIT 30
125
126 struct tcp_syncache {
127 struct syncache_head *hashbase;
128 uma_zone_t zone;
129 u_int hashsize;
130 u_int hashmask;
131 u_int bucket_limit;
132 u_int cache_count;
133 u_int cache_limit;
134 u_int rexmt_limit;
135 u_int hash_secret;
136 u_int next_reseed;
137 TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
138 struct callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
139 };
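/*
 * All syncache state lives in the single static instance below: hashbase is
 * an array of hashsize buckets (hashmask == hashsize - 1), cache_count is
 * checked against cache_limit on insertion, and timerq[i] holds the entries
 * whose SYN,ACK has been sent i+1 times, paired with the callout in
 * tt_timerq[i] that drives the next retransmit for that slot.
 */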
140 static struct tcp_syncache tcp_syncache;
141
142 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");
143
144 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
145 &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");
146
147 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RD,
148 &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");
149
150 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
151 &tcp_syncache.cache_count, 0, "Current number of entries in syncache");
152
153 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RD,
154 &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");
155
156 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
157 &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");
158
159 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
160
161 #define SYNCACHE_HASH(inc, mask) \
162 ((tcp_syncache.hash_secret ^ \
163 (inc)->inc_faddr.s_addr ^ \
164 ((inc)->inc_faddr.s_addr >> 16) ^ \
165 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
166
167 #define SYNCACHE_HASH6(inc, mask) \
168 ((tcp_syncache.hash_secret ^ \
169 (inc)->inc6_faddr.s6_addr32[0] ^ \
170 (inc)->inc6_faddr.s6_addr32[3] ^ \
171 (inc)->inc_fport ^ (inc)->inc_lport) & mask)
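/*
 * Both hash macros fold the per-boot hash_secret with the foreign address
 * (the first and last 32-bit words in the IPv6 case) and the port pair,
 * then mask the result down to a bucket index.  The local address is not
 * hashed; the secret is what keeps remote hosts from choosing addresses
 * that all collide in one bucket.
 */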
172
173 #define ENDPTS_EQ(a, b) ( \
174 (a)->ie_fport == (b)->ie_fport && \
175 (a)->ie_lport == (b)->ie_lport && \
176 (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \
177 (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \
178 )
179
180 #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
181
182 #define SYNCACHE_TIMEOUT(sc, slot) do { \
183 sc->sc_rxtslot = (slot); \
184 sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[(slot)]; \
185 TAILQ_INSERT_TAIL(&tcp_syncache.timerq[(slot)], sc, sc_timerq); \
186 if (!callout_active(&tcp_syncache.tt_timerq[(slot)])) \
187 callout_reset(&tcp_syncache.tt_timerq[(slot)], \
188 TCPTV_RTOBASE * tcp_backoff[(slot)], \
189 syncache_timer, (void *)((intptr_t)(slot))); \
190 } while (0)
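/*
 * SYNCACHE_TIMEOUT appends an entry to the timer queue for its retransmit
 * slot and records when it is due, using the same exponential backoff as
 * established connections (TCPTV_RTOBASE * tcp_backoff[slot]).  Every entry
 * in a slot waits the same interval and entries are appended in arrival
 * order, so each queue stays sorted by sc_rxttime and syncache_timer() only
 * has to examine queue heads.  The per-slot callout is armed only when it
 * is not already running.
 */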
191
192 static void
193 syncache_free(struct syncache *sc)
194 {
195 struct rtentry *rt;
196
197 if (sc->sc_ipopts)
198 (void) m_free(sc->sc_ipopts);
199 #ifdef INET6
200 if (sc->sc_inc.inc_isipv6)
201 rt = sc->sc_route6.ro_rt;
202 else
203 #endif
204 rt = sc->sc_route.ro_rt;
205 if (rt != NULL) {
206 /*
207 * If this is the only reference to a protocol cloned
208 * route, remove it immediately.
209 */
210 if (rt->rt_flags & RTF_WASCLONED &&
211 (sc->sc_flags & SCF_KEEPROUTE) == 0 &&
212 rt->rt_refcnt == 1)
213 rtrequest(RTM_DELETE, rt_key(rt),
214 rt->rt_gateway, rt_mask(rt),
215 rt->rt_flags, NULL);
216 RTFREE(rt);
217 }
218 uma_zfree(tcp_syncache.zone, sc);
219 }
220
221 void
222 syncache_init(void)
223 {
224 int i;
225
226 tcp_syncache.cache_count = 0;
227 tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
228 tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
229 tcp_syncache.cache_limit =
230 tcp_syncache.hashsize * tcp_syncache.bucket_limit;
231 tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
232 tcp_syncache.next_reseed = 0;
233 tcp_syncache.hash_secret = arc4random();
234
235 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
236 &tcp_syncache.hashsize);
237 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
238 &tcp_syncache.cache_limit);
239 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
240 &tcp_syncache.bucket_limit);
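/*
 * The three limits above may be overridden at boot; a hypothetical
 * loader.conf example (values chosen only for illustration):
 *
 *	net.inet.tcp.syncache.hashsize=1024
 *	net.inet.tcp.syncache.bucketlimit=60
 *	net.inet.tcp.syncache.cachelimit=61440
 *
 * A hashsize that is not a power of 2 is forced back to the default
 * just below.
 */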
241 if (!powerof2(tcp_syncache.hashsize)) {
242 printf("WARNING: syncache hash size is not a power of 2.\n");
243 tcp_syncache.hashsize = 512; /* safe default */
244 }
245 tcp_syncache.hashmask = tcp_syncache.hashsize - 1;
246
247 /* Allocate the hash table. */
248 MALLOC(tcp_syncache.hashbase, struct syncache_head *,
249 tcp_syncache.hashsize * sizeof(struct syncache_head),
250 M_SYNCACHE, M_WAITOK);
251
252 /* Initialize the hash buckets. */
253 for (i = 0; i < tcp_syncache.hashsize; i++) {
254 TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
255 tcp_syncache.hashbase[i].sch_length = 0;
256 }
257
258 /* Initialize the timer queues. */
259 for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
260 TAILQ_INIT(&tcp_syncache.timerq[i]);
261 callout_init(&tcp_syncache.tt_timerq[i], 0);
262 }
263
264 /*
265 * Allocate the syncache entries. Allow the zone to allocate one
266 * more entry than cache limit, so a new entry can bump out an
267 * older one.
268 */
269 tcp_syncache.cache_limit -= 1;
270 tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
271 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
272 uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
273 }
274
275 static void
276 syncache_insert(sc, sch)
277 struct syncache *sc;
278 struct syncache_head *sch;
279 {
280 struct syncache *sc2;
281 int s, i;
282
283 /*
284 * Make sure that we don't overflow the per-bucket
285 * limit or the total cache size limit.
286 */
287 s = splnet();
288 if (sch->sch_length >= tcp_syncache.bucket_limit) {
289 /*
290 * The bucket is full, toss the oldest element.
291 */
292 sc2 = TAILQ_FIRST(&sch->sch_bucket);
293 sc2->sc_tp->ts_recent = ticks;
294 syncache_drop(sc2, sch);
295 tcpstat.tcps_sc_bucketoverflow++;
296 } else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
297 /*
298 * The cache is full. Toss the oldest entry in the
299 * entire cache. This is the front entry in the
300 * first non-empty timer queue with the largest
301 * timeout value.
302 */
303 for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
304 sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
305 if (sc2 != NULL)
306 break;
307 }
308 sc2->sc_tp->ts_recent = ticks;
309 syncache_drop(sc2, NULL);
310 tcpstat.tcps_sc_cacheoverflow++;
311 }
312
313 /* Initialize the entry's timer. */
314 SYNCACHE_TIMEOUT(sc, 0);
315
316 /* Put it into the bucket. */
317 TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
318 sch->sch_length++;
319 tcp_syncache.cache_count++;
320 tcpstat.tcps_sc_added++;
321 splx(s);
322 }
323
324 static void
325 syncache_drop(sc, sch)
326 struct syncache *sc;
327 struct syncache_head *sch;
328 {
329 int s;
330
331 if (sch == NULL) {
332 #ifdef INET6
333 if (sc->sc_inc.inc_isipv6) {
334 sch = &tcp_syncache.hashbase[
335 SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
336 } else
337 #endif
338 {
339 sch = &tcp_syncache.hashbase[
340 SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
341 }
342 }
343
344 s = splnet();
345
346 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
347 sch->sch_length--;
348 tcp_syncache.cache_count--;
349
350 TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
351 if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
352 callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);
353 splx(s);
354
355 syncache_free(sc);
356 }
357
358 /*
359 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
360 * If we have retransmitted an entry the maximum number of times, expire it.
361 */
362 static void
363 syncache_timer(xslot)
364 void *xslot;
365 {
366 intptr_t slot = (intptr_t)xslot;
367 struct syncache *sc, *nsc;
368 struct inpcb *inp;
369 int s;
370
371 s = splnet();
372 INP_INFO_WLOCK(&tcbinfo);
373 if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
374 !callout_active(&tcp_syncache.tt_timerq[slot])) {
375 INP_INFO_WUNLOCK(&tcbinfo);
376 splx(s);
377 return;
378 }
379 callout_deactivate(&tcp_syncache.tt_timerq[slot]);
380
381 nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
382 while (nsc != NULL) {
383 if (ticks < nsc->sc_rxttime)
384 break;
385 sc = nsc;
386 inp = sc->sc_tp->t_inpcb;
387 if (slot == SYNCACHE_MAXREXMTS ||
388 slot >= tcp_syncache.rexmt_limit ||
389 inp == NULL || inp->inp_gencnt != sc->sc_inp_gencnt) {
390 nsc = TAILQ_NEXT(sc, sc_timerq);
391 syncache_drop(sc, NULL);
392 tcpstat.tcps_sc_stale++;
393 continue;
394 }
395 /*
396 * syncache_respond() may call back into the syncache to
397 	 * modify another entry, so do not obtain the next
398 * entry on the timer chain until it has completed.
399 */
400 (void) syncache_respond(sc, NULL);
401 nsc = TAILQ_NEXT(sc, sc_timerq);
402 tcpstat.tcps_sc_retransmitted++;
403 TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
404 SYNCACHE_TIMEOUT(sc, slot + 1);
405 }
406 if (nsc != NULL)
407 callout_reset(&tcp_syncache.tt_timerq[slot],
408 nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
409 INP_INFO_WUNLOCK(&tcbinfo);
410 splx(s);
411 }
412
413 /*
414 * Find an entry in the syncache.
415 */
416 struct syncache *
417 syncache_lookup(inc, schp)
418 struct in_conninfo *inc;
419 struct syncache_head **schp;
420 {
421 struct syncache *sc;
422 struct syncache_head *sch;
423 int s;
424
425 #ifdef INET6
426 if (inc->inc_isipv6) {
427 sch = &tcp_syncache.hashbase[
428 SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
429 *schp = sch;
430 s = splnet();
431 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
432 if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) {
433 splx(s);
434 return (sc);
435 }
436 }
437 splx(s);
438 } else
439 #endif
440 {
441 sch = &tcp_syncache.hashbase[
442 SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
443 *schp = sch;
444 s = splnet();
445 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
446 #ifdef INET6
447 if (sc->sc_inc.inc_isipv6)
448 continue;
449 #endif
450 if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) {
451 splx(s);
452 return (sc);
453 }
454 }
455 splx(s);
456 }
457 return (NULL);
458 }
459
460 /*
461 * This function is called when we get a RST for a
462 * non-existent connection, so that we can see if the
463 * connection is in the syn cache. If it is, zap it.
464 */
465 void
466 syncache_chkrst(inc, th)
467 struct in_conninfo *inc;
468 struct tcphdr *th;
469 {
470 struct syncache *sc;
471 struct syncache_head *sch;
472
473 sc = syncache_lookup(inc, &sch);
474 if (sc == NULL)
475 return;
476 /*
477 * If the RST bit is set, check the sequence number to see
478 * if this is a valid reset segment.
479 * RFC 793 page 37:
480 * In all states except SYN-SENT, all reset (RST) segments
481 * are validated by checking their SEQ-fields. A reset is
482 * valid if its sequence number is in the window.
483 *
484 * The sequence number in the reset segment is normally an
485 	 * echo of our outgoing acknowledgement numbers, but some hosts
486 * send a reset with the sequence number at the rightmost edge
487 * of our receive window, and we have to handle this case.
488 */
489 if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
490 SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
491 syncache_drop(sc, sch);
492 tcpstat.tcps_sc_reset++;
493 }
494 }
495
496 void
497 syncache_badack(inc)
498 struct in_conninfo *inc;
499 {
500 struct syncache *sc;
501 struct syncache_head *sch;
502
503 sc = syncache_lookup(inc, &sch);
504 if (sc != NULL) {
505 syncache_drop(sc, sch);
506 tcpstat.tcps_sc_badack++;
507 }
508 }
509
510 void
511 syncache_unreach(inc, th)
512 struct in_conninfo *inc;
513 struct tcphdr *th;
514 {
515 struct syncache *sc;
516 struct syncache_head *sch;
517
518 /* we are called at splnet() here */
519 sc = syncache_lookup(inc, &sch);
520 if (sc == NULL)
521 return;
522
523 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
524 if (ntohl(th->th_seq) != sc->sc_iss)
525 return;
526
527 /*
528 	 * If we've retransmitted 3 times and this is our second error,
529 * we remove the entry. Otherwise, we allow it to continue on.
530 * This prevents us from incorrectly nuking an entry during a
531 * spurious network outage.
532 *
533 * See tcp_notify().
534 */
535 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
536 sc->sc_flags |= SCF_UNREACH;
537 return;
538 }
539 syncache_drop(sc, sch);
540 tcpstat.tcps_sc_unreach++;
541 }
542
543 /*
544 * Build a new TCP socket structure from a syncache entry.
545 */
546 static struct socket *
547 syncache_socket(sc, lso, m)
548 struct syncache *sc;
549 struct socket *lso;
550 struct mbuf *m;
551 {
552 struct inpcb *inp = NULL;
553 struct socket *so;
554 struct tcpcb *tp;
555
556 /*
557 * Ok, create the full blown connection, and set things up
558 * as they would have been set up if we had created the
559 * connection when the SYN arrived. If we can't create
560 * the connection, abort it.
561 */
562 so = sonewconn(lso, SS_ISCONNECTED);
563 if (so == NULL) {
564 /*
565 * Drop the connection; we will send a RST if the peer
566 		 * retransmits the ACK.
567 */
568 tcpstat.tcps_listendrop++;
569 goto abort;
570 }
571 #ifdef MAC
572 mac_set_socket_peer_from_mbuf(m, so);
573 #endif
574
575 inp = sotoinpcb(so);
576
577 /*
578 * Insert new socket into hash list.
579 */
580 inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
581 #ifdef INET6
582 if (sc->sc_inc.inc_isipv6) {
583 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
584 } else {
585 inp->inp_vflag &= ~INP_IPV6;
586 inp->inp_vflag |= INP_IPV4;
587 #endif
588 inp->inp_laddr = sc->sc_inc.inc_laddr;
589 #ifdef INET6
590 }
591 #endif
592 inp->inp_lport = sc->sc_inc.inc_lport;
593 if (in_pcbinshash(inp) != 0) {
594 /*
595 * Undo the assignments above if we failed to
596 * put the PCB on the hash lists.
597 */
598 #ifdef INET6
599 if (sc->sc_inc.inc_isipv6)
600 inp->in6p_laddr = in6addr_any;
601 else
602 #endif
603 inp->inp_laddr.s_addr = INADDR_ANY;
604 inp->inp_lport = 0;
605 goto abort;
606 }
607 #ifdef IPSEC
608 /* copy old policy into new socket's */
609 if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
610 printf("syncache_expand: could not copy policy\n");
611 #endif
612 #ifdef INET6
613 if (sc->sc_inc.inc_isipv6) {
614 struct inpcb *oinp = sotoinpcb(lso);
615 struct in6_addr laddr6;
616 struct sockaddr_in6 *sin6;
617 /*
618 * Inherit socket options from the listening socket.
619 		 * Note that in6p_inputopts is not (and should not be)
620 * copied, since it stores previously received options and is
621 * used to detect if each new option is different than the
622 * previous one and hence should be passed to a user.
623 * If we copied in6p_inputopts, a user would not be able to
624 * receive options just after calling the accept system call.
625 */
626 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
627 if (oinp->in6p_outputopts)
628 inp->in6p_outputopts =
629 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
630 inp->in6p_route = sc->sc_route6;
631 sc->sc_route6.ro_rt = NULL;
632
633 MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6,
634 M_SONAME, M_NOWAIT | M_ZERO);
635 if (sin6 == NULL)
636 goto abort;
637 sin6->sin6_family = AF_INET6;
638 sin6->sin6_len = sizeof(*sin6);
639 sin6->sin6_addr = sc->sc_inc.inc6_faddr;
640 sin6->sin6_port = sc->sc_inc.inc_fport;
641 laddr6 = inp->in6p_laddr;
642 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
643 inp->in6p_laddr = sc->sc_inc.inc6_laddr;
644 if (in6_pcbconnect(inp, (struct sockaddr *)sin6, &thread0)) {
645 inp->in6p_laddr = laddr6;
646 FREE(sin6, M_SONAME);
647 goto abort;
648 }
649 FREE(sin6, M_SONAME);
650 } else
651 #endif
652 {
653 struct in_addr laddr;
654 struct sockaddr_in *sin;
655
656 inp->inp_options = ip_srcroute();
657 if (inp->inp_options == NULL) {
658 inp->inp_options = sc->sc_ipopts;
659 sc->sc_ipopts = NULL;
660 }
661 inp->inp_route = sc->sc_route;
662 sc->sc_route.ro_rt = NULL;
663
664 MALLOC(sin, struct sockaddr_in *, sizeof *sin,
665 M_SONAME, M_NOWAIT | M_ZERO);
666 if (sin == NULL)
667 goto abort;
668 sin->sin_family = AF_INET;
669 sin->sin_len = sizeof(*sin);
670 sin->sin_addr = sc->sc_inc.inc_faddr;
671 sin->sin_port = sc->sc_inc.inc_fport;
672 bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
673 laddr = inp->inp_laddr;
674 if (inp->inp_laddr.s_addr == INADDR_ANY)
675 inp->inp_laddr = sc->sc_inc.inc_laddr;
676 if (in_pcbconnect(inp, (struct sockaddr *)sin, &thread0)) {
677 inp->inp_laddr = laddr;
678 FREE(sin, M_SONAME);
679 goto abort;
680 }
681 FREE(sin, M_SONAME);
682 }
683
684 tp = intotcpcb(inp);
685 tp->t_state = TCPS_SYN_RECEIVED;
686 tp->iss = sc->sc_iss;
687 tp->irs = sc->sc_irs;
688 tcp_rcvseqinit(tp);
689 tcp_sendseqinit(tp);
690 tp->snd_wl1 = sc->sc_irs;
691 tp->rcv_up = sc->sc_irs + 1;
692 tp->rcv_wnd = sc->sc_wnd;
693 tp->rcv_adv += tp->rcv_wnd;
694
695 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
696 if (sc->sc_flags & SCF_NOOPT)
697 tp->t_flags |= TF_NOOPT;
698 if (sc->sc_flags & SCF_WINSCALE) {
699 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
700 tp->requested_s_scale = sc->sc_requested_s_scale;
701 tp->request_r_scale = sc->sc_request_r_scale;
702 }
703 if (sc->sc_flags & SCF_TIMESTAMP) {
704 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
705 tp->ts_recent = sc->sc_tsrecent;
706 tp->ts_recent_age = ticks;
707 }
708 if (sc->sc_flags & SCF_CC) {
709 /*
710 * Initialization of the tcpcb for transaction;
711 * set SND.WND = SEG.WND,
712 * initialize CCsend and CCrecv.
713 */
714 tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
715 tp->cc_send = sc->sc_cc_send;
716 tp->cc_recv = sc->sc_cc_recv;
717 }
718
719 tcp_mss(tp, sc->sc_peer_mss);
720
721 /*
722 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
723 */
724 if (sc->sc_rxtslot != 0)
725 tp->snd_cwnd = tp->t_maxseg;
726 callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);
727
728 tcpstat.tcps_accepts++;
729 return (so);
730
731 abort:
732 if (so != NULL)
733 (void) soabort(so);
734 return (NULL);
735 }
736
737 /*
738 * This function gets called when we receive an ACK for a
739 * socket in the LISTEN state. We look up the connection
740  * in the syncache, and if it's there, we pull it out of
741 * the cache and turn it into a full-blown connection in
742 * the SYN-RECEIVED state.
743 */
744 int
745 syncache_expand(inc, th, sop, m)
746 struct in_conninfo *inc;
747 struct tcphdr *th;
748 struct socket **sop;
749 struct mbuf *m;
750 {
751 struct syncache *sc;
752 struct syncache_head *sch;
753 struct socket *so;
754
755 sc = syncache_lookup(inc, &sch);
756 if (sc == NULL) {
757 /*
758 * There is no syncache entry, so see if this ACK is
759 * a returning syncookie. To do this, first:
760 * A. See if this socket has had a syncache entry dropped in
761 * the past. We don't want to accept a bogus syncookie
762 * if we've never received a SYN.
763 * B. check that the syncookie is valid. If it is, then
764 * cobble up a fake syncache entry, and return.
765 */
766 if (!tcp_syncookies)
767 return (0);
768 sc = syncookie_lookup(inc, th, *sop);
769 if (sc == NULL)
770 return (0);
771 sch = NULL;
772 tcpstat.tcps_sc_recvcookie++;
773 }
774
775 /*
776 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
777 */
778 if (th->th_ack != sc->sc_iss + 1)
779 return (0);
780
781 so = syncache_socket(sc, *sop, m);
782 if (so == NULL) {
783 #if 0
784 resetandabort:
785 /* XXXjlemon check this - is this correct? */
786 (void) tcp_respond(NULL, m, m, th,
787 th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
788 #endif
789 m_freem(m); /* XXX only needed for above */
790 tcpstat.tcps_sc_aborted++;
791 } else {
792 sc->sc_flags |= SCF_KEEPROUTE;
793 tcpstat.tcps_sc_completed++;
794 }
795 if (sch == NULL)
796 syncache_free(sc);
797 else
798 syncache_drop(sc, sch);
799 *sop = so;
800 return (1);
801 }
802
803 /*
804 * Given a LISTEN socket and an inbound SYN request, add
805 * this to the syn cache, and send back a segment:
806 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
807 * to the source.
808 *
809 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
810 * Doing so would require that we hold onto the data and deliver it
811 * to the application. However, if we are the target of a SYN-flood
812 * DoS attack, an attacker could send data which would eventually
813 * consume all available buffer space if it were ACKed. By not ACKing
814 * the data, we avoid this DoS scenario.
815 */
816 int
817 syncache_add(inc, to, th, sop, m)
818 struct in_conninfo *inc;
819 struct tcpopt *to;
820 struct tcphdr *th;
821 struct socket **sop;
822 struct mbuf *m;
823 {
824 struct tcpcb *tp;
825 struct socket *so;
826 struct syncache *sc = NULL;
827 struct syncache_head *sch;
828 struct mbuf *ipopts = NULL;
829 struct rmxp_tao *taop;
830 int i, s, win;
831
832 so = *sop;
833 tp = sototcpcb(so);
834
835 /*
836 * Remember the IP options, if any.
837 */
838 #ifdef INET6
839 if (!inc->inc_isipv6)
840 #endif
841 ipopts = ip_srcroute();
842
843 /*
844 * See if we already have an entry for this connection.
845 * If we do, resend the SYN,ACK, and reset the retransmit timer.
846 *
847 * XXX
848 * should the syncache be re-initialized with the contents
849 * of the new SYN here (which may have different options?)
850 */
851 sc = syncache_lookup(inc, &sch);
852 if (sc != NULL) {
853 tcpstat.tcps_sc_dupsyn++;
854 if (ipopts) {
855 /*
856 * If we were remembering a previous source route,
857 * forget it and use the new one we've been given.
858 */
859 if (sc->sc_ipopts)
860 (void) m_free(sc->sc_ipopts);
861 sc->sc_ipopts = ipopts;
862 }
863 /*
864 * Update timestamp if present.
865 */
866 if (sc->sc_flags & SCF_TIMESTAMP)
867 sc->sc_tsrecent = to->to_tsval;
868 /*
869 * PCB may have changed, pick up new values.
870 */
871 sc->sc_tp = tp;
872 sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
873 if (syncache_respond(sc, m) == 0) {
874 s = splnet();
875 TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
876 sc, sc_timerq);
877 SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
878 splx(s);
879 tcpstat.tcps_sndacks++;
880 tcpstat.tcps_sndtotal++;
881 }
882 *sop = NULL;
883 return (1);
884 }
885
886 sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
887 if (sc == NULL) {
888 /*
889 * The zone allocator couldn't provide more entries.
890 * Treat this as if the cache was full; drop the oldest
891 * entry and insert the new one.
892 */
893 s = splnet();
894 for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
895 sc = TAILQ_FIRST(&tcp_syncache.timerq[i]);
896 if (sc != NULL)
897 break;
898 }
899 sc->sc_tp->ts_recent = ticks;
900 syncache_drop(sc, NULL);
901 splx(s);
902 tcpstat.tcps_sc_zonefail++;
903 sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
904 if (sc == NULL) {
905 if (ipopts)
906 (void) m_free(ipopts);
907 return (0);
908 }
909 }
910
911 /*
912 * Fill in the syncache values.
913 */
914 bzero(sc, sizeof(*sc));
915 sc->sc_tp = tp;
916 sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
917 sc->sc_ipopts = ipopts;
918 sc->sc_inc.inc_fport = inc->inc_fport;
919 sc->sc_inc.inc_lport = inc->inc_lport;
920 #ifdef INET6
921 sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
922 if (inc->inc_isipv6) {
923 sc->sc_inc.inc6_faddr = inc->inc6_faddr;
924 sc->sc_inc.inc6_laddr = inc->inc6_laddr;
925 sc->sc_route6.ro_rt = NULL;
926 } else
927 #endif
928 {
929 sc->sc_inc.inc_faddr = inc->inc_faddr;
930 sc->sc_inc.inc_laddr = inc->inc_laddr;
931 sc->sc_route.ro_rt = NULL;
932 }
933 sc->sc_irs = th->th_seq;
934 sc->sc_flags = 0;
935 sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
936 if (tcp_syncookies)
937 sc->sc_iss = syncookie_generate(sc);
938 else
939 sc->sc_iss = arc4random();
940
941 /* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
942 win = sbspace(&so->so_rcv);
943 win = imax(win, 0);
944 win = imin(win, TCP_MAXWIN);
945 sc->sc_wnd = win;
946
947 if (tcp_do_rfc1323) {
948 /*
949 * A timestamp received in a SYN makes
950 * it ok to send timestamp requests and replies.
951 */
952 if (to->to_flags & TOF_TS) {
953 sc->sc_tsrecent = to->to_tsval;
954 sc->sc_flags |= SCF_TIMESTAMP;
955 }
956 if (to->to_flags & TOF_SCALE) {
957 int wscale = 0;
958
959 /* Compute proper scaling value from buffer space */
960 while (wscale < TCP_MAX_WINSHIFT &&
961 (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
962 wscale++;
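			/*
			 * Example: with a 128 kB receive buffer the loop
			 * stops at wscale == 2, the smallest shift for which
			 * TCP_MAXWIN << wscale (65535 << 2) is at least
			 * sb_hiwat.
			 */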
963 sc->sc_request_r_scale = wscale;
964 sc->sc_requested_s_scale = to->to_requested_s_scale;
965 sc->sc_flags |= SCF_WINSCALE;
966 }
967 }
968 if (tcp_do_rfc1644) {
969 /*
970 * A CC or CC.new option received in a SYN makes
971 * it ok to send CC in subsequent segments.
972 */
973 if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
974 sc->sc_cc_recv = to->to_cc;
975 sc->sc_cc_send = CC_INC(tcp_ccgen);
976 sc->sc_flags |= SCF_CC;
977 }
978 }
979 if (tp->t_flags & TF_NOOPT)
980 sc->sc_flags = SCF_NOOPT;
981
982 /*
983 * XXX
984 * We have the option here of not doing TAO (even if the segment
985 * qualifies) and instead fall back to a normal 3WHS via the syncache.
986 * This allows us to apply synflood protection to TAO-qualifying SYNs
987  * also. However, there should be a heuristic to determine when to
988  * do this, and one is not present at the moment.
989 */
990
991 /*
992 * Perform TAO test on incoming CC (SEG.CC) option, if any.
993 * - compare SEG.CC against cached CC from the same host, if any.
994  *   - if SEG.CC > cached value, SYN must be new and is accepted
995 * immediately: save new CC in the cache, mark the socket
996 * connected, enter ESTABLISHED state, turn on flag to
997 * send a SYN in the next segment.
998 * A virtual advertised window is set in rcv_adv to
999 * initialize SWS prevention. Then enter normal segment
1000 * processing: drop SYN, process data and FIN.
1001 * - otherwise do a normal 3-way handshake.
1002 */
1003 taop = tcp_gettaocache(&sc->sc_inc);
1004 if ((to->to_flags & TOF_CC) != 0) {
1005 if (((tp->t_flags & TF_NOPUSH) != 0) &&
1006 sc->sc_flags & SCF_CC &&
1007 taop != NULL && taop->tao_cc != 0 &&
1008 CC_GT(to->to_cc, taop->tao_cc)) {
1009 sc->sc_rxtslot = 0;
1010 so = syncache_socket(sc, *sop, m);
1011 if (so != NULL) {
1012 sc->sc_flags |= SCF_KEEPROUTE;
1013 taop->tao_cc = to->to_cc;
1014 *sop = so;
1015 }
1016 syncache_free(sc);
1017 return (so != NULL);
1018 }
1019 } else {
1020 /*
1021 * No CC option, but maybe CC.NEW: invalidate cached value.
1022 */
1023 if (taop != NULL)
1024 taop->tao_cc = 0;
1025 }
1026 /*
1027 * TAO test failed or there was no CC option,
1028 * do a standard 3-way handshake.
1029 */
1030 if (syncache_respond(sc, m) == 0) {
1031 syncache_insert(sc, sch);
1032 tcpstat.tcps_sndacks++;
1033 tcpstat.tcps_sndtotal++;
1034 } else {
1035 syncache_free(sc);
1036 tcpstat.tcps_sc_dropped++;
1037 }
1038 *sop = NULL;
1039 return (1);
1040 }
1041
1042 static int
1043 syncache_respond(sc, m)
1044 struct syncache *sc;
1045 struct mbuf *m;
1046 {
1047 u_int8_t *optp;
1048 int optlen, error;
1049 u_int16_t tlen, hlen, mssopt;
1050 struct ip *ip = NULL;
1051 struct rtentry *rt;
1052 struct tcphdr *th;
1053 #ifdef INET6
1054 struct ip6_hdr *ip6 = NULL;
1055 #endif
1056
1057 #ifdef INET6
1058 if (sc->sc_inc.inc_isipv6) {
1059 rt = tcp_rtlookup6(&sc->sc_inc);
1060 if (rt != NULL)
1061 mssopt = rt->rt_ifp->if_mtu -
1062 (sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
1063 else
1064 mssopt = tcp_v6mssdflt;
1065 hlen = sizeof(struct ip6_hdr);
1066 } else
1067 #endif
1068 {
1069 rt = tcp_rtlookup(&sc->sc_inc);
1070 if (rt != NULL)
1071 mssopt = rt->rt_ifp->if_mtu -
1072 (sizeof(struct ip) + sizeof(struct tcphdr));
1073 else
1074 mssopt = tcp_mssdflt;
1075 hlen = sizeof(struct ip);
1076 }
1077
1078 /* Compute the size of the TCP options. */
1079 if (sc->sc_flags & SCF_NOOPT) {
1080 optlen = 0;
1081 } else {
1082 optlen = TCPOLEN_MAXSEG +
1083 ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
1084 ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
1085 ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
1086 }
1087 tlen = hlen + sizeof(struct tcphdr) + optlen;
1088
1089 /*
1090 * XXX
1091 * assume that the entire packet will fit in a header mbuf
1092 */
1093 KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));
1094
1095 /*
1096 * XXX shouldn't this reuse the mbuf if possible ?
1097 * Create the IP+TCP header from scratch.
1098 */
1099 if (m)
1100 m_freem(m);
1101
1102 m = m_gethdr(M_DONTWAIT, MT_HEADER);
1103 if (m == NULL)
1104 return (ENOBUFS);
1105 m->m_data += max_linkhdr;
1106 m->m_len = tlen;
1107 m->m_pkthdr.len = tlen;
1108 m->m_pkthdr.rcvif = NULL;
1109 #ifdef MAC
1110 mac_create_mbuf_from_socket(sc->sc_tp->t_inpcb->inp_socket, m);
1111 #endif
1112
1113 #ifdef INET6
1114 if (sc->sc_inc.inc_isipv6) {
1115 ip6 = mtod(m, struct ip6_hdr *);
1116 ip6->ip6_vfc = IPV6_VERSION;
1117 ip6->ip6_nxt = IPPROTO_TCP;
1118 ip6->ip6_src = sc->sc_inc.inc6_laddr;
1119 ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1120 ip6->ip6_plen = htons(tlen - hlen);
1121 /* ip6_hlim is set after checksum */
1122 /* ip6_flow = ??? */
1123
1124 th = (struct tcphdr *)(ip6 + 1);
1125 } else
1126 #endif
1127 {
1128 ip = mtod(m, struct ip *);
1129 ip->ip_v = IPVERSION;
1130 ip->ip_hl = sizeof(struct ip) >> 2;
1131 ip->ip_len = tlen;
1132 ip->ip_id = 0;
1133 ip->ip_off = 0;
1134 ip->ip_sum = 0;
1135 ip->ip_p = IPPROTO_TCP;
1136 ip->ip_src = sc->sc_inc.inc_laddr;
1137 ip->ip_dst = sc->sc_inc.inc_faddr;
1138 ip->ip_ttl = sc->sc_tp->t_inpcb->inp_ip_ttl; /* XXX */
1139 ip->ip_tos = sc->sc_tp->t_inpcb->inp_ip_tos; /* XXX */
1140
1141 /*
1142 * See if we should do MTU discovery. Route lookups are
1143 * expensive, so we will only unset the DF bit if:
1144 *
1145 * 1) path_mtu_discovery is disabled
1146 * 2) the SCF_UNREACH flag has been set
1147 */
1148 if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1149 ip->ip_off |= IP_DF;
1150
1151 th = (struct tcphdr *)(ip + 1);
1152 }
1153 th->th_sport = sc->sc_inc.inc_lport;
1154 th->th_dport = sc->sc_inc.inc_fport;
1155
1156 th->th_seq = htonl(sc->sc_iss);
1157 th->th_ack = htonl(sc->sc_irs + 1);
1158 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1159 th->th_x2 = 0;
1160 th->th_flags = TH_SYN|TH_ACK;
1161 th->th_win = htons(sc->sc_wnd);
1162 th->th_urp = 0;
1163
1164 /* Tack on the TCP options. */
1165 if (optlen != 0) {
1166 optp = (u_int8_t *)(th + 1);
1167 *optp++ = TCPOPT_MAXSEG;
1168 *optp++ = TCPOLEN_MAXSEG;
1169 *optp++ = (mssopt >> 8) & 0xff;
1170 *optp++ = mssopt & 0xff;
1171
1172 if (sc->sc_flags & SCF_WINSCALE) {
1173 *((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
1174 TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
1175 sc->sc_request_r_scale);
1176 optp += 4;
1177 }
1178
1179 if (sc->sc_flags & SCF_TIMESTAMP) {
1180 u_int32_t *lp = (u_int32_t *)(optp);
1181
1182 /* Form timestamp option per appendix A of RFC 1323. */
1183 *lp++ = htonl(TCPOPT_TSTAMP_HDR);
1184 *lp++ = htonl(ticks);
1185 *lp = htonl(sc->sc_tsrecent);
1186 optp += TCPOLEN_TSTAMP_APPA;
1187 }
1188
1189 /*
1190 * Send CC and CC.echo if we received CC from our peer.
1191 */
1192 if (sc->sc_flags & SCF_CC) {
1193 u_int32_t *lp = (u_int32_t *)(optp);
1194
1195 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
1196 *lp++ = htonl(sc->sc_cc_send);
1197 *lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
1198 *lp = htonl(sc->sc_cc_recv);
1199 optp += TCPOLEN_CC_APPA * 2;
1200 }
1201 }
1202
1203 #ifdef INET6
1204 if (sc->sc_inc.inc_isipv6) {
1205 struct route_in6 *ro6 = &sc->sc_route6;
1206
1207 th->th_sum = 0;
1208 th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
1209 ip6->ip6_hlim = in6_selecthlim(NULL,
1210 ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
1211 error = ip6_output(m, NULL, ro6, 0, NULL, NULL,
1212 sc->sc_tp->t_inpcb);
1213 } else
1214 #endif
1215 {
1216 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1217 htons(tlen - hlen + IPPROTO_TCP));
1218 m->m_pkthdr.csum_flags = CSUM_TCP;
1219 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1220 error = ip_output(m, sc->sc_ipopts, &sc->sc_route, 0, NULL,
1221 sc->sc_tp->t_inpcb);
1222 }
1223 return (error);
1224 }
1225
1226 /*
1227 * cookie layers:
1228 *
1229 * |. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
1230 * | peer iss |
1231 * | MD5(laddr,faddr,secret,lport,fport) |. . . . . . .|
1232 * | 0 |(A)| |
1233 * (A): peer mss index
1234 */
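/*
 * In other words, as implemented below: the low SYNCOOKIE_WNDBITS bits of
 * the cookie carry the index of the secret that was used, the next two bits
 * carry an index into tcp_msstab[], and the word is XORed with the peer's
 * ISS and with the upper bits of an MD5 digest over the connection's
 * addresses, ports and the chosen secret.  syncookie_lookup() strips the
 * ISS back out, reads the secret index from the low bits, recomputes the
 * digest, and accepts the cookie only if the remaining high bits cancel to
 * zero.
 */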
1235
1236 /*
1237 * The values below are chosen to minimize the size of the tcp_secret
1238 * table, as well as providing roughly a 16 second lifetime for the cookie.
1239 */
1240
1241 #define SYNCOOKIE_WNDBITS 5 /* exposed bits for window indexing */
1242 #define SYNCOOKIE_TIMESHIFT 1 /* scale ticks to window time units */
1243
1244 #define SYNCOOKIE_WNDMASK ((1 << SYNCOOKIE_WNDBITS) - 1)
1245 #define SYNCOOKIE_NSECRETS (1 << SYNCOOKIE_WNDBITS)
1246 #define SYNCOOKIE_TIMEOUT \
1247 (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
1248 #define SYNCOOKIE_DATAMASK ((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
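/*
 * With the values above the secret index advances every half second
 * ((ticks << SYNCOOKIE_TIMESHIFT) / hz, masked to 5 bits) and wraps after
 * 32 slots, while SYNCOOKIE_TIMEOUT works out to hz * 32 / 2 == 16 * hz
 * ticks.  A secret is therefore regenerated roughly 16 seconds after it was
 * created, which is the cookie lifetime referred to above.
 */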
1249
1250 static struct {
1251 u_int32_t ts_secbits[4];
1252 u_int ts_expire;
1253 } tcp_secret[SYNCOOKIE_NSECRETS];
1254
1255 static int tcp_msstab[] = { 0, 536, 1460, 8960 };
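/*
 * tcp_msstab[] holds the four MSS values that fit in the two (A) bits of a
 * cookie.  syncookie_generate() stores the index of the largest entry not
 * exceeding the MSS the peer advertised, and syncookie_lookup() restores
 * sc_peer_mss from that index, so a connection recovered from a cookie may
 * end up with a smaller MSS than the peer actually offered.
 */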
1256
1257 static MD5_CTX syn_ctx;
1258
1259 #define MD5Add(v) MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))
1260
1261 struct md5_add {
1262 u_int32_t laddr, faddr;
1263 u_int32_t secbits[4];
1264 u_int16_t lport, fport;
1265 };
1266
1267 #ifdef CTASSERT
1268 CTASSERT(sizeof(struct md5_add) == 28);
1269 #endif
1270
1271 /*
1272 * Consider the problem of a recreated (and retransmitted) cookie. If the
1273 * original SYN was accepted, the connection is established. The second
1274 * SYN is inflight, and if it arrives with an ISN that falls within the
1275 * receive window, the connection is killed.
1276 *
1277 * However, since cookies have other problems, this may not be worth
1278 * worrying about.
1279 */
1280
1281 static u_int32_t
1282 syncookie_generate(struct syncache *sc)
1283 {
1284 u_int32_t md5_buffer[4];
1285 u_int32_t data;
1286 int idx, i;
1287 struct md5_add add;
1288
1289 idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
1290 if (tcp_secret[idx].ts_expire < ticks) {
1291 for (i = 0; i < 4; i++)
1292 tcp_secret[idx].ts_secbits[i] = arc4random();
1293 tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
1294 }
1295 for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
1296 if (tcp_msstab[data] <= sc->sc_peer_mss)
1297 break;
1298 data = (data << SYNCOOKIE_WNDBITS) | idx;
1299 data ^= sc->sc_irs; /* peer's iss */
1300 MD5Init(&syn_ctx);
1301 #ifdef INET6
1302 if (sc->sc_inc.inc_isipv6) {
1303 MD5Add(sc->sc_inc.inc6_laddr);
1304 MD5Add(sc->sc_inc.inc6_faddr);
1305 add.laddr = 0;
1306 add.faddr = 0;
1307 } else
1308 #endif
1309 {
1310 add.laddr = sc->sc_inc.inc_laddr.s_addr;
1311 add.faddr = sc->sc_inc.inc_faddr.s_addr;
1312 }
1313 add.lport = sc->sc_inc.inc_lport;
1314 add.fport = sc->sc_inc.inc_fport;
1315 add.secbits[0] = tcp_secret[idx].ts_secbits[0];
1316 add.secbits[1] = tcp_secret[idx].ts_secbits[1];
1317 add.secbits[2] = tcp_secret[idx].ts_secbits[2];
1318 add.secbits[3] = tcp_secret[idx].ts_secbits[3];
1319 MD5Add(add);
1320 MD5Final((u_char *)&md5_buffer, &syn_ctx);
1321 data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
1322 return (data);
1323 }
1324
1325 static struct syncache *
1326 syncookie_lookup(inc, th, so)
1327 struct in_conninfo *inc;
1328 struct tcphdr *th;
1329 struct socket *so;
1330 {
1331 u_int32_t md5_buffer[4];
1332 struct syncache *sc;
1333 u_int32_t data;
1334 int wnd, idx;
1335 struct md5_add add;
1336
1337 data = (th->th_ack - 1) ^ (th->th_seq - 1); /* remove ISS */
1338 idx = data & SYNCOOKIE_WNDMASK;
1339 if (tcp_secret[idx].ts_expire < ticks ||
1340 sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
1341 return (NULL);
1342 MD5Init(&syn_ctx);
1343 #ifdef INET6
1344 if (inc->inc_isipv6) {
1345 MD5Add(inc->inc6_laddr);
1346 MD5Add(inc->inc6_faddr);
1347 add.laddr = 0;
1348 add.faddr = 0;
1349 } else
1350 #endif
1351 {
1352 add.laddr = inc->inc_laddr.s_addr;
1353 add.faddr = inc->inc_faddr.s_addr;
1354 }
1355 add.lport = inc->inc_lport;
1356 add.fport = inc->inc_fport;
1357 add.secbits[0] = tcp_secret[idx].ts_secbits[0];
1358 add.secbits[1] = tcp_secret[idx].ts_secbits[1];
1359 add.secbits[2] = tcp_secret[idx].ts_secbits[2];
1360 add.secbits[3] = tcp_secret[idx].ts_secbits[3];
1361 MD5Add(add);
1362 MD5Final((u_char *)&md5_buffer, &syn_ctx);
1363 data ^= md5_buffer[0];
1364 if ((data & ~SYNCOOKIE_DATAMASK) != 0)
1365 return (NULL);
1366 data = data >> SYNCOOKIE_WNDBITS;
1367
1368 sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
1369 if (sc == NULL)
1370 return (NULL);
1371 /*
1372 * Fill in the syncache values.
1373 * XXX duplicate code from syncache_add
1374 */
1375 sc->sc_ipopts = NULL;
1376 sc->sc_inc.inc_fport = inc->inc_fport;
1377 sc->sc_inc.inc_lport = inc->inc_lport;
1378 #ifdef INET6
1379 sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
1380 if (inc->inc_isipv6) {
1381 sc->sc_inc.inc6_faddr = inc->inc6_faddr;
1382 sc->sc_inc.inc6_laddr = inc->inc6_laddr;
1383 sc->sc_route6.ro_rt = NULL;
1384 } else
1385 #endif
1386 {
1387 sc->sc_inc.inc_faddr = inc->inc_faddr;
1388 sc->sc_inc.inc_laddr = inc->inc_laddr;
1389 sc->sc_route.ro_rt = NULL;
1390 }
1391 sc->sc_irs = th->th_seq - 1;
1392 sc->sc_iss = th->th_ack - 1;
1393 wnd = sbspace(&so->so_rcv);
1394 wnd = imax(wnd, 0);
1395 wnd = imin(wnd, TCP_MAXWIN);
1396 sc->sc_wnd = wnd;
1397 sc->sc_flags = 0;
1398 sc->sc_rxtslot = 0;
1399 sc->sc_peer_mss = tcp_msstab[data];
1400 return (sc);
1401 }