/*-
 * Copyright (c) 2001 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.2/sys/netinet/tcp_syncache.c 122922 2003-11-20 20:07:39Z andre $
 */

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mac.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCPDEBUG
#include <netinet/tcpip.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <vm/uma.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
#ifdef TCPDEBUG
static int	 syncache_respond(struct syncache *, struct mbuf *, struct socket *);
#else
static int	 syncache_respond(struct syncache *, struct mbuf *);
#endif
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
 * by then the odds are that the user has given up attempting to connect.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30
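
/*
 * With the defaults above, the overall cache limit computed in
 * syncache_init() works out to 512 * 30 == 15360 entries (less the one
 * entry of headroom subtracted there so that a new entry can bump out
 * an older one).
 */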

struct tcp_syncache {
	struct	syncache_head *hashbase;
	uma_zone_t zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
	u_int	next_reseed;
	TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
	struct	callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
};
static struct tcp_syncache tcp_syncache;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)
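
/*
 * Both hash macros fold the foreign address and the port pair together
 * with a random secret chosen at initialization time, then mask the
 * result down to the (power of 2) table size.  The secret keeps remote
 * hosts from predicting bucket placement and deliberately overflowing a
 * single bucket.  The IPv6 variant mixes only the first and last 32-bit
 * words of the foreign address, presumably as a speed trade-off, since
 * those words tend to carry the most entropy.
 */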

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

#define SYNCACHE_TIMEOUT(sc, slot) do {					\
	sc->sc_rxtslot = (slot);					\
	sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[(slot)];	\
	TAILQ_INSERT_TAIL(&tcp_syncache.timerq[(slot)], sc, sc_timerq);	\
	if (!callout_active(&tcp_syncache.tt_timerq[(slot)]))		\
		callout_reset(&tcp_syncache.tt_timerq[(slot)],		\
		    TCPTV_RTOBASE * tcp_backoff[(slot)],		\
		    syncache_timer, (void *)((intptr_t)(slot)));	\
} while (0)
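
/*
 * Every entry in a given timer queue uses the same backoff interval, so
 * appending at the tail keeps each queue sorted by sc_rxttime.  One
 * callout per slot therefore suffices: it only ever needs to be
 * scheduled for the entry at the head of that slot's queue.
 */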

static void
syncache_free(struct syncache *sc)
{
	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);

	uma_zfree(tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.next_reseed = 0;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
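
	/*
	 * The three tunables above are read-only after boot (CTLFLAG_RDTUN),
	 * so they would typically be set from loader.conf, for example:
	 *
	 *	net.inet.tcp.syncache.hashsize="1024"
	 *	net.inet.tcp.syncache.bucketlimit="60"
	 *
	 * (illustrative values, not recommendations).
	 */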
	if (!powerof2(tcp_syncache.hashsize)) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i],
		    debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.cache_limit -= 1;
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
}

static void
syncache_insert(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	struct syncache *sc2;
	int i;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc2 != NULL)
				break;
		}
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	SYNCACHE_TIMEOUT(sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}

static void
syncache_drop(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	if (sch == NULL) {
#ifdef INET6
		if (sc->sc_inc.inc_isipv6) {
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else
#endif
		{
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	tcp_syncache.cache_count--;

	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);

	syncache_free(sc);
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 */
static void
syncache_timer(xslot)
	void *xslot;
{
	intptr_t slot = (intptr_t)xslot;
	struct syncache *sc, *nsc;
	struct inpcb *inp;

	INP_INFO_WLOCK(&tcbinfo);
	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
		/* XXX can this happen? */
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	callout_deactivate(&tcp_syncache.tt_timerq[slot]);

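	/*
	 * Entries within a slot queue are ordered by increasing sc_rxttime
	 * (SYNCACHE_TIMEOUT always appends at the tail with a fixed
	 * per-slot interval), so the walk below can stop at the first
	 * entry that has not yet expired.
	 */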
	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
	while (nsc != NULL) {
		if (ticks < nsc->sc_rxttime)
			break;
		sc = nsc;
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp == NULL || inp->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache to
		 * modify another entry, so do not obtain the next entry
		 * on the timer chain until it has completed.
		 */
#ifdef TCPDEBUG
		(void) syncache_respond(sc, NULL, NULL);
#else
		(void) syncache_respond(sc, NULL);
#endif
		nsc = TAILQ_NEXT(sc, sc_timerq);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
		SYNCACHE_TIMEOUT(sc, slot + 1);
	}
	if (nsc != NULL)
		callout_reset(&tcp_syncache.tt_timerq[slot],
		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
	INP_INFO_WUNLOCK(&tcbinfo);
}

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(inc, schp)
	struct in_conninfo *inc;
	struct syncache_head **schp;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(inc)
	struct in_conninfo *inc;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}

/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(sc, lso, m)
	struct syncache *sc;
	struct socket *lso;
	struct mbuf *m;
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	GIANT_REQUIRED;			/* XXX until socket locking */
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort2;
	}
#ifdef MAC
	mac_set_socket_peer_from_mbuf(m, so);
#endif

	inp = sotoinpcb(so);
	INP_LOCK(inp);

	/*
	 * Insert new socket into hash list.
	 */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinshash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef FAST_IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 *sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6,
		    M_SONAME, M_NOWAIT | M_ZERO);
		if (sin6 == NULL)
			goto abort;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(*sin6);
		sin6->sin6_addr = sc->sc_inc.inc6_faddr;
		sin6->sin6_port = sc->sc_inc.inc_fport;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)sin6, &thread0)) {
			inp->in6p_laddr = laddr6;
			FREE(sin6, M_SONAME);
			goto abort;
		}
		FREE(sin6, M_SONAME);
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in *sin;

		inp->inp_options = ip_srcroute();
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		MALLOC(sin, struct sockaddr_in *, sizeof *sin,
		    M_SONAME, M_NOWAIT | M_ZERO);
		if (sin == NULL)
			goto abort;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = sc->sc_inc.inc_faddr;
		sin->sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)sin, &thread0)) {
			inp->inp_laddr = laddr;
			FREE(sin, M_SONAME);
			goto abort;
		}
		FREE(sin, M_SONAME);
	}

	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->requested_s_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_CC) {
		/*
		 * Initialization of the tcpcb for transaction;
		 *   set SND.WND = SEG.WND,
		 *   initialize CCsend and CCrecv.
		 */
		tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
		tp->cc_send = sc->sc_cc_send;
		tp->cc_recv = sc->sc_cc_recv;
	}

	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxtslot != 0)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	INP_UNLOCK(inp);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	INP_UNLOCK(inp);
abort2:
	if (so != NULL)
		(void) soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(inc, th, sop, m)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN,ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop, m);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
#endif
		m_freem(m);			/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else
		tcpstat.tcps_sc_completed++;

	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(inc, to, th, sop, m)
	struct in_conninfo *inc;
	struct tcpopt *to;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	struct rmxp_tao tao;
	int i, win;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	so = *sop;
	tp = sototcpcb(so);
	bzero(&tao, sizeof(tao));

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute();

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * Should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options)?
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
#ifdef TCPDEBUG
		if (syncache_respond(sc, m, so) == 0) {
#else
		if (syncache_respond(sc, m) == 0) {
#endif
			/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		*sop = NULL;
		return (1);
	}

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc != NULL)
				break;
		}
		sc->sc_tp->ts_recent = ticks;
		syncache_drop(sc, NULL);
		tcpstat.tcps_sc_zonefail++;
		sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
		if (sc == NULL) {
			if (ipopts)
				(void) m_free(ipopts);
			return (0);
		}
	}

	/*
	 * Fill in the syncache values.
	 */
	bzero(sc, sizeof(*sc));
	sc->sc_tp = tp;
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = arc4random();

	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
	win = sbspace(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
				wscale++;
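			/*
			 * Worked example (assuming a 256 KB receive
			 * buffer): 65535 << 2 == 262140 is still smaller
			 * than 262144, so the loop above settles on a
			 * window scale of 3.
			 */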
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_rfc1644) {
		/*
		 * A CC or CC.new option received in a SYN makes
		 * it ok to send CC in subsequent segments.
		 */
		if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
			sc->sc_cc_recv = to->to_cc;
			sc->sc_cc_send = CC_INC(tcp_ccgen);
			sc->sc_flags |= SCF_CC;
		}
	}
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;

	/*
	 * XXX
	 * We have the option here of not doing TAO (even if the segment
	 * qualifies) and instead falling back to a normal 3WHS via the
	 * syncache.  This allows us to apply synflood protection to
	 * TAO-qualifying SYNs also.  However, there should be a heuristic
	 * to determine when to do this, and none is present at the moment.
	 */

	/*
	 * Perform TAO test on incoming CC (SEG.CC) option, if any.
	 * - compare SEG.CC against cached CC from the same host, if any.
	 * - if SEG.CC > cached value, SYN must be new and is accepted
	 *	immediately: save new CC in the cache, mark the socket
	 *	connected, enter ESTABLISHED state, turn on flag to
	 *	send a SYN in the next segment.
	 *	A virtual advertised window is set in rcv_adv to
	 *	initialize SWS prevention.  Then enter normal segment
	 *	processing: drop SYN, process data and FIN.
	 * - otherwise do a normal 3-way handshake.
	 */
	if (tcp_do_rfc1644)
		tcp_hc_gettao(&sc->sc_inc, &tao);

	if ((to->to_flags & TOF_CC) != 0) {
		if (((tp->t_flags & TF_NOPUSH) != 0) &&
		    sc->sc_flags & SCF_CC && tao.tao_cc != 0 &&
		    CC_GT(to->to_cc, tao.tao_cc)) {
			sc->sc_rxtslot = 0;
			so = syncache_socket(sc, *sop, m);
			if (so != NULL) {
				tao.tao_cc = to->to_cc;
				tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
				    tao.tao_cc, 0);
				*sop = so;
			}
			syncache_free(sc);
			return (so != NULL);
		}
	} else {
		/*
		 * No CC option, but maybe CC.NEW: invalidate cached value.
		 */
		if (tcp_do_rfc1644) {
			tao.tao_cc = 0;
			tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
			    tao.tao_cc, 0);
		}
	}

	/*
	 * TAO test failed or there was no CC option,
	 * do a standard 3-way handshake.
	 */
#ifdef TCPDEBUG
	if (syncache_respond(sc, m, so) == 0) {
#else
	if (syncache_respond(sc, m) == 0) {
#endif
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	*sop = NULL;
	return (1);
}

#ifdef TCPDEBUG
static int
syncache_respond(sc, m, so)
	struct syncache *sc;
	struct mbuf *m;
	struct socket *so;
#else
static int
syncache_respond(sc, m)
	struct syncache *sc;
	struct mbuf *m;
#endif
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct tcphdr *th;
	struct inpcb *inp;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

	hlen =
#ifdef INET6
	    (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
#endif
	    sizeof(struct ip);

	KASSERT((&sc->sc_inc) != NULL, ("syncache_respond with NULL in_conninfo pointer"));

	/* Determine MSS we advertise to the other end of the connection */
	mssopt = tcp_mssopt(&sc->sc_inc);

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
	}
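	/*
	 * For reference, a worked example using the values from tcp.h:
	 * with every option enabled this is 4 (MSS) + 4 (NOP-padded
	 * window scale) + 12 (TCPOLEN_TSTAMP_APPA) + 2 * 8
	 * (TCPOLEN_CC_APPA) == 36 bytes of options.
	 */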
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible ?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	inp = sc->sc_tp->t_inpcb;
	INP_LOCK(inp);
#ifdef MAC
	mac_create_mbuf_from_socket(inp->inp_socket, m);
#endif

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		/* ip6_flow = ??? */

		th = (struct tcphdr *)(ip6 + 1);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = inp->inp_ip_ttl;	/* XXX */
		ip->ip_tos = inp->inp_ip_tos;	/* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
			ip->ip_off |= IP_DF;

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen != 0) {
		optp = (u_int8_t *)(th + 1);
		*optp++ = TCPOPT_MAXSEG;
		*optp++ = TCPOLEN_MAXSEG;
		*optp++ = (mssopt >> 8) & 0xff;
		*optp++ = mssopt & 0xff;

		if (sc->sc_flags & SCF_WINSCALE) {
			*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
			    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
			    sc->sc_request_r_scale);
			optp += 4;
		}

		if (sc->sc_flags & SCF_TIMESTAMP) {
			u_int32_t *lp = (u_int32_t *)(optp);

			/* Form timestamp option per appendix A of RFC 1323. */
			*lp++ = htonl(TCPOPT_TSTAMP_HDR);
			*lp++ = htonl(ticks);
			*lp   = htonl(sc->sc_tsrecent);
			optp += TCPOLEN_TSTAMP_APPA;
		}

		/*
		 * Send CC and CC.echo if we received CC from our peer.
		 */
		if (sc->sc_flags & SCF_CC) {
			u_int32_t *lp = (u_int32_t *)(optp);

			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
			*lp++ = htonl(sc->sc_cc_send);
			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
			*lp   = htonl(sc->sc_cc_recv);
			optp += TCPOLEN_CC_APPA * 2;
		}
	}

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef TCPDEBUG
		/*
		 * Trace.
		 */
		if (so != NULL && so->so_options & SO_DEBUG) {
			struct tcpcb *tp = sototcpcb(so);
			tcp_trace(TA_OUTPUT, tp->t_state, tp,
			    mtod(m, void *), th, 0);
		}
#endif
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, inp);
	}
	INP_UNLOCK(inp);
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */
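
/*
 * Decoding sketch, matching syncookie_generate()/syncookie_lookup()
 * below: XORing (th_ack - 1) with (th_seq - 1) strips the peer's ISS
 * back out of the cookie.  The low SYNCOOKIE_WNDBITS bits then select
 * which secret was used, the next two bits index tcp_msstab[], and all
 * higher bits must match the recomputed MD5 output for the cookie to
 * validate.
 */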

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
	(hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
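
/*
 * Worked out with the constants above: SYNCOOKIE_TIMEOUT is
 * hz * 32 / 2 == 16 * hz ticks, i.e. 16 seconds regardless of the
 * configured hz value.
 */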

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif
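
/*
 * The CTASSERT above relies on the field sizes summing to exactly
 * 2*4 + 4*4 + 2*2 == 28 bytes with no compiler-inserted padding, so
 * that MD5Add(add) hashes precisely the intended fields and nothing
 * else.
 */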

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

static u_int32_t
syncookie_generate(struct syncache *sc)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	return (data);
}

static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL)
		return (NULL);
	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}