FreeBSD/Linux Kernel Cross Reference
sys/netinet/ip_sync.c
1 /* $NetBSD: ip_sync.c,v 1.1.1.1 2004/03/28 08:56:52 martti Exp $ */
2
3 /*
4 * Copyright (C) 1995-1998 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if defined(KERNEL) || defined(_KERNEL)
9 # undef KERNEL
10 # undef _KERNEL
11 # define KERNEL 1
12 # define _KERNEL 1
13 #endif
14 #include <sys/errno.h>
15 #include <sys/types.h>
16 #include <sys/param.h>
17 #include <sys/file.h>
18 #if !defined(_KERNEL) && !defined(__KERNEL__)
19 # include <stdio.h>
20 # include <stdlib.h>
21 # include <string.h>
22 # define _KERNEL
23 # define KERNEL
24 # ifdef __OpenBSD__
25 struct file;
26 # endif
27 # include <sys/uio.h>
28 # undef _KERNEL
29 # undef KERNEL
30 #else
31 # include <sys/systm.h>
32 # if !defined(__SVR4) && !defined(__svr4__)
33 # include <sys/mbuf.h>
34 # endif
35 #endif
36 #if defined(__NetBSD__) && (__NetBSD_Version__ >= 104000000)
37 # include <sys/proc.h>
38 #endif
39 #if defined(_KERNEL) && (__FreeBSD_version >= 220000)
40 # include <sys/filio.h>
41 # include <sys/fcntl.h>
42 # if (__FreeBSD_version >= 300000) && !defined(IPFILTER_LKM)
43 # include "opt_ipfilter.h"
44 # endif
45 #else
46 # include <sys/ioctl.h>
47 #endif
48 #include <sys/time.h>
49 #if !defined(linux)
50 # include <sys/protosw.h>
51 #endif
52 #include <sys/socket.h>
53 #if defined(__SVR4) || defined(__svr4__)
54 # include <sys/filio.h>
55 # include <sys/byteorder.h>
56 # ifdef _KERNEL
57 # include <sys/dditypes.h>
58 # endif
59 # include <sys/stream.h>
60 # include <sys/kmem.h>
61 #endif
62
63 #include <net/if.h>
64 #ifdef sun
65 # include <net/af.h>
66 #endif
67 #include <net/route.h>
68 #include <netinet/in.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/ip.h>
71 #include <netinet/tcp.h>
72 #if !defined(linux)
73 # include <netinet/ip_var.h>
74 #endif
75 #if !defined(__hpux) && !defined(linux)
76 # include <netinet/tcp_fsm.h>
77 #endif
78 #include <netinet/udp.h>
79 #include <netinet/ip_icmp.h>
80 #include "netinet/ip_compat.h"
81 #include <netinet/tcpip.h>
82 #include "netinet/ip_fil.h"
83 #include "netinet/ip_nat.h"
84 #include "netinet/ip_frag.h"
85 #include "netinet/ip_state.h"
86 #include "netinet/ip_proxy.h"
87 #include "netinet/ip_sync.h"
88 #ifdef USE_INET6
89 #include <netinet/icmp6.h>
90 #endif
91 #if (__FreeBSD_version >= 300000)
92 # include <sys/malloc.h>
93 # if defined(_KERNEL) && !defined(IPFILTER_LKM)
94 # include <sys/libkern.h>
95 # include <sys/systm.h>
96 # endif
97 #endif
98 /* END OF INCLUDES */
99
100 #if !defined(lint)
101 static const char rcsid[] = "@(#)Id: ip_sync.c,v 2.40.2.1 2004/03/22 12:21:54 darrenr Exp";
102 #endif
103
/*
 * Hash table sizes; must be powers of two because entries are hashed
 * with "num & (SYNC_STATETABSZ - 1)".
 */
#define	SYNC_STATETABSZ	256
#define	SYNC_NATTABSZ	256

#ifdef IPFILTER_SYNC
ipfmutex_t	ipf_syncadd, ipsl_mutex;	/* table-add serialiser / reader wakeup */
ipfrwlock_t	ipf_syncstate, ipf_syncnat;	/* rwlocks over the sync tables */
#if SOLARIS && defined(_KERNEL)
kcondvar_t	ipslwait;			/* readers sleep here for log data */
#endif
synclist_t	*syncstatetab[SYNC_STATETABSZ];	/* hash chains of state sync entries */
synclist_t	*syncnattab[SYNC_NATTABSZ];	/* hash chains of NAT sync entries */
						/* NOTE(review): syncnattab appears */
						/* unused below; NAT entries also   */
						/* land in syncstatetab -- confirm. */
synclogent_t	synclog[SYNCLOG_SZ];		/* queued "create" records */
syncupdent_t	syncupd[SYNCLOG_SZ];		/* queued "update" records */
u_int	ipf_syncnum = 1;	/* next sync sequence number to hand out */
u_int	ipf_syncwrap = 0;	/* set once ipf_syncnum has wrapped past zero */
u_int	sl_idx = 0,	/* next available sync log entry */
	su_idx = 0,	/* next available sync update entry */
	sl_tail = 0,	/* next sync log entry to read */
	su_tail = 0;	/* next sync update entry to read */


/* Byte-order helpers are compiled only where conversion is required. */
# if !defined(sparc) && !defined(__hppa)
void ipfsync_tcporder __P((int, struct tcpdata *));
void ipfsync_natorder __P((int, struct nat *));
void ipfsync_storder __P((int, struct ipstate *));
# endif
130
131
132 /* ------------------------------------------------------------------------ */
133 /* Function: ipfsync_init */
134 /* Returns: int - 0 == success, -1 == failure */
135 /* Parameters: Nil */
136 /* */
137 /* Initialise all of the locks required for the sync code and initialise */
138 /* any data structures, as required. */
139 /* ------------------------------------------------------------------------ */
140 int ipfsync_init()
141 {
142 RWLOCK_INIT(&ipf_syncstate, "add things to state sync table");
143 RWLOCK_INIT(&ipf_syncnat, "add things to nat sync table");
144 MUTEX_INIT(&ipf_syncadd, "add things to sync table");
145 MUTEX_INIT(&ipsl_mutex, "add things to sync table");
146 # if SOLARIS && defined(_KERNEL)
147 cv_init(&ipslwait, "ipsl condvar", CV_DRIVER, NULL);
148 # endif
149
150 bzero((char *)syncnattab, sizeof(syncnattab));
151 bzero((char *)syncstatetab, sizeof(syncstatetab));
152
153 return 0;
154 }
155
156
157 # if !defined(sparc) && !defined(__hppa)
158 /* ------------------------------------------------------------------------ */
159 /* Function: ipfsync_tcporder */
160 /* Returns: Nil */
161 /* Parameters: way(I) - direction of byte order conversion. */
162 /* td(IO) - pointer to data to be converted. */
163 /* */
164 /* Do byte swapping on values in the TCP state information structure that */
165 /* need to be used at both ends by the host in their native byte order. */
166 /* ------------------------------------------------------------------------ */
167 void ipfsync_tcporder(way, td)
168 int way;
169 tcpdata_t *td;
170 {
171 if (way) {
172 td->td_maxwin = htons(td->td_maxwin);
173 td->td_end = htonl(td->td_end);
174 td->td_maxend = htonl(td->td_maxend);
175 } else {
176 td->td_maxwin = ntohs(td->td_maxwin);
177 td->td_end = ntohl(td->td_end);
178 td->td_maxend = ntohl(td->td_maxend);
179 }
180 }
181
182
183 /* ------------------------------------------------------------------------ */
184 /* Function: ipfsync_natorder */
185 /* Returns: Nil */
186 /* Parameters: way(I) - direction of byte order conversion. */
187 /* nat(IO) - pointer to data to be converted. */
188 /* */
189 /* Do byte swapping on values in the NAT data structure that need to be */
190 /* used at both ends by the host in their native byte order. */
191 /* ------------------------------------------------------------------------ */
192 void ipfsync_natorder(way, n)
193 int way;
194 nat_t *n;
195 {
196 if (way) {
197 n->nat_age = htonl(n->nat_age);
198 n->nat_flags = htonl(n->nat_flags);
199 n->nat_ipsumd = htonl(n->nat_ipsumd);
200 n->nat_use = htonl(n->nat_use);
201 n->nat_dir = htonl(n->nat_dir);
202 } else {
203 n->nat_age = ntohl(n->nat_age);
204 n->nat_flags = ntohl(n->nat_flags);
205 n->nat_ipsumd = ntohl(n->nat_ipsumd);
206 n->nat_use = ntohl(n->nat_use);
207 n->nat_dir = ntohl(n->nat_dir);
208 }
209 }
210
211
212 /* ------------------------------------------------------------------------ */
213 /* Function: ipfsync_storder */
214 /* Returns: Nil */
215 /* Parameters: way(I) - direction of byte order conversion. */
216 /* ips(IO) - pointer to data to be converted. */
217 /* */
218 /* Do byte swapping on values in the IP state data structure that need to */
219 /* be used at both ends by the host in their native byte order. */
220 /* ------------------------------------------------------------------------ */
221 void ipfsync_storder(way, ips)
222 int way;
223 ipstate_t *ips;
224 {
225 ipfsync_tcporder(way, &ips->is_tcp.ts_data[0]);
226 ipfsync_tcporder(way, &ips->is_tcp.ts_data[1]);
227
228 if (way) {
229 ips->is_hv = htonl(ips->is_hv);
230 ips->is_die = htonl(ips->is_die);
231 ips->is_pass = htonl(ips->is_pass);
232 ips->is_flags = htonl(ips->is_flags);
233 ips->is_opt = htonl(ips->is_opt);
234 ips->is_optmsk = htonl(ips->is_optmsk);
235 ips->is_sec = htons(ips->is_sec);
236 ips->is_secmsk = htons(ips->is_secmsk);
237 ips->is_auth = htons(ips->is_auth);
238 ips->is_authmsk = htons(ips->is_authmsk);
239 ips->is_s0[0] = htonl(ips->is_s0[0]);
240 ips->is_s0[1] = htonl(ips->is_s0[1]);
241 ips->is_smsk[0] = htons(ips->is_smsk[0]);
242 ips->is_smsk[1] = htons(ips->is_smsk[1]);
243 } else {
244 ips->is_hv = ntohl(ips->is_hv);
245 ips->is_die = ntohl(ips->is_die);
246 ips->is_pass = ntohl(ips->is_pass);
247 ips->is_flags = ntohl(ips->is_flags);
248 ips->is_opt = ntohl(ips->is_opt);
249 ips->is_optmsk = ntohl(ips->is_optmsk);
250 ips->is_sec = ntohs(ips->is_sec);
251 ips->is_secmsk = ntohs(ips->is_secmsk);
252 ips->is_auth = ntohs(ips->is_auth);
253 ips->is_authmsk = ntohs(ips->is_authmsk);
254 ips->is_s0[0] = ntohl(ips->is_s0[0]);
255 ips->is_s0[1] = ntohl(ips->is_s0[1]);
256 ips->is_smsk[0] = ntohl(ips->is_smsk[0]);
257 ips->is_smsk[1] = ntohl(ips->is_smsk[1]);
258 }
259 }
260 # else /* !defined(sparc) && !defined(__hppa) */
261 # define ipfsync_tcporder(x,y)
262 # define ipfsync_natorder(x,y)
263 # define ipfsync_storder(x,y)
264 # endif /* !defined(sparc) && !defined(__hppa) */
265
266
267 # ifdef _KERNEL
268 /* ------------------------------------------------------------------------ */
269 /* Function: ipfsync_write */
270 /* Returns: int - 0 == success, else error value. */
271 /* Parameters: uio(I) - pointer to information about data to write */
272 /* */
273 /* Moves data from user space into the kernel and uses it for updating data */
274 /* structures in the state/NAT tables. */
275 /* ------------------------------------------------------------------------ */
276 int ipfsync_write(uio)
277 struct uio *uio;
278 {
279 synchdr_t sh;
280 int err = 0;
281
282 /*
283 * All writes will be in multiples of 4 and at least 8 bytes.
284 */
285 if ((uio->uio_resid & 3) || (uio->uio_resid < 8))
286 return EINVAL;
287
288 while ((uio->uio_resid > 0) &&
289 UIOMOVE((caddr_t)&sh, sizeof(sh), UIO_WRITE, uio) == 0) {
290 sh.sm_num = ntohl(sh.sm_num);
291 if (sh.sm_v != 4 && sh.sm_v != 6)
292 continue;
293
294 if ((sh.sm_cmd > SMC_MAXCMD) || (sh.sm_table > SMC_MAXTBL))
295 continue;
296
297 /*
298 * We currently only synchronise state information and NAT
299 * information - and even then, the NAT information is not
300 * yet sync'd for proxied connections.
301 */
302 if (sh.sm_table == SMC_STATE)
303 err = ipfsync_state(&sh, uio);
304 else if (sh.sm_table == SMC_NAT)
305 err = ipfsync_nat(&sh, uio);
306 if (err)
307 break;
308 }
309
310 return err;
311 }
312
313
/* ------------------------------------------------------------------------ */
/* Function:    ipfsync_read                                                */
/* Returns:     int - 0 == success, else error value.                       */
/* Parameters:  uio(O) - pointer to information about where to store data   */
/*                                                                          */
/* This function is called when a user program wants to read some data      */
/* for pending state/NAT updates.  If no data is available, the caller is   */
/* put to sleep, pending a wakeup from the "lower half" of this code.       */
/* ------------------------------------------------------------------------ */
int ipfsync_read(uio)
struct uio *uio;
{
	syncupdent_t *su;
	synclogent_t *sl;
	int err = 0;

	/* Reads must be 4-byte aligned and at least a header in size. */
	if ((uio->uio_resid & 3) || (uio->uio_resid < 8))
		return EINVAL;

# if (BSD >= 199306) || defined(__FreeBSD__) || defined(__osf__)
	uio->uio_rw = UIO_READ;
# endif

	/*
	 * Sleep until either a "create" record (sl_*) or an "update"
	 * record (su_*) has been queued by the lower half.  The wait
	 * primitive is platform-specific.
	 */
	MUTEX_ENTER(&ipsl_mutex);
	while ((sl_tail == sl_idx) && (su_tail == su_idx)) {
# if SOLARIS && defined(_KERNEL)
		/* Interruptible wait; a signal aborts the read. */
		if (!cv_wait_sig(&ipslwait, &ipsl_mutex)) {
			MUTEX_EXIT(&ipsl_mutex);
			return EINTR;
		}
# else
#  ifdef __hpux
		{
		lock_t *l;

		l = get_sleep_lock(&sl_tail);
		err = sleep(&sl_tail, PZERO+1);
		spinunlock(l);
		}
#  else /* __hpux */
#   ifdef __osf__
		err = mpsleep(&sl_tail, PSUSP|PCATCH, "ipl sleep", 0,
			      &ipsl_mutex, MS_LOCK_SIMPLE);
#   else
		/* NOTE(review): the mutex is released before SLEEP, so a  */
		/* wakeup arriving between the two could be missed; confirm */
		/* whether the platform SLEEP tolerates this.               */
		MUTEX_EXIT(&ipsl_mutex);
		err = SLEEP(&sl_tail, "ipl sleep");
#   endif /* __osf__ */
#  endif /* __hpux */
		if (err) {
			MUTEX_EXIT(&ipsl_mutex);
			return err;
		}
# endif /* SOLARIS */
	}
	MUTEX_EXIT(&ipsl_mutex);

	/* Drain queued "create" log records into the caller's buffer. */
	READ_ENTER(&ipf_syncstate);
	while ((sl_tail < sl_idx) && (uio->uio_resid > sizeof(*sl))) {
		sl = synclog + sl_tail++;
		err = UIOMOVE((caddr_t)sl, sizeof(*sl), UIO_READ, uio);
		if (err != 0)
			break;
	}

	/*
	 * Drain queued "update" records; once delivered, the owning
	 * synclist entry's slot index is cleared so it can be reused.
	 */
	while ((su_tail < su_idx) && (uio->uio_resid > sizeof(*su))) {
		su = syncupd + su_tail;
		su_tail++;
		err = UIOMOVE((caddr_t)su, sizeof(*su), UIO_READ, uio);
		if (err != 0)
			break;
		if (su->sup_hdr.sm_sl != NULL)
			su->sup_hdr.sm_sl->sl_idx = -1;
	}

	/* Reset both queues to empty when fully consumed. */
	MUTEX_ENTER(&ipf_syncadd);
	if (su_tail == su_idx)
		su_tail = su_idx = 0;
	if (sl_tail == sl_idx)
		sl_tail = sl_idx = 0;
	MUTEX_EXIT(&ipf_syncadd);
	RWLOCK_EXIT(&ipf_syncstate);
	return err;
}
397
398
/* ------------------------------------------------------------------------ */
/* Function:    ipfsync_state                                               */
/* Returns:     int - 0 == success, else error value.                       */
/* Parameters:  sp(I)  - pointer to sync packet data header                 */
/*              uio(I) - pointer to user data for further information       */
/*                                                                          */
/* Updates the state table according to information passed in the sync      */
/* header.  As required, more data is fetched from the uio structure but    */
/* varies depending on the contents of the sync header.  This function can  */
/* create a new state entry or update one.  Deletion is left to the state   */
/* structures being timed out correctly.                                    */
/*                                                                          */
/* Locking: on the success path of each case, ipf_syncstate and ipf_state   */
/* remain held at switch exit and are released at the bottom; error paths   */
/* must leave both locks released before breaking out.                      */
/* ------------------------------------------------------------------------ */
int ipfsync_state(sp, uio)
synchdr_t *sp;
struct uio *uio;
{
	synctcp_update_t su;
	ipstate_t *is, sn;
	synclist_t *sl;
	frentry_t *fr;
	u_int hv;
	int err;

# if (BSD >= 199306) || defined(__FreeBSD__) || defined(__osf__)
	uio->uio_rw = UIO_WRITE;
# endif
	/* Hash on the sync sequence number (table size is a power of two). */
	hv = sp->sm_num & (SYNC_STATETABSZ - 1);

	switch (sp->sm_cmd)
	{
	case SMC_CREATE :
		/* Pull the full wire-format state structure from userspace. */
		err = UIOMOVE((caddr_t)&sn, sizeof(sn), UIO_WRITE, uio);
		if (err != 0)
			break;
		KMALLOC(is, ipstate_t *);
		if (is == NULL) {
			err = ENOMEM;
			break;
		}

		KMALLOC(sl, synclist_t *);
		if (sl == NULL) {
			err = ENOMEM;
			KFREE(is);
			break;
		}

		/*
		 * Fields before is_die are host-local (pointers, locks);
		 * zero those, copy only the wire-provided remainder, then
		 * convert it back to host byte order.
		 */
		bzero((char *)is, offsetof(ipstate_t, is_die));
		bcopy((char *)&sn.is_die, (char *)&is->is_die,
		      sizeof(*is) - offsetof(ipstate_t, is_die));
		ipfsync_storder(0, is);

		/*
		 * We need to find the same rule on the slave as was used on
		 * the master to create this state entry.
		 */
		READ_ENTER(&ipf_mutex);
		fr = fr_getrulen(IPL_LOGIPF, sn.is_group, sn.is_rulen);
		if (fr != NULL) {
			MUTEX_ENTER(&fr->fr_lock);
			fr->fr_ref++;
			fr->fr_statecnt++;
			MUTEX_EXIT(&fr->fr_lock);
		}
		RWLOCK_EXIT(&ipf_mutex);

		is->is_rule = fr;
		is->is_sync = sl;

		sl->sl_idx = -1;
		sl->sl_ips = is;
		/* NOTE(review): ipfsync_write already ntohl'd sm_num        */
		/* before dispatching here; converting it a second time      */
		/* looks like a double byte-swap -- confirm on LE hosts.     */
		sl->sl_num = ntohl(sp->sm_num);

		WRITE_ENTER(&ipf_syncstate);
		WRITE_ENTER(&ipf_state);

		/* Insert at the head of the hash chain, fixing the old */
		/* head's back pointer so deletion stays consistent.    */
		sl->sl_pnext = syncstatetab + hv;
		sl->sl_next = syncstatetab[hv];
		if (syncstatetab[hv] != NULL)
			syncstatetab[hv]->sl_pnext = &sl->sl_next;
		syncstatetab[hv] = sl;
		MUTEX_DOWNGRADE(&ipf_syncstate);
		fr_stinsert(is, sp->sm_rev);
		/*
		 * Do not initialise the interface pointers for the state
		 * entry as the full complement of interface names may not
		 * be present.
		 *
		 * Put this state entry on its timeout queue.
		 */
		fr_setstatequeue(is, sp->sm_rev);
		break;

	case SMC_UPDATE :
		err = UIOMOVE((caddr_t)&su, sizeof(su), UIO_WRITE, uio);
		if (err != 0)
			break;

		/* Find the entry this update refers to by sync number. */
		READ_ENTER(&ipf_syncstate);
		for (sl = syncstatetab[hv]; (sl != NULL); sl = sl->sl_next)
			if (sl->sl_hdr.sm_num == sp->sm_num)
				break;
		if (sl == NULL) {
			RWLOCK_EXIT(&ipf_syncstate);
			err = ENOENT;
			break;
		}

		READ_ENTER(&ipf_state);

		is = sl->sl_ips;

		MUTEX_ENTER(&is->is_lock);
		switch (sp->sm_p)
		{
		case IPPROTO_TCP :
			/* Refresh sequence/window tracking for both sides. */
			is->is_send = su.stu_data[0].td_end;
			is->is_maxsend = su.stu_data[0].td_maxend;
			is->is_maxswin = su.stu_data[0].td_maxwin;
			is->is_state[0] = su.stu_state[0];
			is->is_dend = su.stu_data[1].td_end;
			is->is_maxdend = su.stu_data[1].td_maxend;
			is->is_maxdwin = su.stu_data[1].td_maxwin;
			is->is_state[1] = su.stu_state[1];
			break;
		default :
			break;
		}
		fr_setstatequeue(is, sp->sm_rev);
		MUTEX_EXIT(&is->is_lock);
		break;

	default :
		err = EINVAL;
		break;
	}

	/* Success paths leave ipf_state and ipf_syncstate held. */
	if (err == 0) {
		RWLOCK_EXIT(&ipf_state);
		RWLOCK_EXIT(&ipf_syncstate);
	}
	return err;
}
542 # endif /* _KERNEL */
543
544
545 /* ------------------------------------------------------------------------ */
546 /* Function: ipfsync_del */
547 /* Returns: Nil */
548 /* Parameters: sl(I) - pointer to synclist object to delete */
549 /* */
550 /* Deletes an object from the synclist table and free's its memory. */
551 /* ------------------------------------------------------------------------ */
552 void ipfsync_del(sl)
553 synclist_t *sl;
554 {
555 WRITE_ENTER(&ipf_syncstate);
556 *sl->sl_pnext = sl->sl_next;
557 if (sl->sl_next != NULL)
558 sl->sl_next->sl_pnext = sl->sl_pnext;
559 if (sl->sl_idx != -1)
560 syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
561 RWLOCK_EXIT(&ipf_syncstate);
562 KFREE(sl);
563 }
564
565
566 /* ------------------------------------------------------------------------ */
567 /* Function: ipfsync_nat */
568 /* Returns: int - 0 == success, else error value. */
569 /* Parameters: sp(I) - pointer to sync packet data header */
570 /* uio(I) - pointer to user data for further information */
571 /* */
572 /* Updates the NAT table according to information passed in the sync */
573 /* header. As required, more data is fetched from the uio structure but */
574 /* varies depending on the contents of the sync header. This function can */
575 /* create a new NAT entry or update one. Deletion is left to the NAT */
576 /* structures being timed out correctly. */
577 /* ------------------------------------------------------------------------ */
578 int ipfsync_nat(sp, uio)
579 synchdr_t *sp;
580 struct uio *uio;
581 {
582 synclogent_t sle;
583 syncupdent_t su;
584 nat_t *n, *nat;
585 synclist_t *sl;
586 u_int hv = 0;
587 int err;
588
589 # if (BSD >= 199306) || defined(__FreeBSD__) || defined(__osf__)
590 uio->uio_rw = UIO_WRITE;
591 # endif
592
593 READ_ENTER(&ipf_syncstate);
594
595 switch (sp->sm_cmd)
596 {
597 case SMC_CREATE :
598 err = UIOMOVE((caddr_t)&sle, sizeof(sle), UIO_WRITE, uio);
599 if (err != 0)
600 break;
601
602 KMALLOC(n, nat_t *);
603 if (n == NULL) {
604 err = ENOMEM;
605 break;
606 }
607
608 KMALLOC(sl, synclist_t *);
609 if (sl == NULL) {
610 err = ENOMEM;
611 KFREE(n);
612 break;
613 }
614
615 WRITE_ENTER(&ipf_nat);
616
617 nat = &sle.sle_un.sleu_ipn;
618 bzero((char *)n, offsetof(nat_t, nat_age));
619 bcopy((char *)&nat->nat_age, (char *)&n->nat_age,
620 sizeof(*n) - offsetof(nat_t, nat_age));
621 ipfsync_natorder(0, n);
622 n->nat_sync = sl;
623
624 sl->sl_idx = -1;
625 sl->sl_ipn = n;
626 sl->sl_num = ntohl(sp->sm_num);
627 sl->sl_pnext = syncstatetab + hv;
628 sl->sl_next = syncstatetab[hv];
629 if (syncstatetab[hv] != NULL)
630 syncstatetab[hv]->sl_pnext = &sl->sl_next;
631 syncstatetab[hv] = sl;
632 nat_insert(n, sl->sl_rev);
633 RWLOCK_EXIT(&ipf_nat);
634 break;
635
636 case SMC_UPDATE :
637 err = UIOMOVE((caddr_t)&su, sizeof(su), UIO_WRITE, uio);
638 if (err != 0)
639 break;
640
641 READ_ENTER(&ipf_syncstate);
642 for (sl = syncstatetab[hv]; (sl != NULL); sl = sl->sl_next)
643 if (sl->sl_hdr.sm_num == sp->sm_num)
644 break;
645 if (sl == NULL) {
646 err = ENOENT;
647 break;
648 }
649
650 READ_ENTER(&ipf_nat);
651
652 nat = sl->sl_ipn;
653
654 MUTEX_ENTER(&nat->nat_lock);
655 fr_setnatqueue(nat, sl->sl_rev);
656 MUTEX_EXIT(&nat->nat_lock);
657
658 RWLOCK_EXIT(&ipf_nat);
659
660 break;
661
662 default :
663 err = EINVAL;
664 break;
665 }
666
667 RWLOCK_EXIT(&ipf_syncstate);
668 return 0;
669 }
670
671
672 /* ------------------------------------------------------------------------ */
673 /* Function: ipfsync_new */
674 /* Returns: synclist_t* - NULL == failure, else pointer to new synclist */
675 /* data structure. */
676 /* Parameters: tab(I) - type of synclist_t to create */
677 /* fin(I) - pointer to packet information */
678 /* ptr(I) - pointer to owning object */
679 /* */
680 /* Creates a new sync table entry and notifies any sleepers that it's there */
681 /* waiting to be processed. */
682 /* ------------------------------------------------------------------------ */
683 synclist_t *ipfsync_new(tab, fin, ptr)
684 int tab;
685 fr_info_t *fin;
686 void *ptr;
687 {
688 synclist_t *sl, *ss;
689 synclogent_t *sle;
690 u_int hv, sz;
691
692 if (sl_idx == SYNCLOG_SZ)
693 return NULL;
694 KMALLOC(sl, synclist_t *);
695 if (sl == NULL)
696 return NULL;
697
698 MUTEX_ENTER(&ipf_syncadd);
699 /*
700 * Get a unique number for this synclist_t. The number is only meant
701 * to be unique for the lifetime of the structure and may be reused
702 * later.
703 */
704 ipf_syncnum++;
705 if (ipf_syncnum == 0) {
706 ipf_syncnum = 1;
707 ipf_syncwrap = 1;
708 }
709
710 hv = ipf_syncnum & (SYNC_STATETABSZ - 1);
711 while (ipf_syncwrap != 0) {
712 for (ss = syncstatetab[hv]; ss; ss = ss->sl_next)
713 if (ss->sl_hdr.sm_num == ipf_syncnum)
714 break;
715 if (ss == NULL)
716 break;
717 ipf_syncnum++;
718 hv = ipf_syncnum & (SYNC_STATETABSZ - 1);
719 }
720 /*
721 * Use the synch number of the object as the hash key. Should end up
722 * with relatively even distribution over time.
723 * XXX - an attacker could lunch an DoS attack, of sorts, if they are
724 * the only one causing new table entries by only keeping open every
725 * nth connection they make, where n is a value in the interval
726 * [0, SYNC_STATETABSZ-1].
727 */
728 sl->sl_pnext = syncstatetab + hv;
729 sl->sl_next = syncstatetab[hv];
730 syncstatetab[hv] = sl;
731 sl->sl_num = ipf_syncnum;
732 MUTEX_EXIT(&ipf_syncadd);
733
734 sl->sl_v = fin->fin_v;
735 sl->sl_p = fin->fin_p;
736 sl->sl_cmd = SMC_CREATE;
737 sl->sl_idx = -1;
738 sl->sl_table = tab;
739 sl->sl_rev = fin->fin_rev;
740 if (tab == SMC_STATE) {
741 sl->sl_ips = ptr;
742 sz = sizeof(*sl->sl_ips);
743 } else if (tab == SMC_NAT) {
744 sl->sl_ipn = ptr;
745 sz = sizeof(*sl->sl_ipn);
746 } else {
747 ptr = NULL;
748 sz = 0;
749 }
750
751 /*
752 * Create the log entry to be read by a user daemon. When it has been
753 * finished and put on the queue, send a signal to wakeup any waiters.
754 */
755 MUTEX_ENTER(&ipf_syncadd);
756 sle = synclog + sl_idx++;
757 bcopy((char *)&sl->sl_hdr, (char *)&sle->sle_hdr,
758 sizeof(sle->sle_hdr));
759 sle->sle_hdr.sm_num = htonl(sle->sle_hdr.sm_num);
760 if (ptr != NULL) {
761 bcopy((char *)ptr, (char *)&sle->sle_un, sz);
762 if (tab == SMC_STATE) {
763 ipfsync_storder(1, &sle->sle_un.sleu_ips);
764 } else if (tab == SMC_NAT) {
765 ipfsync_natorder(1, &sle->sle_un.sleu_ipn);
766 }
767 }
768 MUTEX_EXIT(&ipf_syncadd);
769
770 MUTEX_ENTER(&ipsl_mutex);
771 # if SOLARIS
772 # ifdef _KERNEL
773 cv_signal(&ipslwait);
774 # endif
775 MUTEX_EXIT(&ipsl_mutex);
776 # else
777 MUTEX_EXIT(&ipsl_mutex);
778 # ifdef _KERNEL
779 wakeup(&sl_tail);
780 # endif
781 # endif
782 return sl;
783 }
784
785
/* ------------------------------------------------------------------------ */
/* Function:    ipfsync_update                                              */
/* Returns:     Nil                                                         */
/* Parameters:  tab(I) - type of synclist_t to create                       */
/*              fin(I) - pointer to packet information                      */
/*              sl(I)  - pointer to synchronisation object                  */
/*                                                                          */
/* For outbound packets, only, create an sync update record for the user    */
/* process to read.                                                         */
/* ------------------------------------------------------------------------ */
void ipfsync_update(tab, fin, sl)
int tab;
fr_info_t *fin;
synclist_t *sl;
{
	synctcp_update_t *st;
	syncupdent_t *slu;
	ipstate_t *ips;
	nat_t *nat;

	/* Inbound packets and unsynced objects produce no record. */
	if (fin->fin_out == 0 || sl == NULL)
		return;

	WRITE_ENTER(&ipf_syncstate);
	MUTEX_ENTER(&ipf_syncadd);
	if (sl->sl_idx == -1) {
		/*
		 * First update for this object: claim a syncupd slot and
		 * build its header from the synclist entry.
		 * NOTE(review): su_idx is not bounds-checked against
		 * SYNCLOG_SZ here -- confirm overflow cannot occur.
		 */
		slu = syncupd + su_idx;
		sl->sl_idx = su_idx++;
		bcopy((char *)&sl->sl_hdr, (char *)&slu->sup_hdr,
		      sizeof(slu->sup_hdr));
		slu->sup_hdr.sm_sl = sl;
		slu->sup_hdr.sm_cmd = SMC_UPDATE;
		slu->sup_hdr.sm_table = tab;
		slu->sup_hdr.sm_num = htonl(sl->sl_num);
		slu->sup_hdr.sm_rev = fin->fin_rev;
# if 0
		/* Disabled: "st" has not been assigned at this point. */
		if (fin->fin_p == IPPROTO_TCP) {
			st->stu_len[0] = 0;
			st->stu_len[1] = 0;
		}
# endif
	} else
		/* Re-use the update slot already claimed for this object. */
		slu = syncupd + sl->sl_idx;
	MUTEX_EXIT(&ipf_syncadd);
	MUTEX_DOWNGRADE(&ipf_syncstate);

	/*
	 * Only TCP has complex timeouts, others just use default timeouts.
	 * For TCP, we only need to track the connection state and window.
	 */
	if (fin->fin_p == IPPROTO_TCP) {
		st = &slu->sup_tcp;
		if (tab == SMC_STATE) {
			/* Snapshot sequence/window tracking, both ways. */
			ips = sl->sl_ips;
			st->stu_age = htonl(ips->is_die);
			st->stu_data[0].td_end = ips->is_send;
			st->stu_data[0].td_maxend = ips->is_maxsend;
			st->stu_data[0].td_maxwin = ips->is_maxswin;
			st->stu_state[0] = ips->is_state[0];
			st->stu_data[1].td_end = ips->is_dend;
			st->stu_data[1].td_maxend = ips->is_maxdend;
			st->stu_data[1].td_maxwin = ips->is_maxdwin;
			st->stu_state[1] = ips->is_state[1];
		} else if (tab == SMC_NAT) {
			nat = sl->sl_ipn;
			st->stu_age = htonl(nat->nat_age);
		}
	}
	RWLOCK_EXIT(&ipf_syncstate);

	/* Wake any reader blocked in ipfsync_read(). */
	MUTEX_ENTER(&ipsl_mutex);
# if SOLARIS
#  ifdef _KERNEL
	cv_signal(&ipslwait);
#  endif
	MUTEX_EXIT(&ipsl_mutex);
# else
	MUTEX_EXIT(&ipsl_mutex);
#  ifdef _KERNEL
	wakeup(&sl_tail);
#  endif
# endif
}
869
870
/* ------------------------------------------------------------------------ */
/* Function:    fr_sync_ioctl                                               */
/* Returns:     int - 0 == success, != 0 == failure                         */
/* Parameters:  data(I) - pointer to ioctl data                             */
/*              cmd(I)  - ioctl command integer                             */
/*              mode(I) - file mode bits used with open                     */
/*                                                                          */
/* This function currently does not handle any ioctls and so just returns   */
/* EINVAL on all occasions.                                                 */
/* ------------------------------------------------------------------------ */
int fr_sync_ioctl(data, cmd, mode)
caddr_t data;
ioctlcmd_t cmd;
int mode;
{
	/* Placeholder: no sync-specific ioctls are implemented yet. */
	return EINVAL;
}
888 #endif /* IPFILTER_SYNC */
Cache object: d04d6a63ef3607754c72082dc48644a3
|