FreeBSD/Linux Kernel Cross Reference
sys/netpfil/pf/if_pfsync.c

/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND ISC)
 *
 * Copyright (c) 2002 Michael Shalayeff
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
 *
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.125, 1.127 - rewrite merge or stale processing
 * 1.128 - cleanups
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 * 1.126, 1.142 - deferred packets processing
 * 1.173 - correct expire time processing
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/nv.h>
#include <sys/priv.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>

#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#include <netpfil/pf/pfsync_nv.h>

struct pfsync_bucket;

union inet_template {
        struct ip      ipv4;
};

#define PFSYNC_MINPKT ( \
        sizeof(union inet_template) + \
        sizeof(struct pfsync_header) + \
        sizeof(struct pfsync_subheader) )
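
/*
 * PFSYNC_MINPKT is the length of an "empty" pfsync datagram: the
 * encapsulating IPv4 header plus the fixed pfsync header plus a single
 * subheader, with no actions attached yet.  Each bucket's b_len starts
 * at this value and grows as actions are queued; assuming a plain
 * 20-byte IPv4 header, that is 20 + sizeof(struct pfsync_header) +
 * sizeof(struct pfsync_subheader) bytes on the wire.
 */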

static int      pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
                    struct pfsync_state_peer *);
static int      pfsync_in_clr(struct mbuf *, int, int, int);
static int      pfsync_in_ins(struct mbuf *, int, int, int);
static int      pfsync_in_iack(struct mbuf *, int, int, int);
static int      pfsync_in_upd(struct mbuf *, int, int, int);
static int      pfsync_in_upd_c(struct mbuf *, int, int, int);
static int      pfsync_in_ureq(struct mbuf *, int, int, int);
static int      pfsync_in_del(struct mbuf *, int, int, int);
static int      pfsync_in_del_c(struct mbuf *, int, int, int);
static int      pfsync_in_bus(struct mbuf *, int, int, int);
static int      pfsync_in_tdb(struct mbuf *, int, int, int);
static int      pfsync_in_eof(struct mbuf *, int, int, int);
static int      pfsync_in_error(struct mbuf *, int, int, int);

static int (*pfsync_acts[])(struct mbuf *, int, int, int) = {
        pfsync_in_clr,                  /* PFSYNC_ACT_CLR */
        pfsync_in_ins,                  /* PFSYNC_ACT_INS */
        pfsync_in_iack,                 /* PFSYNC_ACT_INS_ACK */
        pfsync_in_upd,                  /* PFSYNC_ACT_UPD */
        pfsync_in_upd_c,                /* PFSYNC_ACT_UPD_C */
        pfsync_in_ureq,                 /* PFSYNC_ACT_UPD_REQ */
        pfsync_in_del,                  /* PFSYNC_ACT_DEL */
        pfsync_in_del_c,                /* PFSYNC_ACT_DEL_C */
        pfsync_in_error,                /* PFSYNC_ACT_INS_F */
        pfsync_in_error,                /* PFSYNC_ACT_DEL_F */
        pfsync_in_bus,                  /* PFSYNC_ACT_BUS */
        pfsync_in_tdb,                  /* PFSYNC_ACT_TDB */
        pfsync_in_eof                   /* PFSYNC_ACT_EOF */
};
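
/*
 * Incoming actions are dispatched through the table above: pfsync_input()
 * reads each struct pfsync_subheader and, after bounds-checking the action
 * against PFSYNC_ACT_MAX, indexes pfsync_acts[] with it.  A sketch of the
 * dispatch step, mirroring the loop in pfsync_input() below:
 *
 *	m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
 *	offset += sizeof(subh);
 *	rv = (*pfsync_acts[subh.action])(m, offset, ntohs(subh.count), flags);
 *
 * Handlers return the number of payload bytes they consumed, or -1 if they
 * already freed the mbuf (error or EOF).
 */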

struct pfsync_q {
        void            (*write)(struct pf_kstate *, void *);
        size_t          len;
        u_int8_t        action;
};

/* we have one of these for every PFSYNC_S_ */
static void     pfsync_out_state(struct pf_kstate *, void *);
static void     pfsync_out_iack(struct pf_kstate *, void *);
static void     pfsync_out_upd_c(struct pf_kstate *, void *);
static void     pfsync_out_del(struct pf_kstate *, void *);

static struct pfsync_q pfsync_qs[] = {
        { pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
        { pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
        { pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
        { pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
        { pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};
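
/*
 * pfsync_qs[] is indexed by the PFSYNC_S_* queue a state sits on (see
 * b_qs[] in struct pfsync_bucket below): each entry names the writer
 * that serializes a queued state, the per-entry message length used for
 * packet-size accounting, and the PFSYNC_ACT_* code placed in the
 * subheader when the queue is flushed.
 */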

static void     pfsync_q_ins(struct pf_kstate *, int, bool);
static void     pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *);

static void     pfsync_update_state(struct pf_kstate *);

struct pfsync_upd_req_item {
        TAILQ_ENTRY(pfsync_upd_req_item)        ur_entry;
        struct pfsync_upd_req                   ur_msg;
};

struct pfsync_deferral {
        struct pfsync_softc             *pd_sc;
        TAILQ_ENTRY(pfsync_deferral)    pd_entry;
        u_int                           pd_refs;
        struct callout                  pd_tmo;

        struct pf_kstate                *pd_st;
        struct mbuf                     *pd_m;
};

struct pfsync_softc;

struct pfsync_bucket
{
        int                     b_id;
        struct pfsync_softc     *b_sc;
        struct mtx              b_mtx;
        struct callout          b_tmo;
        int                     b_flags;
#define PFSYNCF_BUCKET_PUSH     0x00000001

        size_t                  b_len;
        TAILQ_HEAD(, pf_kstate)                 b_qs[PFSYNC_S_COUNT];
        TAILQ_HEAD(, pfsync_upd_req_item)       b_upd_req_list;
        TAILQ_HEAD(, pfsync_deferral)           b_deferrals;
        u_int                   b_deferred;
        void                    *b_plus;
        size_t                  b_pluslen;

        struct  ifaltq b_snd;
};
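
/*
 * Queued work is sharded into buckets, each with its own mutex, queues
 * and timeout callout, so states hashed to different buckets can be
 * queued concurrently.  pfsync_clone_create() sizes the array to
 * pfsync_buckets (mp_ncpus * 2 unless overridden via the
 * net.pfsync.pfsync_buckets tunable) and pfsync_get_bucket() maps a
 * state to its bucket.
 */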

struct pfsync_softc {
        /* Configuration */
        struct ifnet            *sc_ifp;
        struct ifnet            *sc_sync_if;
        struct ip_moptions      sc_imo;
        struct sockaddr_storage sc_sync_peer;
        uint32_t                sc_flags;
        uint8_t                 sc_maxupdates;
        union inet_template     sc_template;
        struct mtx              sc_mtx;

        /* Queued data */
        struct pfsync_bucket    *sc_buckets;

        /* Bulk update info */
        struct mtx              sc_bulk_mtx;
        uint32_t                sc_ureq_sent;
        int                     sc_bulk_tries;
        uint32_t                sc_ureq_received;
        int                     sc_bulk_hashid;
        uint64_t                sc_bulk_stateid;
        uint32_t                sc_bulk_creatorid;
        struct callout          sc_bulk_tmo;
        struct callout          sc_bulkfail_tmo;
};

#define PFSYNC_LOCK(sc)         mtx_lock(&(sc)->sc_mtx)
#define PFSYNC_UNLOCK(sc)       mtx_unlock(&(sc)->sc_mtx)
#define PFSYNC_LOCK_ASSERT(sc)  mtx_assert(&(sc)->sc_mtx, MA_OWNED)

#define PFSYNC_BUCKET_LOCK(b)           mtx_lock(&(b)->b_mtx)
#define PFSYNC_BUCKET_UNLOCK(b)         mtx_unlock(&(b)->b_mtx)
#define PFSYNC_BUCKET_LOCK_ASSERT(b)    mtx_assert(&(b)->b_mtx, MA_OWNED)

#define PFSYNC_BLOCK(sc)        mtx_lock(&(sc)->sc_bulk_mtx)
#define PFSYNC_BUNLOCK(sc)      mtx_unlock(&(sc)->sc_bulk_mtx)
#define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)

static const char pfsyncname[] = "pfsync";
static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
VNET_DEFINE_STATIC(struct pfsync_softc  *, pfsyncif) = NULL;
#define V_pfsyncif              VNET(pfsyncif)
VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
#define V_pfsync_swi_cookie     VNET(pfsync_swi_cookie)
VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie);
#define V_pfsync_swi_ie         VNET(pfsync_swi_ie)
VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
#define V_pfsyncstats           VNET(pfsyncstats)
VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define V_pfsync_carp_adj       VNET(pfsync_carp_adj)

static void     pfsync_timeout(void *);
static void     pfsync_push(struct pfsync_bucket *);
static void     pfsync_push_all(struct pfsync_softc *);
static void     pfsyncintr(void *);
static int      pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
                    struct in_mfilter *imf);
static void     pfsync_multicast_cleanup(struct pfsync_softc *);
static void     pfsync_pointers_init(void);
static void     pfsync_pointers_uninit(void);
static int      pfsync_init(void);
static void     pfsync_uninit(void);

static unsigned long pfsync_buckets;

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PFSYNC");
SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
    &pfsync_buckets, 0, "Number of pfsync hash buckets");

static int      pfsync_clone_create(struct if_clone *, int, caddr_t);
static void     pfsync_clone_destroy(struct ifnet *);
static int      pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
                    struct pf_state_peer *);
static int      pfsyncoutput(struct ifnet *, struct mbuf *,
                    const struct sockaddr *, struct route *);
static int      pfsyncioctl(struct ifnet *, u_long, caddr_t);

static int      pfsync_defer(struct pf_kstate *, struct mbuf *);
static void     pfsync_undefer(struct pfsync_deferral *, int);
static void     pfsync_undefer_state(struct pf_kstate *, int);
static void     pfsync_defer_tmo(void *);

static void     pfsync_request_update(u_int32_t, u_int64_t);
static bool     pfsync_update_state_req(struct pf_kstate *);

static void     pfsync_drop(struct pfsync_softc *);
static void     pfsync_sendout(int, int);
static void     pfsync_send_plus(void *, size_t);

static void     pfsync_bulk_start(void);
static void     pfsync_bulk_status(u_int8_t);
static void     pfsync_bulk_update(void *);
static void     pfsync_bulk_fail(void *);

static void     pfsync_detach_ifnet(struct ifnet *);

static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *,
    struct pfsync_kstatus *);
static int pfsync_kstatus_to_softc(struct pfsync_kstatus *,
    struct pfsync_softc *);

#ifdef IPSEC
static void     pfsync_update_net_tdb(struct pfsync_tdb *);
#endif
static struct pfsync_bucket     *pfsync_get_bucket(struct pfsync_softc *,
                    struct pf_kstate *);

#define PFSYNC_MAX_BULKTRIES    12
#define PFSYNC_DEFER_TIMEOUT    ((20 * hz) / 1000)

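/*
 * PFSYNC_DEFER_TIMEOUT converts 20 milliseconds into callout ticks:
 * hz is ticks per second, so (20 * hz) / 1000 is, for example, 20 ticks
 * with the default hz=1000 or 2 ticks with hz=100.  A deferred packet
 * is held at most this long while we wait for the peer to acknowledge
 * the corresponding state insert (see pfsync_in_iack()).
 */
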
VNET_DEFINE(struct if_clone *, pfsync_cloner);
#define V_pfsync_cloner VNET(pfsync_cloner)

static int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
{
        struct pfsync_softc *sc;
        struct ifnet *ifp;
        struct pfsync_bucket *b;
        int c, q;

        if (unit != 0)
                return (EINVAL);

        if (! pfsync_buckets)
                pfsync_buckets = mp_ncpus * 2;

        sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
        sc->sc_flags |= PFSYNCF_OK;
        sc->sc_maxupdates = 128;

        ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
        if (ifp == NULL) {
                free(sc, M_PFSYNC);
                return (ENOSPC);
        }
        if_initname(ifp, pfsyncname, unit);
        ifp->if_softc = sc;
        ifp->if_ioctl = pfsyncioctl;
        ifp->if_output = pfsyncoutput;
        ifp->if_type = IFT_PFSYNC;
        ifp->if_hdrlen = sizeof(struct pfsync_header);
        ifp->if_mtu = ETHERMTU;
        mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
        mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
        callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
        callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

        if_attach(ifp);

        bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

        sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
            M_PFSYNC, M_ZERO | M_WAITOK);
        for (c = 0; c < pfsync_buckets; c++) {
                b = &sc->sc_buckets[c];
                mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);

                b->b_id = c;
                b->b_sc = sc;
                b->b_len = PFSYNC_MINPKT;

                for (q = 0; q < PFSYNC_S_COUNT; q++)
                        TAILQ_INIT(&b->b_qs[q]);

                TAILQ_INIT(&b->b_upd_req_list);
                TAILQ_INIT(&b->b_deferrals);

                callout_init(&b->b_tmo, 1);

                b->b_snd.ifq_maxlen = ifqmaxlen;
        }

        V_pfsyncif = sc;

        return (0);
}
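
/*
 * Only unit 0 can be created, i.e. there is a single pfsync0 per vnet.
 * A typical (illustrative) configuration from userland, per pfsync(4):
 *
 *	# ifconfig pfsync0 syncdev em0 up
 *
 * which reaches pfsyncioctl() as SIOCSETPFSYNC and hooks the interface
 * to the sync device; "em0" here is only an example name.
 */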

static void
pfsync_clone_destroy(struct ifnet *ifp)
{
        struct pfsync_softc *sc = ifp->if_softc;
        struct pfsync_bucket *b;
        int c;

        for (c = 0; c < pfsync_buckets; c++) {
                b = &sc->sc_buckets[c];
                /*
                 * At this stage, everything should have already been
                 * cleared by pfsync_uninit(), and we have only to
                 * drain callouts.
                 */
                while (b->b_deferred > 0) {
                        struct pfsync_deferral *pd =
                            TAILQ_FIRST(&b->b_deferrals);

                        TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
                        b->b_deferred--;
                        if (callout_stop(&pd->pd_tmo) > 0) {
                                pf_release_state(pd->pd_st);
                                m_freem(pd->pd_m);
                                free(pd, M_PFSYNC);
                        } else {
                                pd->pd_refs++;
                                callout_drain(&pd->pd_tmo);
                                free(pd, M_PFSYNC);
                        }
                }

                callout_drain(&b->b_tmo);
        }

        callout_drain(&sc->sc_bulkfail_tmo);
        callout_drain(&sc->sc_bulk_tmo);

        if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
                (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
        bpfdetach(ifp);
        if_detach(ifp);

        pfsync_drop(sc);

        if_free(ifp);
        pfsync_multicast_cleanup(sc);
        mtx_destroy(&sc->sc_mtx);
        mtx_destroy(&sc->sc_bulk_mtx);

        free(sc->sc_buckets, M_PFSYNC);
        free(sc, M_PFSYNC);

        V_pfsyncif = NULL;
}

static int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
        if (s->scrub.scrub_flag && d->scrub == NULL) {
                d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
                if (d->scrub == NULL)
                        return (ENOMEM);
        }

        return (0);
}

static int
pfsync_state_import(struct pfsync_state *sp, int flags)
{
        struct pfsync_softc *sc = V_pfsyncif;
#ifndef __NO_STRICT_ALIGNMENT
        struct pfsync_state_key key[2];
#endif
        struct pfsync_state_key *kw, *ks;
        struct pf_kstate        *st = NULL;
        struct pf_state_key *skw = NULL, *sks = NULL;
        struct pf_krule *r = NULL;
        struct pfi_kkif *kif;
        int error;

        PF_RULES_RASSERT();

        if (sp->creatorid == 0) {
                if (V_pf_status.debug >= PF_DEBUG_MISC)
                        printf("%s: invalid creator id: %08x\n", __func__,
                            ntohl(sp->creatorid));
                return (EINVAL);
        }

        if ((kif = pfi_kkif_find(sp->ifname)) == NULL) {
                if (V_pf_status.debug >= PF_DEBUG_MISC)
                        printf("%s: unknown interface: %s\n", __func__,
                            sp->ifname);
                if (flags & PFSYNC_SI_IOCTL)
                        return (EINVAL);
                return (0);     /* skip this state */
        }

        /*
         * If the ruleset checksums match or the state is coming from the ioctl,
         * it's safe to associate the state with the rule of that number.
         */
        if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
            (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
            pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
                r = pf_main_ruleset.rules[
                    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
        else
                r = &V_pf_default_rule;

        if ((r->max_states &&
            counter_u64_fetch(r->states_cur) >= r->max_states))
                goto cleanup;

        /*
         * XXXGL: consider M_WAITOK in ioctl path after.
         */
        st = pf_alloc_state(M_NOWAIT);
        if (__predict_false(st == NULL))
                goto cleanup;

        if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
                goto cleanup;

#ifndef __NO_STRICT_ALIGNMENT
        bcopy(&sp->key, key, sizeof(struct pfsync_state_key) * 2);
        kw = &key[PF_SK_WIRE];
        ks = &key[PF_SK_STACK];
#else
        kw = &sp->key[PF_SK_WIRE];
        ks = &sp->key[PF_SK_STACK];
#endif

        if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->af) ||
            PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->af) ||
            kw->port[0] != ks->port[0] ||
            kw->port[1] != ks->port[1]) {
                sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
                if (sks == NULL)
                        goto cleanup;
        } else
                sks = skw;

        /* allocate memory for scrub info */
        if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
            pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
                goto cleanup;

        /* Copy to state key(s). */
        skw->addr[0] = kw->addr[0];
        skw->addr[1] = kw->addr[1];
        skw->port[0] = kw->port[0];
        skw->port[1] = kw->port[1];
        skw->proto = sp->proto;
        skw->af = sp->af;
        if (sks != skw) {
                sks->addr[0] = ks->addr[0];
                sks->addr[1] = ks->addr[1];
                sks->port[0] = ks->port[0];
                sks->port[1] = ks->port[1];
                sks->proto = sp->proto;
                sks->af = sp->af;
        }

        /* copy to state */
        bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
        st->creation = time_uptime - ntohl(sp->creation);
        st->expire = time_uptime;
        if (sp->expire) {
                uint32_t timeout;

                timeout = r->timeout[sp->timeout];
                if (!timeout)
                        timeout = V_pf_default_rule.timeout[sp->timeout];

                /* sp->expire may have been adaptively scaled by export. */
                st->expire -= timeout - ntohl(sp->expire);
        }
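
        /*
         * The subtraction above preserves remaining lifetime rather
         * than an absolute timestamp: the peer exported
         * ntohl(sp->expire) seconds of life left (possibly adaptively
         * scaled), so with a full timeout of `timeout' seconds we
         * back-date st->expire by (timeout - remaining) and the usual
         * expiry test (roughly st->expire + timeout against
         * time_uptime, see pf_state_expires()) comes out the same on
         * this host.
         */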

        st->direction = sp->direction;
        st->log = sp->log;
        st->timeout = sp->timeout;
        st->state_flags = sp->state_flags;

        st->id = sp->id;
        st->creatorid = sp->creatorid;
        pf_state_peer_ntoh(&sp->src, &st->src);
        pf_state_peer_ntoh(&sp->dst, &st->dst);

        st->rule.ptr = r;
        st->nat_rule.ptr = NULL;
        st->anchor.ptr = NULL;
        st->rt_kif = NULL;

        st->pfsync_time = time_uptime;
        st->sync_state = PFSYNC_S_NONE;

        if (!(flags & PFSYNC_SI_IOCTL))
                st->state_flags |= PFSTATE_NOSYNC;

        if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0)
                goto cleanup_state;

        /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
        counter_u64_add(r->states_cur, 1);
        counter_u64_add(r->states_tot, 1);

        if (!(flags & PFSYNC_SI_IOCTL)) {
                st->state_flags &= ~PFSTATE_NOSYNC;
                if (st->state_flags & PFSTATE_ACK) {
                        pfsync_q_ins(st, PFSYNC_S_IACK, true);
                        pfsync_push_all(sc);
                }
        }
        st->state_flags &= ~PFSTATE_ACK;
        PF_STATE_UNLOCK(st);

        return (0);

cleanup:
        error = ENOMEM;
        if (skw == sks)
                sks = NULL;
        if (skw != NULL)
                uma_zfree(V_pf_state_key_z, skw);
        if (sks != NULL)
                uma_zfree(V_pf_state_key_z, sks);

cleanup_state:  /* pf_state_insert() frees the state keys. */
        if (st) {
                st->timeout = PFTM_UNLINKED; /* appease an assert */
                pf_free_state(st);
        }
        return (error);
}

#ifdef INET
static int
pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct mbuf *m = *mp;
        struct ip *ip = mtod(m, struct ip *);
        struct pfsync_header *ph;
        struct pfsync_subheader subh;

        int offset, len, flags = 0;
        int rv;
        uint16_t count;

        PF_RULES_RLOCK_TRACKER;

        *mp = NULL;
        V_pfsyncstats.pfsyncs_ipackets++;

        /* Verify that we have a sync interface configured. */
        if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
            (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                goto done;

        /* verify that the packet came in on the right interface */
        if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
                V_pfsyncstats.pfsyncs_badif++;
                goto done;
        }

        if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
        if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
        /* verify that the IP TTL is 255. */
        if (ip->ip_ttl != PFSYNC_DFLTTL) {
                V_pfsyncstats.pfsyncs_badttl++;
                goto done;
        }

        offset = ip->ip_hl << 2;
        if (m->m_pkthdr.len < offset + sizeof(*ph)) {
                V_pfsyncstats.pfsyncs_hdrops++;
                goto done;
        }

        if (offset + sizeof(*ph) > m->m_len) {
                if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
                        V_pfsyncstats.pfsyncs_hdrops++;
                        return (IPPROTO_DONE);
                }
                ip = mtod(m, struct ip *);
        }
        ph = (struct pfsync_header *)((char *)ip + offset);

        /* verify the version */
        if (ph->version != PFSYNC_VERSION) {
                V_pfsyncstats.pfsyncs_badver++;
                goto done;
        }

        len = ntohs(ph->len) + offset;
        if (m->m_pkthdr.len < len) {
                V_pfsyncstats.pfsyncs_badlen++;
                goto done;
        }

        /*
         * Trusting pf_chksum during packet processing, as well as seeking
         * in the interface name tree, requires holding PF_RULES_RLOCK().
         */
        PF_RULES_RLOCK();
        if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
                flags = PFSYNC_SI_CKSUM;

        offset += sizeof(*ph);
        while (offset <= len - sizeof(subh)) {
                m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
                offset += sizeof(subh);

                if (subh.action >= PFSYNC_ACT_MAX) {
                        V_pfsyncstats.pfsyncs_badact++;
                        PF_RULES_RUNLOCK();
                        goto done;
                }

                count = ntohs(subh.count);
                V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
                rv = (*pfsync_acts[subh.action])(m, offset, count, flags);
                if (rv == -1) {
                        PF_RULES_RUNLOCK();
                        return (IPPROTO_DONE);
                }

                offset += rv;
        }
        PF_RULES_RUNLOCK();

done:
        m_freem(m);
        return (IPPROTO_DONE);
}
#endif
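
/*
 * On the wire, a pfsync datagram as parsed above looks like (IPv4 case):
 *
 *	+-----------+---------------+------------+-------...--------+
 *	| struct ip | pfsync_header | subheader  | count entries    |
 *	|           | version, len, | action,    | sized per action |
 *	|           | pfcksum       | count      | (pfsync_qs .len) |
 *	+-----------+---------------+------------+-------...--------+
 *	            ... followed by further subheader/payload runs
 *	            until ph->len bytes have been consumed.
 *
 * This is only a reading aid distilled from the loop above; see
 * net/if_pfsync.h for the authoritative structure definitions.
 */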

static int
pfsync_in_clr(struct mbuf *m, int offset, int count, int flags)
{
        struct pfsync_clr *clr;
        struct mbuf *mp;
        int len = sizeof(*clr) * count;
        int i, offp;
        u_int32_t creatorid;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        clr = (struct pfsync_clr *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                creatorid = clr[i].creatorid;

                if (clr[i].ifname[0] != '\0' &&
                    pfi_kkif_find(clr[i].ifname) == NULL)
                        continue;

                for (int i = 0; i <= pf_hashmask; i++) {
                        struct pf_idhash *ih = &V_pf_idhash[i];
                        struct pf_kstate *s;
relock:
                        PF_HASHROW_LOCK(ih);
                        LIST_FOREACH(s, &ih->states, entry) {
                                if (s->creatorid == creatorid) {
                                        s->state_flags |= PFSTATE_NOSYNC;
                                        pf_unlink_state(s);
                                        goto relock;
                                }
                        }
                        PF_HASHROW_UNLOCK(ih);
                }
        }

        return (len);
}

static int
pfsync_in_ins(struct mbuf *m, int offset, int count, int flags)
{
        struct mbuf *mp;
        struct pfsync_state *sa, *sp;
        int len = sizeof(*sp) * count;
        int i, offp;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_state *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                /* Check for invalid values. */
                if (sp->timeout >= PFTM_MAX ||
                    sp->src.state > PF_TCPS_PROXY_DST ||
                    sp->dst.state > PF_TCPS_PROXY_DST ||
                    sp->direction > PF_OUT ||
                    (sp->af != AF_INET && sp->af != AF_INET6)) {
                        if (V_pf_status.debug >= PF_DEBUG_MISC)
                                printf("%s: invalid value\n", __func__);
                        V_pfsyncstats.pfsyncs_badval++;
                        continue;
                }

                if (pfsync_state_import(sp, flags) == ENOMEM)
                        /* Drop out, but process the rest of the actions. */
                        break;
        }

        return (len);
}

static int
pfsync_in_iack(struct mbuf *m, int offset, int count, int flags)
{
        struct pfsync_ins_ack *ia, *iaa;
        struct pf_kstate *st;

        struct mbuf *mp;
        int len = count * sizeof(*ia);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                ia = &iaa[i];

                st = pf_find_state_byid(ia->id, ia->creatorid);
                if (st == NULL)
                        continue;

                if (st->state_flags & PFSTATE_ACK) {
                        pfsync_undefer_state(st, 0);
                }
                PF_STATE_UNLOCK(st);
        }
        /*
         * XXX this is not yet implemented, but we know the size of the
         * message so we can skip it.
         */

        return (count * sizeof(struct pfsync_ins_ack));
}

static int
pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
        int sync = 0;

        PF_STATE_LOCK_ASSERT(st);

        /*
         * The state should never go backwards except
         * for syn-proxy states.  Neither should the
         * sequence window slide backwards.
         */
        if ((st->src.state > src->state &&
            (st->src.state < PF_TCPS_PROXY_SRC ||
            src->state >= PF_TCPS_PROXY_SRC)) ||

            (st->src.state == src->state &&
            SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
                sync++;
        else
                pf_state_peer_ntoh(src, &st->src);

        if ((st->dst.state > dst->state) ||

            (st->dst.state >= TCPS_SYN_SENT &&
            SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
                sync++;
        else
                pf_state_peer_ntoh(dst, &st->dst);

        return (sync);
}
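
/*
 * The return value counts peers (0..2) whose advertised state was
 * stale, i.e. would have moved our state or sequence window backwards.
 * Callers treat nonzero as "our copy is newer": they bump
 * pfsyncs_stale and schedule an update of their own
 * (pfsync_update_state() + pfsync_push_all()), and only apply the
 * peer's dst side and refresh st->expire when fewer than both sides
 * were stale.
 */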

static int
pfsync_in_upd(struct mbuf *m, int offset, int count, int flags)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct pfsync_state *sa, *sp;
        struct pf_kstate *st;
        int sync;

        struct mbuf *mp;
        int len = count * sizeof(*sp);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_state *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                /* check for invalid values */
                if (sp->timeout >= PFTM_MAX ||
                    sp->src.state > PF_TCPS_PROXY_DST ||
                    sp->dst.state > PF_TCPS_PROXY_DST) {
                        if (V_pf_status.debug >= PF_DEBUG_MISC) {
                                printf("pfsync_input: PFSYNC_ACT_UPD: "
                                    "invalid value\n");
                        }
                        V_pfsyncstats.pfsyncs_badval++;
                        continue;
                }

                st = pf_find_state_byid(sp->id, sp->creatorid);
                if (st == NULL) {
                        /* insert the update */
                        if (pfsync_state_import(sp, flags))
                                V_pfsyncstats.pfsyncs_badstate++;
                        continue;
                }

                if (st->state_flags & PFSTATE_ACK) {
                        pfsync_undefer_state(st, 1);
                }

                if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
                        sync = pfsync_upd_tcp(st, &sp->src, &sp->dst);
                else {
                        sync = 0;

                        /*
                         * Non-TCP protocol state machines always go
                         * forwards.
                         */
                        if (st->src.state > sp->src.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&sp->src, &st->src);
                        if (st->dst.state > sp->dst.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&sp->dst, &st->dst);
                }
                if (sync < 2) {
                        pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
                        pf_state_peer_ntoh(&sp->dst, &st->dst);
                        st->expire = time_uptime;
                        st->timeout = sp->timeout;
                }
                st->pfsync_time = time_uptime;

                if (sync) {
                        V_pfsyncstats.pfsyncs_stale++;

                        pfsync_update_state(st);
                        PF_STATE_UNLOCK(st);
                        pfsync_push_all(sc);
                        continue;
                }
                PF_STATE_UNLOCK(st);
        }

        return (len);
}

static int
pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct pfsync_upd_c *ua, *up;
        struct pf_kstate *st;
        int len = count * sizeof(*up);
        int sync;
        struct mbuf *mp;
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        ua = (struct pfsync_upd_c *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                up = &ua[i];

                /* check for invalid values */
                if (up->timeout >= PFTM_MAX ||
                    up->src.state > PF_TCPS_PROXY_DST ||
                    up->dst.state > PF_TCPS_PROXY_DST) {
                        if (V_pf_status.debug >= PF_DEBUG_MISC) {
                                printf("pfsync_input: "
                                    "PFSYNC_ACT_UPD_C: "
                                    "invalid value\n");
                        }
                        V_pfsyncstats.pfsyncs_badval++;
                        continue;
                }

                st = pf_find_state_byid(up->id, up->creatorid);
                if (st == NULL) {
                        /* We don't have this state. Ask for it. */
                        PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
                        pfsync_request_update(up->creatorid, up->id);
                        PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
                        continue;
                }

                if (st->state_flags & PFSTATE_ACK) {
                        pfsync_undefer_state(st, 1);
                }

                if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
                        sync = pfsync_upd_tcp(st, &up->src, &up->dst);
                else {
                        sync = 0;

                        /*
                         * Non-TCP protocol state machines always go
                         * forwards.
                         */
                        if (st->src.state > up->src.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&up->src, &st->src);
                        if (st->dst.state > up->dst.state)
                                sync++;
                        else
                                pf_state_peer_ntoh(&up->dst, &st->dst);
                }
                if (sync < 2) {
                        pfsync_alloc_scrub_memory(&up->dst, &st->dst);
                        pf_state_peer_ntoh(&up->dst, &st->dst);
                        st->expire = time_uptime;
                        st->timeout = up->timeout;
                }
                st->pfsync_time = time_uptime;

                if (sync) {
                        V_pfsyncstats.pfsyncs_stale++;

                        pfsync_update_state(st);
                        PF_STATE_UNLOCK(st);
                        pfsync_push_all(sc);
                        continue;
                }
                PF_STATE_UNLOCK(st);
        }

        return (len);
}

static int
pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags)
{
        struct pfsync_upd_req *ur, *ura;
        struct mbuf *mp;
        int len = count * sizeof(*ur);
        int i, offp;

        struct pf_kstate *st;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        ura = (struct pfsync_upd_req *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                ur = &ura[i];

                if (ur->id == 0 && ur->creatorid == 0)
                        pfsync_bulk_start();
                else {
                        st = pf_find_state_byid(ur->id, ur->creatorid);
                        if (st == NULL) {
                                V_pfsyncstats.pfsyncs_badstate++;
                                continue;
                        }
                        if (st->state_flags & PFSTATE_NOSYNC) {
                                PF_STATE_UNLOCK(st);
                                continue;
                        }

                        pfsync_update_state_req(st);
                        PF_STATE_UNLOCK(st);
                }
        }

        return (len);
}
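
/*
 * An update request with both id and creatorid zero is the peer asking
 * for a full bulk transfer (pfsync_bulk_start()); anything else names a
 * single state, which we answer by queueing an update for it via
 * pfsync_update_state_req(), unless the state is flagged
 * PFSTATE_NOSYNC.
 */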

static int
pfsync_in_del(struct mbuf *m, int offset, int count, int flags)
{
        struct mbuf *mp;
        struct pfsync_state *sa, *sp;
        struct pf_kstate *st;
        int len = count * sizeof(*sp);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_state *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                st = pf_find_state_byid(sp->id, sp->creatorid);
                if (st == NULL) {
                        V_pfsyncstats.pfsyncs_badstate++;
                        continue;
                }
                st->state_flags |= PFSTATE_NOSYNC;
                pf_unlink_state(st);
        }

        return (len);
}

static int
pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags)
{
        struct mbuf *mp;
        struct pfsync_del_c *sa, *sp;
        struct pf_kstate *st;
        int len = count * sizeof(*sp);
        int offp, i;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        sa = (struct pfsync_del_c *)(mp->m_data + offp);

        for (i = 0; i < count; i++) {
                sp = &sa[i];

                st = pf_find_state_byid(sp->id, sp->creatorid);
                if (st == NULL) {
                        V_pfsyncstats.pfsyncs_badstate++;
                        continue;
                }

                st->state_flags |= PFSTATE_NOSYNC;
                pf_unlink_state(st);
        }

        return (len);
}

static int
pfsync_in_bus(struct mbuf *m, int offset, int count, int flags)
{
        struct pfsync_softc *sc = V_pfsyncif;
        struct pfsync_bus *bus;
        struct mbuf *mp;
        int len = count * sizeof(*bus);
        int offp;

        PFSYNC_BLOCK(sc);

        /* If we're not waiting for a bulk update, who cares. */
        if (sc->sc_ureq_sent == 0) {
                PFSYNC_BUNLOCK(sc);
                return (len);
        }

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                PFSYNC_BUNLOCK(sc);
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        bus = (struct pfsync_bus *)(mp->m_data + offp);

        switch (bus->status) {
        case PFSYNC_BUS_START:
                callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
                    V_pf_limits[PF_LIMIT_STATES].limit /
                    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
                    sizeof(struct pfsync_state)),
                    pfsync_bulk_fail, sc);
                if (V_pf_status.debug >= PF_DEBUG_MISC)
                        printf("pfsync: received bulk update start\n");
                break;

        case PFSYNC_BUS_END:
                if (time_uptime - ntohl(bus->endtime) >=
                    sc->sc_ureq_sent) {
                        /* that's it, we're happy */
                        sc->sc_ureq_sent = 0;
                        sc->sc_bulk_tries = 0;
                        callout_stop(&sc->sc_bulkfail_tmo);
                        if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
                                (*carp_demote_adj_p)(-V_pfsync_carp_adj,
                                    "pfsync bulk done");
                        sc->sc_flags |= PFSYNCF_OK;
                        if (V_pf_status.debug >= PF_DEBUG_MISC)
                                printf("pfsync: received valid "
                                    "bulk update end\n");
                } else {
                        if (V_pf_status.debug >= PF_DEBUG_MISC)
                                printf("pfsync: received invalid "
                                    "bulk update end: bad timestamp\n");
                }
                break;
        }
        PFSYNC_BUNLOCK(sc);

        return (len);
}
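
/*
 * The bulk-fail timeout armed on PFSYNC_BUS_START above works out to
 * 4 * hz + limit / states_per_packet ticks: a flat four seconds plus
 * one tick per full packet the peer may need to send, where
 * states_per_packet is (if_mtu - PFSYNC_MINPKT) /
 * sizeof(struct pfsync_state).  Purely as an illustration, with
 * hz=1000, a 1500-byte MTU and a 10000-state limit this adds a few
 * seconds on top of the flat four.
 */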

static int
pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags)
{
        int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
        struct pfsync_tdb *tp;
        struct mbuf *mp;
        int offp;
        int i;
        int s;

        mp = m_pulldown(m, offset, len, &offp);
        if (mp == NULL) {
                V_pfsyncstats.pfsyncs_badlen++;
                return (-1);
        }
        tp = (struct pfsync_tdb *)(mp->m_data + offp);

        for (i = 0; i < count; i++)
                pfsync_update_net_tdb(&tp[i]);
#endif

        return (len);
}

#if defined(IPSEC)
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
static void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
        struct tdb              *tdb;
        int                      s;

        /* check for invalid values */
        if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
            (pt->dst.sa.sa_family != AF_INET &&
            pt->dst.sa.sa_family != AF_INET6))
                goto bad;

        tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
        if (tdb) {
                pt->rpl = ntohl(pt->rpl);
                pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);

                /* Neither replay nor byte counter should ever decrease. */
                if (pt->rpl < tdb->tdb_rpl ||
                    pt->cur_bytes < tdb->tdb_cur_bytes) {
                        goto bad;
                }

                tdb->tdb_rpl = pt->rpl;
                tdb->tdb_cur_bytes = pt->cur_bytes;
        }
        return;

bad:
        if (V_pf_status.debug >= PF_DEBUG_MISC)
                printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
                    "invalid value\n");
        V_pfsyncstats.pfsyncs_badstate++;
        return;
}
#endif

static int
pfsync_in_eof(struct mbuf *m, int offset, int count, int flags)
{
        /* check if we are at the right place in the packet */
        if (offset != m->m_pkthdr.len)
                V_pfsyncstats.pfsyncs_badlen++;

        /* we're done. free and let the caller return */
        m_freem(m);
        return (-1);
}

static int
pfsync_in_error(struct mbuf *m, int offset, int count, int flags)
{
        V_pfsyncstats.pfsyncs_badact++;

        m_freem(m);
        return (-1);
}

static int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
        struct route *rt)
{
        m_freem(m);
        return (0);
}

/* ARGSUSED */
static int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct pfsync_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct pfsyncreq pfsyncr;
        size_t nvbuflen;
        int error;
        int c;

        switch (cmd) {
        case SIOCSIFFLAGS:
                PFSYNC_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        ifp->if_drv_flags |= IFF_DRV_RUNNING;
                        PFSYNC_UNLOCK(sc);
                        pfsync_pointers_init();
                } else {
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        PFSYNC_UNLOCK(sc);
                        pfsync_pointers_uninit();
                }
                break;
        case SIOCSIFMTU:
                if (!sc->sc_sync_if ||
                    ifr->ifr_mtu <= PFSYNC_MINPKT ||
                    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
                        return (EINVAL);
                if (ifr->ifr_mtu < ifp->if_mtu) {
                        for (c = 0; c < pfsync_buckets; c++) {
                                PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
                                if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
                                        pfsync_sendout(1, c);
                                PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
                        }
                }
                ifp->if_mtu = ifr->ifr_mtu;
                break;
        case SIOCGETPFSYNC:
                bzero(&pfsyncr, sizeof(pfsyncr));
                PFSYNC_LOCK(sc);
                if (sc->sc_sync_if) {
                        strlcpy(pfsyncr.pfsyncr_syncdev,
                            sc->sc_sync_if->if_xname, IFNAMSIZ);
                }
                pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
                pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
                pfsyncr.pfsyncr_defer = sc->sc_flags;
                PFSYNC_UNLOCK(sc);
                return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
                    sizeof(pfsyncr)));

        case SIOCGETPFSYNCNV:
            {
                nvlist_t *nvl_syncpeer;
                nvlist_t *nvl = nvlist_create(0);

                if (nvl == NULL)
                        return (ENOMEM);

                if (sc->sc_sync_if)
                        nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname);
                nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates);
                nvlist_add_number(nvl, "flags", sc->sc_flags);
                if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL)
                        nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer);

                void *packed = NULL;
                packed = nvlist_pack(nvl, &nvbuflen);
                if (packed == NULL) {
                        free(packed, M_NVLIST);
                        nvlist_destroy(nvl);
                        return (ENOMEM);
                }

                if (nvbuflen > ifr->ifr_cap_nv.buf_length) {
                        ifr->ifr_cap_nv.length = nvbuflen;
                        ifr->ifr_cap_nv.buffer = NULL;
                        free(packed, M_NVLIST);
                        nvlist_destroy(nvl);
                        return (EFBIG);
                }
 1402 
 1403                 ifr->ifr_cap_nv.length = nvbuflen;
 1404                 error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen);
 1405 
 1406                 nvlist_destroy(nvl);
 1407                 nvlist_destroy(nvl_syncpeer);
 1408                 free(packed, M_NVLIST);
 1409                 break;
 1410             }
 1411 
 1412         case SIOCSETPFSYNC:
 1413             {
 1414                 struct pfsync_kstatus status;
 1415 
 1416                 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
 1417                         return (error);
 1418                 if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr,
 1419                     sizeof(pfsyncr))))
 1420                         return (error);
 1421 
 1422                 memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
 1423                 pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status);
 1424 
 1425                 error = pfsync_kstatus_to_softc(&status, sc);
 1426                 return (error);
 1427             }
 1428         case SIOCSETPFSYNCNV:
 1429             {
 1430                 struct pfsync_kstatus status;
 1431                 void *data;
 1432                 nvlist_t *nvl;
 1433 
 1434                 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
 1435                         return (error);
 1436                 if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE)
 1437                         return (EINVAL);
 1438 
 1439                 data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK);
 1440 
 1441                 if ((error = copyin(ifr->ifr_cap_nv.buffer, data,
 1442                     ifr->ifr_cap_nv.length)) != 0) {
 1443                         free(data, M_TEMP);
 1444                         return (error);
 1445                 }
 1446 
 1447                 if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) {
 1448                         free(data, M_TEMP);
 1449                         return (EINVAL);
 1450                 }
 1451 
 1452                 memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
 1453                 pfsync_nvstatus_to_kstatus(nvl, &status);
 1454 
 1455                 nvlist_destroy(nvl);
 1456                 free(data, M_TEMP);
 1457 
 1458                 error = pfsync_kstatus_to_softc(&status, sc);
 1459                 return (error);
 1460             }
 1461         default:
 1462                 return (ENOTTY);
 1463         }
 1464 
 1465         return (0);
 1466 }
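
      /*
       * A minimal userland sketch (hypothetical, not part of this file) of
       * the SIOCGETPFSYNCNV path above.  The updated ifreq is copied back
       * to userland only when the ioctl succeeds, so the simplest client
       * allocates the maximum buffer up front instead of retrying on EFBIG;
       * "s" is assumed to be any open datagram socket:
       *
       *	struct ifreq ifr;
       *	nvlist_t *nvl;
       *	void *buf;
       *
       *	memset(&ifr, 0, sizeof(ifr));
       *	strlcpy(ifr.ifr_name, "pfsync0", sizeof(ifr.ifr_name));
       *	if ((buf = malloc(IFR_CAP_NV_MAXBUFSIZE)) == NULL)
       *		err(1, "malloc");
       *	ifr.ifr_cap_nv.buf_length = IFR_CAP_NV_MAXBUFSIZE;
       *	ifr.ifr_cap_nv.buffer = buf;
       *	if (ioctl(s, SIOCGETPFSYNCNV, &ifr) == -1)
       *		err(1, "SIOCGETPFSYNCNV");
       *	nvl = nvlist_unpack(buf, ifr.ifr_cap_nv.length, 0);
       *	free(buf);
       *	if (nvl != NULL)
       *		printf("maxupdates: %ju\n",
       *		    (uintmax_t)nvlist_get_number(nvl, "maxupdates"));
       */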
 1467 
 1468 static void
 1469 pfsync_out_state(struct pf_kstate *st, void *buf)
 1470 {
 1471         struct pfsync_state *sp = buf;
 1472 
 1473         pfsync_state_export(sp, st);
 1474 }
 1475 
 1476 static void
 1477 pfsync_out_iack(struct pf_kstate *st, void *buf)
 1478 {
 1479         struct pfsync_ins_ack *iack = buf;
 1480 
 1481         iack->id = st->id;
 1482         iack->creatorid = st->creatorid;
 1483 }
 1484 
 1485 static void
 1486 pfsync_out_upd_c(struct pf_kstate *st, void *buf)
 1487 {
 1488         struct pfsync_upd_c *up = buf;
 1489 
 1490         bzero(up, sizeof(*up));
 1491         up->id = st->id;
 1492         pf_state_peer_hton(&st->src, &up->src);
 1493         pf_state_peer_hton(&st->dst, &up->dst);
 1494         up->creatorid = st->creatorid;
 1495         up->timeout = st->timeout;
 1496 }
 1497 
 1498 static void
 1499 pfsync_out_del(struct pf_kstate *st, void *buf)
 1500 {
 1501         struct pfsync_del_c *dp = buf;
 1502 
 1503         dp->id = st->id;
 1504         dp->creatorid = st->creatorid;
 1505         st->state_flags |= PFSTATE_NOSYNC;
 1506 }
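
      /*
       * The writers above are dispatched through the pfsync_qs[] descriptor
       * table defined earlier in this file; pfsync_sendout() consumes only
       * its .write, .len and .action members.  A sketch of its presumable
       * shape (not a verbatim copy of the real table):
       *
       *	static struct pfsync_q {
       *		void		(*write)(struct pf_kstate *, void *);
       *		size_t		len;
       *		u_int8_t	action;
       *	} pfsync_qs[PFSYNC_S_COUNT] = {
       *		{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
       *		{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
       *		{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
       *		{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
       *		{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C },
       *	};
       */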
 1507 
 1508 static void
 1509 pfsync_drop(struct pfsync_softc *sc)
 1510 {
 1511         struct pf_kstate *st, *next;
 1512         struct pfsync_upd_req_item *ur;
 1513         struct pfsync_bucket *b;
 1514         int c, q;
 1515 
 1516         for (c = 0; c < pfsync_buckets; c++) {
 1517                 b = &sc->sc_buckets[c];
 1518                 for (q = 0; q < PFSYNC_S_COUNT; q++) {
 1519                         if (TAILQ_EMPTY(&b->b_qs[q]))
 1520                                 continue;
 1521 
 1522                         TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
 1523                         KASSERT(st->sync_state == q,
 1524                                 ("%s: st->sync_state %u != q %d",
 1525                                         __func__, st->sync_state, q));
 1526                                 st->sync_state = PFSYNC_S_NONE;
 1527                                 pf_release_state(st);
 1528                         }
 1529                         TAILQ_INIT(&b->b_qs[q]);
 1530                 }
 1531 
 1532                 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
 1533                         TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
 1534                         free(ur, M_PFSYNC);
 1535                 }
 1536 
 1537                 b->b_len = PFSYNC_MINPKT;
 1538                 b->b_plus = NULL;
 1539         }
 1540 }
 1541 
 1542 static void
 1543 pfsync_sendout(int schedswi, int c)
 1544 {
 1545         struct pfsync_softc *sc = V_pfsyncif;
 1546         struct ifnet *ifp = sc->sc_ifp;
 1547         struct mbuf *m;
 1548         struct pfsync_header *ph;
 1549         struct pfsync_subheader *subh;
 1550         struct pf_kstate *st, *st_next;
 1551         struct pfsync_upd_req_item *ur;
 1552         struct pfsync_bucket *b = &sc->sc_buckets[c];
 1553         int aflen, offset;
 1554         int q, count = 0;
 1555 
 1556         KASSERT(sc != NULL, ("%s: null sc", __func__));
 1557         KASSERT(b->b_len > PFSYNC_MINPKT,
 1558             ("%s: b_len %zu", __func__, b->b_len));
 1559         PFSYNC_BUCKET_LOCK_ASSERT(b);
 1560 
 1561         if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
 1562                 pfsync_drop(sc);
 1563                 return;
 1564         }
 1565 
 1566         m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
 1567         if (m == NULL) {
 1568                 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
 1569                 V_pfsyncstats.pfsyncs_onomem++;
 1570                 return;
 1571         }
 1572         m->m_data += max_linkhdr;
 1573         m->m_len = m->m_pkthdr.len = b->b_len;
 1574 
 1575         /* build the ip header */
 1576         switch (sc->sc_sync_peer.ss_family) {
 1577 #ifdef INET
 1578         case AF_INET:
 1579             {
 1580                 struct ip *ip;
 1581 
 1582                 ip = mtod(m, struct ip *);
 1583                 bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip));
 1584                 aflen = offset = sizeof(*ip);
 1585 
 1586                 ip->ip_len = htons(m->m_pkthdr.len);
 1587                 ip_fillid(ip);
 1588                 break;
 1589             }
 1590 #endif
 1591         default:
 1592                 m_freem(m);
 1593                 return;
 1594         }
 1595 
 1596 
 1597         /* build the pfsync header */
 1598         ph = (struct pfsync_header *)(m->m_data + offset);
 1599         bzero(ph, sizeof(*ph));
 1600         offset += sizeof(*ph);
 1601 
 1602         ph->version = PFSYNC_VERSION;
 1603         ph->len = htons(b->b_len - aflen);
 1604         bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
 1605 
 1606         /* walk the queues */
 1607         for (q = 0; q < PFSYNC_S_COUNT; q++) {
 1608                 if (TAILQ_EMPTY(&b->b_qs[q]))
 1609                         continue;
 1610 
 1611                 subh = (struct pfsync_subheader *)(m->m_data + offset);
 1612                 offset += sizeof(*subh);
 1613 
 1614                 count = 0;
 1615                 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
 1616                         KASSERT(st->sync_state == q,
 1617                                 ("%s: st->sync_state %u != q %d",
 1618                                         __func__, st->sync_state, q));
 1619                         /*
 1620                          * XXXGL: some of the write methods do unlocked reads
 1621                          * of state data :(
 1622                          */
 1623                         pfsync_qs[q].write(st, m->m_data + offset);
 1624                         offset += pfsync_qs[q].len;
 1625                         st->sync_state = PFSYNC_S_NONE;
 1626                         pf_release_state(st);
 1627                         count++;
 1628                 }
 1629                 TAILQ_INIT(&b->b_qs[q]);
 1630 
 1631                 bzero(subh, sizeof(*subh));
 1632                 subh->action = pfsync_qs[q].action;
 1633                 subh->count = htons(count);
 1634                 V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
 1635         }
 1636 
 1637         if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
 1638                 subh = (struct pfsync_subheader *)(m->m_data + offset);
 1639                 offset += sizeof(*subh);
 1640 
 1641                 count = 0;
 1642                 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
 1643                         TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
 1644 
 1645                         bcopy(&ur->ur_msg, m->m_data + offset,
 1646                             sizeof(ur->ur_msg));
 1647                         offset += sizeof(ur->ur_msg);
 1648                         free(ur, M_PFSYNC);
 1649                         count++;
 1650                 }
 1651 
 1652                 bzero(subh, sizeof(*subh));
 1653                 subh->action = PFSYNC_ACT_UPD_REQ;
 1654                 subh->count = htons(count);
 1655                 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
 1656         }
 1657 
 1658         /* has someone built a custom region for us to add? */
 1659         if (b->b_plus != NULL) {
 1660                 bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
 1661                 offset += b->b_pluslen;
 1662 
 1663                 b->b_plus = NULL;
 1664         }
 1665 
 1666         subh = (struct pfsync_subheader *)(m->m_data + offset);
 1667         offset += sizeof(*subh);
 1668 
 1669         bzero(subh, sizeof(*subh));
 1670         subh->action = PFSYNC_ACT_EOF;
 1671         subh->count = htons(1);
 1672         V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;
 1673 
 1674         /* we're done, let's put it on the wire */
 1675         if (ifp->if_bpf) {
 1676                 m->m_data += aflen;
 1677                 m->m_len = m->m_pkthdr.len = b->b_len - aflen;
 1678                 BPF_MTAP(ifp, m);
 1679                 m->m_data -= aflen;
 1680                 m->m_len = m->m_pkthdr.len = b->b_len;
 1681         }
 1682 
 1683         if (sc->sc_sync_if == NULL) {
 1684                 b->b_len = PFSYNC_MINPKT;
 1685                 m_freem(m);
 1686                 return;
 1687         }
 1688 
 1689         if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
 1690         if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
 1691         b->b_len = PFSYNC_MINPKT;
 1692 
 1693         if (!_IF_QFULL(&b->b_snd))
 1694                 _IF_ENQUEUE(&b->b_snd, m);
 1695         else {
 1696                 m_freem(m);
 1697                 if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
 1698         }
 1699         if (schedswi)
 1700                 swi_sched(V_pfsync_swi_cookie, 0);
 1701 }
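
      /*
       * The datagram assembled by pfsync_sendout() above is laid out as:
       *
       *	struct ip			(IPv4 header from sc_template)
       *	struct pfsync_header		(version, len, ruleset checksum)
       *	for each non-empty queue q:
       *		struct pfsync_subheader	(pfsync_qs[q].action, count)
       *		<count> records written by pfsync_qs[q].write
       *	optionally:
       *		struct pfsync_subheader	(PFSYNC_ACT_UPD_REQ, count)
       *		<count> struct pfsync_upd_req
       *	optionally the b_plus region	(e.g. a CLR or BUS message)
       *	struct pfsync_subheader		(PFSYNC_ACT_EOF, count 1)
       *
       * b_len tracks exactly this size, which is why pfsync_q_ins() and
       * pfsync_q_del() below charge and refund subheader space as a queue
       * becomes non-empty or drains.
       */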
 1702 
 1703 static void
 1704 pfsync_insert_state(struct pf_kstate *st)
 1705 {
 1706         struct pfsync_softc *sc = V_pfsyncif;
 1707         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 1708 
 1709         if (st->state_flags & PFSTATE_NOSYNC)
 1710                 return;
 1711 
 1712         if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
 1713             st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
 1714                 st->state_flags |= PFSTATE_NOSYNC;
 1715                 return;
 1716         }
 1717 
 1718         KASSERT(st->sync_state == PFSYNC_S_NONE,
 1719                 ("%s: st->sync_state %u", __func__, st->sync_state));
 1720 
 1721         PFSYNC_BUCKET_LOCK(b);
 1722         if (b->b_len == PFSYNC_MINPKT)
 1723                 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
 1724 
 1725         pfsync_q_ins(st, PFSYNC_S_INS, true);
 1726         PFSYNC_BUCKET_UNLOCK(b);
 1727 
 1728         st->sync_updates = 0;
 1729 }
 1730 
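      /*
       * Packet deferral: instead of letting the packet that created a state
       * leave the box immediately, pfsync_defer() parks it (when 128 or more
       * deferrals are already pending in a bucket, the oldest one is
       * released first) and marks the state PFSTATE_ACK.  The mbuf is sent
       * either when the peer acknowledges the new state
       * (pfsync_undefer_state()) or when the PFSYNC_DEFER_TIMEOUT callout
       * fires, which gives the peer a head start to learn the state before
       * a reply can arrive.
       */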
 1731 static int
 1732 pfsync_defer(struct pf_kstate *st, struct mbuf *m)
 1733 {
 1734         struct pfsync_softc *sc = V_pfsyncif;
 1735         struct pfsync_deferral *pd;
 1736         struct pfsync_bucket *b;
 1737 
 1738         if (m->m_flags & (M_BCAST|M_MCAST))
 1739                 return (0);
 1740 
 1741         if (sc == NULL)
 1742                 return (0);
 1743 
 1744         b = pfsync_get_bucket(sc, st);
 1745 
 1746         PFSYNC_LOCK(sc);
 1747 
 1748         if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
 1749             !(sc->sc_flags & PFSYNCF_DEFER)) {
 1750                 PFSYNC_UNLOCK(sc);
 1751                 return (0);
 1752         }
 1753 
 1754         PFSYNC_BUCKET_LOCK(b);
 1755         PFSYNC_UNLOCK(sc);
 1756 
 1757         if (b->b_deferred >= 128)
 1758                 pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);
 1759 
 1760         pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
 1761         if (pd == NULL) {
 1762                 PFSYNC_BUCKET_UNLOCK(b);
 1763                 return (0);
 1764         }
 1765         b->b_deferred++;
 1766 
 1767         m->m_flags |= M_SKIP_FIREWALL;
 1768         st->state_flags |= PFSTATE_ACK;
 1769 
 1770         pd->pd_sc = sc;
 1771         pd->pd_refs = 0;
 1772         pd->pd_st = st;
 1773         pf_ref_state(st);
 1774         pd->pd_m = m;
 1775 
 1776         TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
 1777         callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
 1778         callout_reset(&pd->pd_tmo, PFSYNC_DEFER_TIMEOUT, pfsync_defer_tmo, pd);
 1779 
 1780         pfsync_push(b);
 1781         PFSYNC_BUCKET_UNLOCK(b);
 1782 
 1783         return (1);
 1784 }
 1785 
 1786 static void
 1787 pfsync_undefer(struct pfsync_deferral *pd, int drop)
 1788 {
 1789         struct pfsync_softc *sc = pd->pd_sc;
 1790         struct mbuf *m = pd->pd_m;
 1791         struct pf_kstate *st = pd->pd_st;
 1792         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 1793 
 1794         PFSYNC_BUCKET_LOCK_ASSERT(b);
 1795 
 1796         TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
 1797         b->b_deferred--;
 1798         pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */
 1799         free(pd, M_PFSYNC);
 1800         pf_release_state(st);
 1801 
 1802         if (drop)
 1803                 m_freem(m);
 1804         else {
 1805                 _IF_ENQUEUE(&b->b_snd, m);
 1806                 pfsync_push(b);
 1807         }
 1808 }
 1809 
 1810 static void
 1811 pfsync_defer_tmo(void *arg)
 1812 {
 1813         struct epoch_tracker et;
 1814         struct pfsync_deferral *pd = arg;
 1815         struct pfsync_softc *sc = pd->pd_sc;
 1816         struct mbuf *m = pd->pd_m;
 1817         struct pf_kstate *st = pd->pd_st;
 1818         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 1819 
 1820         PFSYNC_BUCKET_LOCK_ASSERT(b);
 1821 
 1822         if (sc->sc_sync_if == NULL) {
                      /*
                       * The callout was initialized CALLOUT_RETURNUNLOCKED,
                       * so the handler must drop the bucket lock itself.
                       */
                      PFSYNC_BUCKET_UNLOCK(b);
 1823                 return;
              }
 1824 
 1825         NET_EPOCH_ENTER(et);
 1826         CURVNET_SET(sc->sc_sync_if->if_vnet);
 1827 
 1828         TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
 1829         b->b_deferred--;
 1830         pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */
 1831         if (pd->pd_refs == 0)
 1832                 free(pd, M_PFSYNC);
 1833         PFSYNC_BUCKET_UNLOCK(b);
 1834 
 1835         switch (sc->sc_sync_peer.ss_family) {
 1836 #ifdef INET
 1837         case AF_INET:
 1838                 ip_output(m, NULL, NULL, 0, NULL, NULL);
 1839                 break;
 1840 #endif
 1841         }
 1842 
 1843         pf_release_state(st);
 1844 
 1845         CURVNET_RESTORE();
 1846         NET_EPOCH_EXIT(et);
 1847 }
 1848 
 1849 static void
 1850 pfsync_undefer_state(struct pf_kstate *st, int drop)
 1851 {
 1852         struct pfsync_softc *sc = V_pfsyncif;
 1853         struct pfsync_deferral *pd;
 1854         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 1855 
 1856         PFSYNC_BUCKET_LOCK(b);
 1857 
 1858         TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
 1859                  if (pd->pd_st == st) {
 1860                         if (callout_stop(&pd->pd_tmo) > 0)
 1861                                 pfsync_undefer(pd, drop);
 1862 
 1863                         PFSYNC_BUCKET_UNLOCK(b);
 1864                         return;
 1865                 }
 1866         }
 1867         PFSYNC_BUCKET_UNLOCK(b);
 1868 
 1869         panic("%s: unable to find deferred state", __func__);
 1870 }
 1871 
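      /*
       * States are sharded over the buckets by PF_IDHASH(), so a given
       * state always maps to the same bucket and all of its sync-queue
       * bookkeeping is serialized by that single bucket mutex.
       */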
 1872 static struct pfsync_bucket*
 1873 pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st)
 1874 {
 1875         int c = PF_IDHASH(st) % pfsync_buckets;
 1876         return &sc->sc_buckets[c];
 1877 }
 1878 
 1879 static void
 1880 pfsync_update_state(struct pf_kstate *st)
 1881 {
 1882         struct pfsync_softc *sc = V_pfsyncif;
 1883         bool sync = false, ref = true;
 1884         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 1885 
 1886         PF_STATE_LOCK_ASSERT(st);
 1887         PFSYNC_BUCKET_LOCK(b);
 1888 
 1889         if (st->state_flags & PFSTATE_ACK)
 1890                 pfsync_undefer_state(st, 0);
 1891         if (st->state_flags & PFSTATE_NOSYNC) {
 1892                 if (st->sync_state != PFSYNC_S_NONE)
 1893                         pfsync_q_del(st, true, b);
 1894                 PFSYNC_BUCKET_UNLOCK(b);
 1895                 return;
 1896         }
 1897 
 1898         if (b->b_len == PFSYNC_MINPKT)
 1899                 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
 1900 
 1901         switch (st->sync_state) {
 1902         case PFSYNC_S_UPD_C:
 1903         case PFSYNC_S_UPD:
 1904         case PFSYNC_S_INS:
 1905                 /* we're already handling it */
 1906 
 1907                 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
 1908                         st->sync_updates++;
 1909                         if (st->sync_updates >= sc->sc_maxupdates)
 1910                                 sync = true;
 1911                 }
 1912                 break;
 1913 
 1914         case PFSYNC_S_IACK:
 1915                 pfsync_q_del(st, false, b);
 1916                 ref = false;
 1917                 /* FALLTHROUGH */
 1918 
 1919         case PFSYNC_S_NONE:
 1920                 pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
 1921                 st->sync_updates = 0;
 1922                 break;
 1923 
 1924         default:
 1925                 panic("%s: unexpected sync state %d", __func__, st->sync_state);
 1926         }
 1927 
 1928         if (sync || (time_uptime - st->pfsync_time) < 2)
 1929                 pfsync_push(b);
 1930 
 1931         PFSYNC_BUCKET_UNLOCK(b);
 1932 }
 1933 
 1934 static void
 1935 pfsync_request_update(u_int32_t creatorid, u_int64_t id)
 1936 {
 1937         struct pfsync_softc *sc = V_pfsyncif;
 1938         struct pfsync_bucket *b = &sc->sc_buckets[0];
 1939         struct pfsync_upd_req_item *item;
 1940         size_t nlen = sizeof(struct pfsync_upd_req);
 1941 
 1942         PFSYNC_BUCKET_LOCK_ASSERT(b);
 1943 
 1944         /*
 1945          * This code does a bit of work to prevent multiple update requests
 1946          * for the same state from being generated. It searches the current
 1947          * subheader queue, but not the queue of already packed datagrams.
 1948          */
 1949         TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
 1950                 if (item->ur_msg.id == id &&
 1951                     item->ur_msg.creatorid == creatorid)
 1952                         return;
 1953 
 1954         item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
 1955         if (item == NULL)
 1956                 return; /* XXX stats */
 1957 
 1958         item->ur_msg.id = id;
 1959         item->ur_msg.creatorid = creatorid;
 1960 
 1961         if (TAILQ_EMPTY(&b->b_upd_req_list))
 1962                 nlen += sizeof(struct pfsync_subheader);
 1963 
 1964         if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
 1965                 pfsync_sendout(0, 0);
 1966 
 1967                 nlen = sizeof(struct pfsync_subheader) +
 1968                     sizeof(struct pfsync_upd_req);
 1969         }
 1970 
 1971         TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
 1972         b->b_len += nlen;
 1973 
 1974         pfsync_push(b);
 1975 }
 1976 
 1977 static bool
 1978 pfsync_update_state_req(struct pf_kstate *st)
 1979 {
 1980         struct pfsync_softc *sc = V_pfsyncif;
 1981         bool ref = true, full = false;
 1982         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 1983 
 1984         PF_STATE_LOCK_ASSERT(st);
 1985         PFSYNC_BUCKET_LOCK(b);
 1986 
 1987         if (st->state_flags & PFSTATE_NOSYNC) {
 1988                 if (st->sync_state != PFSYNC_S_NONE)
 1989                         pfsync_q_del(st, true, b);
 1990                 PFSYNC_BUCKET_UNLOCK(b);
 1991                 return (full);
 1992         }
 1993 
 1994         switch (st->sync_state) {
 1995         case PFSYNC_S_UPD_C:
 1996         case PFSYNC_S_IACK:
 1997                 pfsync_q_del(st, false, b);
 1998                 ref = false;
 1999                 /* FALLTHROUGH */
 2000 
 2001         case PFSYNC_S_NONE:
 2002                 pfsync_q_ins(st, PFSYNC_S_UPD, ref);
 2003                 pfsync_push(b);
 2004                 break;
 2005 
 2006         case PFSYNC_S_INS:
 2007         case PFSYNC_S_UPD:
 2008         case PFSYNC_S_DEL:
 2009                 /* we're already handling it */
 2010                 break;
 2011 
 2012         default:
 2013                 panic("%s: unexpected sync state %d", __func__, st->sync_state);
 2014         }
 2015 
 2016         if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(struct pfsync_state))
 2017                 full = true;
 2018 
 2019         PFSYNC_BUCKET_UNLOCK(b);
 2020 
 2021         return (full);
 2022 }
 2023 
 2024 static void
 2025 pfsync_delete_state(struct pf_kstate *st)
 2026 {
 2027         struct pfsync_softc *sc = V_pfsyncif;
 2028         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 2029         bool ref = true;
 2030 
 2031         PFSYNC_BUCKET_LOCK(b);
 2032         if (st->state_flags & PFSTATE_ACK)
 2033                 pfsync_undefer_state(st, 1);
 2034         if (st->state_flags & PFSTATE_NOSYNC) {
 2035                 if (st->sync_state != PFSYNC_S_NONE)
 2036                         pfsync_q_del(st, true, b);
 2037                 PFSYNC_BUCKET_UNLOCK(b);
 2038                 return;
 2039         }
 2040 
 2041         if (b->b_len == PFSYNC_MINPKT)
 2042                 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
 2043 
 2044         switch (st->sync_state) {
 2045         case PFSYNC_S_INS:
 2046                 /* We never got to tell the world so just forget about it. */
 2047                 pfsync_q_del(st, true, b);
 2048                 break;
 2049 
 2050         case PFSYNC_S_UPD_C:
 2051         case PFSYNC_S_UPD:
 2052         case PFSYNC_S_IACK:
 2053                 pfsync_q_del(st, false, b);
 2054                 ref = false;
 2055                 /* FALLTHROUGH */
 2056 
 2057         case PFSYNC_S_NONE:
 2058                 pfsync_q_ins(st, PFSYNC_S_DEL, ref);
 2059                 break;
 2060 
 2061         default:
 2062                 panic("%s: unexpected sync state %d", __func__, st->sync_state);
 2063         }
 2064 
 2065         PFSYNC_BUCKET_UNLOCK(b);
 2066 }
 2067 
 2068 static void
 2069 pfsync_clear_states(u_int32_t creatorid, const char *ifname)
 2070 {
 2071         struct {
 2072                 struct pfsync_subheader subh;
 2073                 struct pfsync_clr clr;
 2074         } __packed r;
 2075 
 2076         bzero(&r, sizeof(r));
 2077 
 2078         r.subh.action = PFSYNC_ACT_CLR;
 2079         r.subh.count = htons(1);
 2080         V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;
 2081 
 2082         strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
 2083         r.clr.creatorid = creatorid;
 2084 
 2085         pfsync_send_plus(&r, sizeof(r));
 2086 }
 2087 
 2088 static void
 2089 pfsync_q_ins(struct pf_kstate *st, int q, bool ref)
 2090 {
 2091         struct pfsync_softc *sc = V_pfsyncif;
 2092         size_t nlen = pfsync_qs[q].len;
 2093         struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
 2094 
 2095         PFSYNC_BUCKET_LOCK_ASSERT(b);
 2096 
 2097         KASSERT(st->sync_state == PFSYNC_S_NONE,
 2098                 ("%s: st->sync_state %u", __func__, st->sync_state));
 2099         KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
 2100             b->b_len));
 2101 
 2102         if (TAILQ_EMPTY(&b->b_qs[q]))
 2103                 nlen += sizeof(struct pfsync_subheader);
 2104 
 2105         if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
 2106                 pfsync_sendout(1, b->b_id);
 2107 
 2108                 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
 2109         }
 2110 
 2111         b->b_len += nlen;
 2112         TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
 2113         st->sync_state = q;
 2114         if (ref)
 2115                 pf_ref_state(st);
 2116 }
 2117 
 2118 static void
 2119 pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b)
 2120 {
 2121         int q = st->sync_state;
 2122 
 2123         PFSYNC_BUCKET_LOCK_ASSERT(b);
 2124         KASSERT(st->sync_state != PFSYNC_S_NONE,
 2125                 ("%s: st->sync_state != PFSYNC_S_NONE", __func__));
 2126 
 2127         b->b_len -= pfsync_qs[q].len;
 2128         TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
 2129         st->sync_state = PFSYNC_S_NONE;
 2130         if (unref)
 2131                 pf_release_state(st);
 2132 
 2133         if (TAILQ_EMPTY(&b->b_qs[q]))
 2134                 b->b_len -= sizeof(struct pfsync_subheader);
 2135 }
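
      /*
       * Worked example of the b_len accounting kept by pfsync_q_ins() and
       * pfsync_q_del(): an idle bucket has b_len == PFSYNC_MINPKT
       * (presumably room for the IP header, the pfsync header and the
       * trailing EOF subheader).  Queueing the first PFSYNC_S_UPD_C state
       * costs sizeof(struct pfsync_subheader) + sizeof(struct pfsync_upd_c);
       * each further state on that queue costs only the record itself.
       * Deletion refunds the record, plus the subheader once the queue
       * drains, so b_len always equals the size of the packet that
       * pfsync_sendout() would build.
       */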
 2136 
 2137 static void
 2138 pfsync_bulk_start(void)
 2139 {
 2140         struct pfsync_softc *sc = V_pfsyncif;
 2141 
 2142         if (V_pf_status.debug >= PF_DEBUG_MISC)
 2143                 printf("pfsync: received bulk update request\n");
 2144 
 2145         PFSYNC_BLOCK(sc);
 2146 
 2147         sc->sc_ureq_received = time_uptime;
 2148         sc->sc_bulk_hashid = 0;
 2149         sc->sc_bulk_stateid = 0;
 2150         pfsync_bulk_status(PFSYNC_BUS_START);
 2151         callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
 2152         PFSYNC_BUNLOCK(sc);
 2153 }
 2154 
 2155 static void
 2156 pfsync_bulk_update(void *arg)
 2157 {
 2158         struct pfsync_softc *sc = arg;
 2159         struct pf_kstate *s;
 2160         int i;
 2161 
 2162         PFSYNC_BLOCK_ASSERT(sc);
 2163         CURVNET_SET(sc->sc_ifp->if_vnet);
 2164 
 2165         /*
 2166          * Start with the last state from the previous invocation.
 2167          * It may have gone away; in that case start from the
 2168          * hash slot.
 2169          */
 2170         s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);
 2171 
 2172         if (s != NULL)
 2173                 i = PF_IDHASH(s);
 2174         else
 2175                 i = sc->sc_bulk_hashid;
 2176 
 2177         for (; i <= pf_hashmask; i++) {
 2178                 struct pf_idhash *ih = &V_pf_idhash[i];
 2179 
 2180                 if (s != NULL)
 2181                         PF_HASHROW_ASSERT(ih);
 2182                 else {
 2183                         PF_HASHROW_LOCK(ih);
 2184                         s = LIST_FIRST(&ih->states);
 2185                 }
 2186 
 2187                 for (; s; s = LIST_NEXT(s, entry)) {
 2188                         if (s->sync_state == PFSYNC_S_NONE &&
 2189                             s->timeout < PFTM_MAX &&
 2190                             s->pfsync_time <= sc->sc_ureq_received) {
 2191                                 if (pfsync_update_state_req(s)) {
 2192                                         /* We've filled a packet. */
 2193                                         sc->sc_bulk_hashid = i;
 2194                                         sc->sc_bulk_stateid = s->id;
 2195                                         sc->sc_bulk_creatorid = s->creatorid;
 2196                                         PF_HASHROW_UNLOCK(ih);
 2197                                         callout_reset(&sc->sc_bulk_tmo, 1,
 2198                                             pfsync_bulk_update, sc);
 2199                                         goto full;
 2200                                 }
 2201                         }
 2202                 }
 2203                 PF_HASHROW_UNLOCK(ih);
 2204         }
 2205 
 2206         /* We're done. */
 2207         pfsync_bulk_status(PFSYNC_BUS_END);
 2208 full:
 2209         CURVNET_RESTORE();
 2210 }
 2211 
 2212 static void
 2213 pfsync_bulk_status(u_int8_t status)
 2214 {
 2215         struct {
 2216                 struct pfsync_subheader subh;
 2217                 struct pfsync_bus bus;
 2218         } __packed r;
 2219 
 2220         struct pfsync_softc *sc = V_pfsyncif;
 2221 
 2222         bzero(&r, sizeof(r));
 2223 
 2224         r.subh.action = PFSYNC_ACT_BUS;
 2225         r.subh.count = htons(1);
 2226         V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;
 2227 
 2228         r.bus.creatorid = V_pf_status.hostid;
 2229         r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
 2230         r.bus.status = status;
 2231 
 2232         pfsync_send_plus(&r, sizeof(r));
 2233 }
 2234 
 2235 static void
 2236 pfsync_bulk_fail(void *arg)
 2237 {
 2238         struct pfsync_softc *sc = arg;
 2239         struct pfsync_bucket *b = &sc->sc_buckets[0];
 2240 
 2241         CURVNET_SET(sc->sc_ifp->if_vnet);
 2242 
 2243         PFSYNC_BLOCK_ASSERT(sc);
 2244 
 2245         if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
 2246                 /* Try again */
 2247                 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
 2248                     pfsync_bulk_fail, V_pfsyncif);
 2249                 PFSYNC_BUCKET_LOCK(b);
 2250                 pfsync_request_update(0, 0);
 2251                 PFSYNC_BUCKET_UNLOCK(b);
 2252         } else {
 2253                 /* Pretend like the transfer was ok. */
 2254                 sc->sc_ureq_sent = 0;
 2255                 sc->sc_bulk_tries = 0;
 2256                 PFSYNC_LOCK(sc);
 2257                 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
 2258                         (*carp_demote_adj_p)(-V_pfsync_carp_adj,
 2259                             "pfsync bulk fail");
 2260                 sc->sc_flags |= PFSYNCF_OK;
 2261                 PFSYNC_UNLOCK(sc);
 2262                 if (V_pf_status.debug >= PF_DEBUG_MISC)
 2263                         printf("pfsync: failed to receive bulk update\n");
 2264         }
 2265 
 2266         CURVNET_RESTORE();
 2267 }
 2268 
 2269 static void
 2270 pfsync_send_plus(void *plus, size_t pluslen)
 2271 {
 2272         struct pfsync_softc *sc = V_pfsyncif;
 2273         struct pfsync_bucket *b = &sc->sc_buckets[0];
 2274 
 2275         PFSYNC_BUCKET_LOCK(b);
 2276 
 2277         if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
 2278                 pfsync_sendout(1, b->b_id);
 2279 
 2280         b->b_plus = plus;
 2281         b->b_len += (b->b_pluslen = pluslen);
 2282 
 2283         pfsync_sendout(1, b->b_id);
 2284         PFSYNC_BUCKET_UNLOCK(b);
 2285 }
 2286 
 2287 static void
 2288 pfsync_timeout(void *arg)
 2289 {
 2290         struct pfsync_bucket *b = arg;
 2291 
 2292         CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
 2293         PFSYNC_BUCKET_LOCK(b);
 2294         pfsync_push(b);
 2295         PFSYNC_BUCKET_UNLOCK(b);
 2296         CURVNET_RESTORE();
 2297 }
 2298 
 2299 static void
 2300 pfsync_push(struct pfsync_bucket *b)
 2301 {
 2302 
 2303         PFSYNC_BUCKET_LOCK_ASSERT(b);
 2304 
 2305         b->b_flags |= PFSYNCF_BUCKET_PUSH;
 2306         swi_sched(V_pfsync_swi_cookie, 0);
 2307 }
 2308 
 2309 static void
 2310 pfsync_push_all(struct pfsync_softc *sc)
 2311 {
 2312         int c;
 2313         struct pfsync_bucket *b;
 2314 
 2315         for (c = 0; c < pfsync_buckets; c++) {
 2316                 b = &sc->sc_buckets[c];
 2317 
 2318                 PFSYNC_BUCKET_LOCK(b);
 2319                 pfsync_push(b);
 2320                 PFSYNC_BUCKET_UNLOCK(b);
 2321         }
 2322 }
 2323 
 2324 static void
 2325 pfsyncintr(void *arg)
 2326 {
 2327         struct epoch_tracker et;
 2328         struct pfsync_softc *sc = arg;
 2329         struct pfsync_bucket *b;
 2330         struct mbuf *m, *n;
 2331         int c, error = 0;
 2332 
 2333         NET_EPOCH_ENTER(et);
 2334         CURVNET_SET(sc->sc_ifp->if_vnet);
 2335 
 2336         for (c = 0; c < pfsync_buckets; c++) {
 2337                 b = &sc->sc_buckets[c];
 2338 
 2339                 PFSYNC_BUCKET_LOCK(b);
 2340                 if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
 2341                         pfsync_sendout(0, b->b_id);
 2342                         b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
 2343                 }
 2344                 _IF_DEQUEUE_ALL(&b->b_snd, m);
 2345                 PFSYNC_BUCKET_UNLOCK(b);
 2346 
 2347                 for (; m != NULL; m = n) {
 2348                         n = m->m_nextpkt;
 2349                         m->m_nextpkt = NULL;
 2350 
 2351                         /*
 2352                          * We distinguish between a deferral packet and our
 2353                          * own pfsync packet based on the M_SKIP_FIREWALL
 2354                          * flag. This is XXX.
 2355                          */
 2356                         switch (sc->sc_sync_peer.ss_family) {
 2357 #ifdef INET
 2358                         case AF_INET:
 2359                                 if (m->m_flags & M_SKIP_FIREWALL) {
 2360                                         error = ip_output(m, NULL, NULL, 0,
 2361                                             NULL, NULL);
 2362                                 } else {
 2363                                         error = ip_output(m, NULL, NULL,
 2364                                             IP_RAWOUTPUT, &sc->sc_imo, NULL);
 2365                                 }
 2366                                 break;
 2367 #endif
 2368                         }
 2369 
 2370                         if (error == 0)
 2371                                 V_pfsyncstats.pfsyncs_opackets++;
 2372                         else
 2373                                 V_pfsyncstats.pfsyncs_oerrors++;
 2374                 }
 2375         }
 2376         CURVNET_RESTORE();
 2377         NET_EPOCH_EXIT(et);
 2378 }
 2379 
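      /*
       * With the default syncpeer, pfsync joins the well-known pfsync
       * multicast group (INADDR_PFSYNC_GROUP, 224.0.0.240) on the sync
       * interface and sends with TTL PFSYNC_DFLTTL and multicast loopback
       * disabled, so every pfsync listener on the segment sees the updates.
       */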
 2380 static int
 2381 pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
 2382     struct in_mfilter *imf)
 2383 {
 2384         struct ip_moptions *imo = &sc->sc_imo;
 2385         int error;
 2386 
 2387         if (!(ifp->if_flags & IFF_MULTICAST))
 2388                 return (EADDRNOTAVAIL);
 2389 
 2390         switch (sc->sc_sync_peer.ss_family) {
 2391 #ifdef INET
 2392         case AF_INET:
 2393             {
 2394                 ip_mfilter_init(&imo->imo_head);
 2395                 imo->imo_multicast_vif = -1;
 2396                 if ((error = in_joingroup(ifp, &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL,
 2397                     &imf->imf_inm)) != 0)
 2398                         return (error);
 2399 
 2400                 ip_mfilter_insert(&imo->imo_head, imf);
 2401                 imo->imo_multicast_ifp = ifp;
 2402                 imo->imo_multicast_ttl = PFSYNC_DFLTTL;
 2403                 imo->imo_multicast_loop = 0;
 2404                 break;
 2405             }
 2406 #endif
 2407         }
 2408 
 2409         return (0);
 2410 }
 2411 
 2412 static void
 2413 pfsync_multicast_cleanup(struct pfsync_softc *sc)
 2414 {
 2415         struct ip_moptions *imo = &sc->sc_imo;
 2416         struct in_mfilter *imf;
 2417 
 2418         while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
 2419                 ip_mfilter_remove(&imo->imo_head, imf);
 2420                 in_leavegroup(imf->imf_inm, NULL);
 2421                 ip_mfilter_free(imf);
 2422         }
 2423         imo->imo_multicast_ifp = NULL;
 2424 }
 2425 
 2426 void
 2427 pfsync_detach_ifnet(struct ifnet *ifp)
 2428 {
 2429         struct pfsync_softc *sc = V_pfsyncif;
 2430 
 2431         if (sc == NULL)
 2432                 return;
 2433 
 2434         PFSYNC_LOCK(sc);
 2435 
 2436         if (sc->sc_sync_if == ifp) {
 2437                 /* We don't need multicast cleanup here, because the interface
 2438                  * is going away. We do need to ensure we don't try to do
 2439                  * cleanup later.
 2440                  */
 2441                 ip_mfilter_init(&sc->sc_imo.imo_head);
 2442                 sc->sc_imo.imo_multicast_ifp = NULL;
 2443                 sc->sc_sync_if = NULL;
 2444         }
 2445 
 2446         PFSYNC_UNLOCK(sc);
 2447 }
 2448 
 2449 static int
 2450 pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status)
 2451 {
 2452         struct sockaddr_storage sa;
 2453         status->maxupdates = pfsyncr->pfsyncr_maxupdates;
 2454         status->flags = pfsyncr->pfsyncr_defer;
 2455 
 2456         strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ);
 2457 
 2458         memset(&sa, 0, sizeof(sa));
 2459         if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) {
 2460                 struct sockaddr_in *in = (struct sockaddr_in *)&sa;
 2461                 in->sin_family = AF_INET;
 2462                 in->sin_len = sizeof(*in);
 2463                 in->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr;
 2464         }
 2465         status->syncpeer = sa;
 2466 
 2467         return 0;
 2468 }
 2469 
 2470 static int
 2471 pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc)
 2472 {
 2473         struct in_mfilter *imf = NULL;
 2474         struct ifnet *sifp;
 2475         struct ip *ip;
 2476         int error;
 2477         int c;
 2478 
 2479         if ((status->maxupdates < 0) || (status->maxupdates > 255))
 2480                 return (EINVAL);
 2481 
 2482         if (status->syncdev[0] == '\0')
 2483                 sifp = NULL;
 2484         else if ((sifp = ifunit_ref(status->syncdev)) == NULL)
 2485                 return (EINVAL);
 2486 
 2487         struct sockaddr_in *status_sin =
 2488             (struct sockaddr_in *)&(status->syncpeer);
 2489         if (sifp != NULL && (status_sin->sin_addr.s_addr == 0 ||
 2490                                 status_sin->sin_addr.s_addr ==
 2491                                     htonl(INADDR_PFSYNC_GROUP)))
 2492                 imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
 2493 
 2494         PFSYNC_LOCK(sc);
 2495         struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer;
 2496         sc_sin->sin_family = AF_INET;
 2497         sc_sin->sin_len = sizeof(*sc_sin);
 2498         if (status_sin->sin_addr.s_addr == 0) {
 2499                 sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
 2500         } else {
 2501                 sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr;
 2502         }
 2503 
 2504         sc->sc_maxupdates = status->maxupdates;
 2505         if (status->flags & PFSYNCF_DEFER) {
 2506                 sc->sc_flags |= PFSYNCF_DEFER;
 2507                 V_pfsync_defer_ptr = pfsync_defer;
 2508         } else {
 2509                 sc->sc_flags &= ~PFSYNCF_DEFER;
 2510                 V_pfsync_defer_ptr = NULL;
 2511         }
 2512 
 2513         if (sifp == NULL) {
 2514                 if (sc->sc_sync_if)
 2515                         if_rele(sc->sc_sync_if);
 2516                 sc->sc_sync_if = NULL;
 2517                 pfsync_multicast_cleanup(sc);
 2518                 PFSYNC_UNLOCK(sc);
 2519                 return (0);
 2520         }
 2521 
 2522         for (c = 0; c < pfsync_buckets; c++) {
 2523                 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
 2524                 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
 2525                     (sifp->if_mtu < sc->sc_ifp->if_mtu ||
 2526                         (sc->sc_sync_if != NULL &&
 2527                             sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
 2528                         sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
 2529                         pfsync_sendout(1, c);
 2530                 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
 2531         }
 2532 
 2533         pfsync_multicast_cleanup(sc);
 2534 
 2535         if (sc_sin->sin_addr.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
 2536                 error = pfsync_multicast_setup(sc, sifp, imf);
 2537                 if (error) {
 2538                         if_rele(sifp);
 2539                         ip_mfilter_free(imf);
 2540                         PFSYNC_UNLOCK(sc);
 2541                         return (error);
 2542                 }
 2543         }
 2544         if (sc->sc_sync_if)
 2545                 if_rele(sc->sc_sync_if);
 2546         sc->sc_sync_if = sifp;
 2547 
 2548         ip = &sc->sc_template.ipv4;
 2549         bzero(ip, sizeof(*ip));
 2550         ip->ip_v = IPVERSION;
 2551         ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2;
 2552         ip->ip_tos = IPTOS_LOWDELAY;
 2553         /* len and id are set later. */
 2554         ip->ip_off = htons(IP_DF);
 2555         ip->ip_ttl = PFSYNC_DFLTTL;
 2556         ip->ip_p = IPPROTO_PFSYNC;
 2557         ip->ip_src.s_addr = INADDR_ANY;
 2558         ip->ip_dst.s_addr = sc_sin->sin_addr.s_addr;
 2559 
 2560         /* Request a full state table update. */
 2561         if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
 2562                 (*carp_demote_adj_p)(V_pfsync_carp_adj,
 2563                     "pfsync bulk start");
 2564         sc->sc_flags &= ~PFSYNCF_OK;
 2565         if (V_pf_status.debug >= PF_DEBUG_MISC)
 2566                 printf("pfsync: requesting bulk update\n");
 2567         PFSYNC_UNLOCK(sc);
 2568         PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
 2569         pfsync_request_update(0, 0);
 2570         PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
 2571         PFSYNC_BLOCK(sc);
 2572         sc->sc_ureq_sent = time_uptime;
 2573         callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc);
 2574         PFSYNC_BUNLOCK(sc);
 2575         return (0);
 2576 }
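
      /*
       * A minimal userland sketch (hypothetical, not part of this file) of
       * the legacy SIOCSETPFSYNC path handled by the two functions above,
       * roughly what "ifconfig pfsync0 syncdev em0 maxupd 128 defer" boils
       * down to; "s" is assumed to be any open datagram socket:
       *
       *	struct ifreq ifr;
       *	struct pfsyncreq pfsyncr;
       *
       *	memset(&pfsyncr, 0, sizeof(pfsyncr));
       *	strlcpy(pfsyncr.pfsyncr_syncdev, "em0", IFNAMSIZ);
       *	pfsyncr.pfsyncr_maxupdates = 128;
       *	pfsyncr.pfsyncr_defer = PFSYNCF_DEFER;
       *	memset(&ifr, 0, sizeof(ifr));
       *	strlcpy(ifr.ifr_name, "pfsync0", sizeof(ifr.ifr_name));
       *	ifr.ifr_data = (caddr_t)&pfsyncr;
       *	if (ioctl(s, SIOCSETPFSYNC, &ifr) == -1)
       *		err(1, "SIOCSETPFSYNC");
       */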
 2577 
 2578 static void
 2579 pfsync_pointers_init(void)
 2580 {
 2581 
 2582         PF_RULES_WLOCK();
 2583         V_pfsync_state_import_ptr = pfsync_state_import;
 2584         V_pfsync_insert_state_ptr = pfsync_insert_state;
 2585         V_pfsync_update_state_ptr = pfsync_update_state;
 2586         V_pfsync_delete_state_ptr = pfsync_delete_state;
 2587         V_pfsync_clear_states_ptr = pfsync_clear_states;
 2588         V_pfsync_defer_ptr = pfsync_defer;
 2589         PF_RULES_WUNLOCK();
 2590 }
 2591 
 2592 static void
 2593 pfsync_pointers_uninit(void)
 2594 {
 2595 
 2596         PF_RULES_WLOCK();
 2597         V_pfsync_state_import_ptr = NULL;
 2598         V_pfsync_insert_state_ptr = NULL;
 2599         V_pfsync_update_state_ptr = NULL;
 2600         V_pfsync_delete_state_ptr = NULL;
 2601         V_pfsync_clear_states_ptr = NULL;
 2602         V_pfsync_defer_ptr = NULL;
 2603         PF_RULES_WUNLOCK();
 2604 }
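
      /*
       * pf(4) reaches pfsync only through these per-VNET function pointers,
       * so pf itself has no hard symbol dependency on this module: while
       * pfsync0 is down (see the SIOCSIFFLAGS handler above) the pointers
       * are NULL and pf skips state synchronization entirely.
       */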
 2605 
 2606 static void
 2607 vnet_pfsync_init(const void *unused __unused)
 2608 {
 2609         int error;
 2610 
 2611         V_pfsync_cloner = if_clone_simple(pfsyncname,
 2612             pfsync_clone_create, pfsync_clone_destroy, 1);
 2613         error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif,
 2614             SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
 2615         if (error) {
 2616                 if_clone_detach(V_pfsync_cloner);
 2617                 log(LOG_INFO, "swi_add() failed in %s\n", __func__);
 2618         }
 2619 
 2620         pfsync_pointers_init();
 2621 }
 2622 VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
 2623     vnet_pfsync_init, NULL);
 2624 
 2625 static void
 2626 vnet_pfsync_uninit(const void *unused __unused)
 2627 {
 2628         int ret __diagused;
 2629 
 2630         pfsync_pointers_uninit();
 2631 
 2632         if_clone_detach(V_pfsync_cloner);
 2633         ret = swi_remove(V_pfsync_swi_cookie);
 2634         MPASS(ret == 0);
 2635         ret = intr_event_destroy(V_pfsync_swi_ie);
 2636         MPASS(ret == 0);
 2637 }
 2638 
 2639 VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
 2640     vnet_pfsync_uninit, NULL);
 2641 
 2642 static int
 2643 pfsync_init(void)
 2644 {
 2645 #ifdef INET
 2646         int error;
 2647 
 2648         pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;
 2649 
 2650         error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL);
 2651         if (error)
 2652                 return (error);
 2653 #endif
 2654 
 2655         return (0);
 2656 }
 2657 
 2658 static void
 2659 pfsync_uninit(void)
 2660 {
 2661         pfsync_detach_ifnet_ptr = NULL;
 2662 
 2663 #ifdef INET
 2664         ipproto_unregister(IPPROTO_PFSYNC);
 2665 #endif
 2666 }
 2667 
 2668 static int
 2669 pfsync_modevent(module_t mod, int type, void *data)
 2670 {
 2671         int error = 0;
 2672 
 2673         switch (type) {
 2674         case MOD_LOAD:
 2675                 error = pfsync_init();
 2676                 break;
 2677         case MOD_UNLOAD:
 2678                 pfsync_uninit();
 2679                 break;
 2680         default:
 2681                 error = EINVAL;
 2682                 break;
 2683         }
 2684 
 2685         return (error);
 2686 }
 2687 
 2688 static moduledata_t pfsync_mod = {
 2689         pfsyncname,
 2690         pfsync_modevent,
 2691         0
 2692 };
 2693 
 2694 #define PFSYNC_MODVER 1
 2695 
 2696 /* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
 2697 DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
 2698 MODULE_VERSION(pfsync, PFSYNC_MODVER);
 2699 MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
