The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/netpfil/pf/pf_ioctl.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2001 Daniel Hartmeier
    5  * Copyright (c) 2002,2003 Henning Brauer
    6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  *
   13  *    - Redistributions of source code must retain the above copyright
   14  *      notice, this list of conditions and the following disclaimer.
   15  *    - Redistributions in binary form must reproduce the above
   16  *      copyright notice, this list of conditions and the following
   17  *      disclaimer in the documentation and/or other materials provided
   18  *      with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
   30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   31  * POSSIBILITY OF SUCH DAMAGE.
   32  *
   33  * Effort sponsored in part by the Defense Advanced Research Projects
   34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
   35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
   36  *
   37  *      $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD$");
   42 
   43 #include "opt_inet.h"
   44 #include "opt_inet6.h"
   45 #include "opt_bpf.h"
   46 #include "opt_pf.h"
   47 
   48 #include <sys/param.h>
   49 #include <sys/_bitset.h>
   50 #include <sys/bitset.h>
   51 #include <sys/bus.h>
   52 #include <sys/conf.h>
   53 #include <sys/endian.h>
   54 #include <sys/fcntl.h>
   55 #include <sys/filio.h>
   56 #include <sys/hash.h>
   57 #include <sys/interrupt.h>
   58 #include <sys/jail.h>
   59 #include <sys/kernel.h>
   60 #include <sys/kthread.h>
   61 #include <sys/lock.h>
   62 #include <sys/mbuf.h>
   63 #include <sys/module.h>
   64 #include <sys/nv.h>
   65 #include <sys/proc.h>
   66 #include <sys/sdt.h>
   67 #include <sys/smp.h>
   68 #include <sys/socket.h>
   69 #include <sys/sysctl.h>
   70 #include <sys/md5.h>
   71 #include <sys/ucred.h>
   72 
   73 #include <net/if.h>
   74 #include <net/if_var.h>
   75 #include <net/vnet.h>
   76 #include <net/route.h>
   77 #include <net/pfil.h>
   78 #include <net/pfvar.h>
   79 #include <net/if_pfsync.h>
   80 #include <net/if_pflog.h>
   81 
   82 #include <netinet/in.h>
   83 #include <netinet/ip.h>
   84 #include <netinet/ip_var.h>
   85 #include <netinet6/ip6_var.h>
   86 #include <netinet/ip_icmp.h>
   87 #include <netpfil/pf/pf_nv.h>
   88 
   89 #ifdef INET6
   90 #include <netinet/ip6.h>
   91 #endif /* INET6 */
   92 
   93 #ifdef ALTQ
   94 #include <net/altq/altq.h>
   95 #endif
   96 
   97 SDT_PROVIDER_DECLARE(pf);
   98 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
   99 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
  100 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
  101 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
  102 
  103 static struct pf_kpool  *pf_get_kpool(const char *, u_int32_t, u_int8_t,
  104                             u_int32_t, u_int8_t, u_int8_t, u_int8_t);
  105 
  106 static void              pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
  107 static void              pf_empty_kpool(struct pf_kpalist *);
  108 static int               pfioctl(struct cdev *, u_long, caddr_t, int,
  109                             struct thread *);
  110 #ifdef ALTQ
  111 static int               pf_begin_altq(u_int32_t *);
  112 static int               pf_rollback_altq(u_int32_t);
  113 static int               pf_commit_altq(u_int32_t);
  114 static int               pf_enable_altq(struct pf_altq *);
  115 static int               pf_disable_altq(struct pf_altq *);
  116 static uint16_t          pf_qname2qid(const char *);
  117 static void              pf_qid_unref(uint16_t);
  118 #endif /* ALTQ */
  119 static int               pf_begin_rules(u_int32_t *, int, const char *);
  120 static int               pf_rollback_rules(u_int32_t, int, char *);
  121 static int               pf_setup_pfsync_matching(struct pf_kruleset *);
  122 static void              pf_hash_rule(MD5_CTX *, struct pf_krule *);
  123 static void              pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
  124 static int               pf_commit_rules(u_int32_t, int, char *);
  125 static int               pf_addr_setup(struct pf_kruleset *,
  126                             struct pf_addr_wrap *, sa_family_t);
  127 static void              pf_addr_copyout(struct pf_addr_wrap *);
  128 static void              pf_src_node_copy(const struct pf_ksrc_node *,
  129                             struct pf_src_node *);
  130 #ifdef ALTQ
  131 static int               pf_export_kaltq(struct pf_altq *,
  132                             struct pfioc_altq_v1 *, size_t);
  133 static int               pf_import_kaltq(struct pfioc_altq_v1 *,
  134                             struct pf_altq *, size_t);
  135 #endif /* ALTQ */
  136 
  137 VNET_DEFINE(struct pf_krule,    pf_default_rule);
  138 
  139 #ifdef ALTQ
  140 VNET_DEFINE_STATIC(int,         pf_altq_running);
  141 #define V_pf_altq_running       VNET(pf_altq_running)
  142 #endif
  143 
  144 #define TAGID_MAX        50000
  145 struct pf_tagname {
  146         TAILQ_ENTRY(pf_tagname) namehash_entries;
  147         TAILQ_ENTRY(pf_tagname) taghash_entries;
  148         char                    name[PF_TAG_NAME_SIZE];
  149         uint16_t                tag;
  150         int                     ref;
  151 };
  152 
  153 struct pf_tagset {
  154         TAILQ_HEAD(, pf_tagname)        *namehash;
  155         TAILQ_HEAD(, pf_tagname)        *taghash;
  156         unsigned int                     mask;
  157         uint32_t                         seed;
  158         BITSET_DEFINE(, TAGID_MAX)       avail;
  159 };
  160 
  161 VNET_DEFINE(struct pf_tagset, pf_tags);
  162 #define V_pf_tags       VNET(pf_tags)
  163 static unsigned int     pf_rule_tag_hashsize;
  164 #define PF_RULE_TAG_HASH_SIZE_DEFAULT   128
  165 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
  166     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
  167     "Size of pf(4) rule tag hashtable");
  168 
  169 #ifdef ALTQ
  170 VNET_DEFINE(struct pf_tagset, pf_qids);
  171 #define V_pf_qids       VNET(pf_qids)
  172 static unsigned int     pf_queue_tag_hashsize;
  173 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT  128
  174 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
  175     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
  176     "Size of pf(4) queue tag hashtable");
  177 #endif
  178 VNET_DEFINE(uma_zone_t,  pf_tag_z);
  179 #define V_pf_tag_z               VNET(pf_tag_z)
  180 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
  181 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
  182 
  183 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
  184 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
  185 #endif
  186 
  187 static void              pf_init_tagset(struct pf_tagset *, unsigned int *,
  188                             unsigned int);
  189 static void              pf_cleanup_tagset(struct pf_tagset *);
  190 static uint16_t          tagname2hashindex(const struct pf_tagset *, const char *);
  191 static uint16_t          tag2hashindex(const struct pf_tagset *, uint16_t);
  192 static u_int16_t         tagname2tag(struct pf_tagset *, const char *);
  193 static u_int16_t         pf_tagname2tag(const char *);
  194 static void              tag_unref(struct pf_tagset *, u_int16_t);
  195 
  196 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
  197 
  198 struct cdev *pf_dev;
  199 
  200 /*
  201  * XXX - These are new and need to be checked when moveing to a new version
  202  */
  203 static void              pf_clear_all_states(void);
  204 static unsigned int      pf_clear_states(const struct pf_kstate_kill *);
  205 static void              pf_killstates(struct pf_kstate_kill *,
  206                             unsigned int *);
  207 static int               pf_killstates_row(struct pf_kstate_kill *,
  208                             struct pf_idhash *);
  209 static int               pf_killstates_nv(struct pfioc_nv *);
  210 static int               pf_clearstates_nv(struct pfioc_nv *);
  211 static int               pf_getstate(struct pfioc_nv *);
  212 static int               pf_getstatus(struct pfioc_nv *);
  213 static int               pf_clear_tables(void);
  214 static void              pf_clear_srcnodes(struct pf_ksrc_node *);
  215 static void              pf_kill_srcnodes(struct pfioc_src_node_kill *);
  216 static int               pf_keepcounters(struct pfioc_nv *);
  217 static void              pf_tbladdr_copyout(struct pf_addr_wrap *);
  218 
  219 /*
  220  * Wrapper functions for pfil(9) hooks
  221  */
  222 #ifdef INET
  223 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
  224     int flags, void *ruleset __unused, struct inpcb *inp);
  225 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
  226     int flags, void *ruleset __unused, struct inpcb *inp);
  227 #endif
  228 #ifdef INET6
  229 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
  230     int flags, void *ruleset __unused, struct inpcb *inp);
  231 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
  232     int flags, void *ruleset __unused, struct inpcb *inp);
  233 #endif
  234 
  235 static void             hook_pf(void);
  236 static void             dehook_pf(void);
  237 static int              shutdown_pf(void);
  238 static int              pf_load(void);
  239 static void             pf_unload(void);
  240 
  241 static struct cdevsw pf_cdevsw = {
  242         .d_ioctl =      pfioctl,
  243         .d_name =       PF_NAME,
  244         .d_version =    D_VERSION,
  245 };
  246 
  247 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
  248 #define V_pf_pfil_hooked        VNET(pf_pfil_hooked)
  249 
  250 /*
  251  * We need a flag that is neither hooked nor running to know when
  252  * the VNET is "valid".  We primarily need this to control (global)
  253  * external event, e.g., eventhandlers.
  254  */
  255 VNET_DEFINE(int, pf_vnet_active);
  256 #define V_pf_vnet_active        VNET(pf_vnet_active)
  257 
  258 int pf_end_threads;
  259 struct proc *pf_purge_proc;
  260 
  261 struct rmlock                   pf_rules_lock;
  262 struct sx                       pf_ioctl_lock;
  263 struct sx                       pf_end_lock;
  264 
  265 /* pfsync */
  266 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
  267 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
  268 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
  269 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
  270 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
  271 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
  272 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
  273 
  274 /* pflog */
  275 pflog_packet_t                  *pflog_packet_ptr = NULL;
  276 
  277 #define ERROUT_FUNCTION(target, x)                                      \
  278         do {                                                            \
  279                 error = (x);                                            \
  280                 SDT_PROBE3(pf, ioctl, function, error, __func__, error, \
  281                     __LINE__);                                          \
  282                 goto target;                                            \
  283         } while (0)
  284 
  285 /*
  286  * Copy a user-provided string, returning an error if truncation would occur.
  287  * Avoid scanning past "sz" bytes in the source string since there's no
  288  * guarantee that it's nul-terminated.
  289  */
  290 static int
  291 pf_user_strcpy(char *dst, const char *src, size_t sz)
  292 {
  293         if (strnlen(src, sz) == sz)
  294                 return (EINVAL);
  295         (void)strlcpy(dst, src, sz);
  296         return (0);
  297 }
  298 
/*
 * Per-VNET attach: bring up all pf subsystems for the current vnet,
 * set state/src-node limits, initialize the default rule and its
 * counters, install default timeouts, zero the status structure and
 * register the software interrupt used by pf_intr().
 */
static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	/* Per-direction (2-element) packet/byte counters plus totals. */
	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

#ifdef PF_WANT_32_TO_64_COUNTER
	/*
	 * NOTE(review): zeroed marker nodes inserted into the global kif
	 * and rule lists -- presumably iteration cursors for the
	 * 32-to-64-bit counter code; confirm against that code.
	 */
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	/* Allocate the per-category status counters. */
	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}
  390 
  391 static struct pf_kpool *
  392 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
  393     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
  394     u_int8_t check_ticket)
  395 {
  396         struct pf_kruleset      *ruleset;
  397         struct pf_krule         *rule;
  398         int                      rs_num;
  399 
  400         ruleset = pf_find_kruleset(anchor);
  401         if (ruleset == NULL)
  402                 return (NULL);
  403         rs_num = pf_get_ruleset_number(rule_action);
  404         if (rs_num >= PF_RULESET_MAX)
  405                 return (NULL);
  406         if (active) {
  407                 if (check_ticket && ticket !=
  408                     ruleset->rules[rs_num].active.ticket)
  409                         return (NULL);
  410                 if (r_last)
  411                         rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
  412                             pf_krulequeue);
  413                 else
  414                         rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
  415         } else {
  416                 if (check_ticket && ticket !=
  417                     ruleset->rules[rs_num].inactive.ticket)
  418                         return (NULL);
  419                 if (r_last)
  420                         rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
  421                             pf_krulequeue);
  422                 else
  423                         rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
  424         }
  425         if (!r_last) {
  426                 while ((rule != NULL) && (rule->nr != rule_number))
  427                         rule = TAILQ_NEXT(rule, entries);
  428         }
  429         if (rule == NULL)
  430                 return (NULL);
  431 
  432         return (&rule->rpool);
  433 }
  434 
  435 static void
  436 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
  437 {
  438         struct pf_kpooladdr     *mv_pool_pa;
  439 
  440         while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
  441                 TAILQ_REMOVE(poola, mv_pool_pa, entries);
  442                 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
  443         }
  444 }
  445 
  446 static void
  447 pf_empty_kpool(struct pf_kpalist *poola)
  448 {
  449         struct pf_kpooladdr *pa;
  450 
  451         while ((pa = TAILQ_FIRST(poola)) != NULL) {
  452                 switch (pa->addr.type) {
  453                 case PF_ADDR_DYNIFTL:
  454                         pfi_dynaddr_remove(pa->addr.p.dyn);
  455                         break;
  456                 case PF_ADDR_TABLE:
  457                         /* XXX: this could be unfinished pooladdr on pabuf */
  458                         if (pa->addr.p.tbl != NULL)
  459                                 pfr_detach_table(pa->addr.p.tbl);
  460                         break;
  461                 }
  462                 if (pa->kif)
  463                         pfi_kkif_unref(pa->kif);
  464                 TAILQ_REMOVE(poola, pa, entries);
  465                 free(pa, M_PFRULE);
  466         }
  467 }
  468 
/*
 * Remove a rule from its ruleset queue and append it to the global
 * list of unlinked rules.  Requires both the rules write lock and the
 * unlinked-rules lock to be held by the caller.
 */
static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	/*
	 * NOTE(review): PFRULE_REFS is set before queuing on
	 * V_pf_unlinked_rules -- presumably so the purge logic treats the
	 * rule as still referenced initially; confirm against the purge code.
	 */
	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}
  481 
/*
 * Unlink a rule from its queue, taking the unlinked-rules lock around
 * the operation.  The rules write lock must already be held.
 */
static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}
  492 
/*
 * Release every resource held by a rule -- tag references, ALTQ queue
 * ids, dynamic-address/table attachments of both endpoints, the
 * overload table, the interface reference and the anchor linkage --
 * then free the rule itself.  The rules write lock must be held.
 */
void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	/* Only unref pqid separately when it differs from qid, to avoid
	 * dropping the same queue tag twice. */
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}
  533 
  534 static void
  535 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
  536     unsigned int default_size)
  537 {
  538         unsigned int i;
  539         unsigned int hashsize;
  540 
  541         if (*tunable_size == 0 || !powerof2(*tunable_size))
  542                 *tunable_size = default_size;
  543 
  544         hashsize = *tunable_size;
  545         ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
  546             M_WAITOK);
  547         ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
  548             M_WAITOK);
  549         ts->mask = hashsize - 1;
  550         ts->seed = arc4random();
  551         for (i = 0; i < hashsize; i++) {
  552                 TAILQ_INIT(&ts->namehash[i]);
  553                 TAILQ_INIT(&ts->taghash[i]);
  554         }
  555         BIT_FILL(TAGID_MAX, &ts->avail);
  556 }
  557 
  558 static void
  559 pf_cleanup_tagset(struct pf_tagset *ts)
  560 {
  561         unsigned int i;
  562         unsigned int hashsize;
  563         struct pf_tagname *t, *tmp;
  564 
  565         /*
  566          * Only need to clean up one of the hashes as each tag is hashed
  567          * into each table.
  568          */
  569         hashsize = ts->mask + 1;
  570         for (i = 0; i < hashsize; i++)
  571                 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
  572                         uma_zfree(V_pf_tag_z, t);
  573 
  574         free(ts->namehash, M_PFHASH);
  575         free(ts->taghash, M_PFHASH);
  576 }
  577 
/*
 * Hash a tag name into a namehash bucket index using the tag set's
 * per-boot murmur3 seed.  At most PF_TAG_NAME_SIZE - 1 characters of
 * the name are considered.
 */
static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}
  586 
/*
 * Map a tag id to its taghash bucket index; ids are simply masked by
 * the (power-of-two) table size.
 */
static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}
  593 
  594 static u_int16_t
  595 tagname2tag(struct pf_tagset *ts, const char *tagname)
  596 {
  597         struct pf_tagname       *tag;
  598         u_int32_t                index;
  599         u_int16_t                new_tagid;
  600 
  601         PF_RULES_WASSERT();
  602 
  603         index = tagname2hashindex(ts, tagname);
  604         TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
  605                 if (strcmp(tagname, tag->name) == 0) {
  606                         tag->ref++;
  607                         return (tag->tag);
  608                 }
  609 
  610         /*
  611          * new entry
  612          *
  613          * to avoid fragmentation, we do a linear search from the beginning
  614          * and take the first free slot we find.
  615          */
  616         new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
  617         /*
  618          * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
  619          * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
  620          * set.  It may also return a bit number greater than TAGID_MAX due
  621          * to rounding of the number of bits in the vector up to a multiple
  622          * of the vector word size at declaration/allocation time.
  623          */
  624         if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
  625                 return (0);
  626 
  627         /* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
  628         BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
  629 
  630         /* allocate and fill new struct pf_tagname */
  631         tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
  632         if (tag == NULL)
  633                 return (0);
  634         strlcpy(tag->name, tagname, sizeof(tag->name));
  635         tag->tag = new_tagid;
  636         tag->ref = 1;
  637 
  638         /* Insert into namehash */
  639         TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
  640 
  641         /* Insert into taghash */
  642         index = tag2hashindex(ts, new_tagid);
  643         TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
  644 
  645         return (tag->tag);
  646 }
  647 
/*
 * Drop one reference on a tag id.  When the last reference goes away,
 * the entry is removed from both hashes, the id is returned to the
 * available set and the pf_tagname is freed.  The rules write lock
 * must be held.
 */
static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				/* index is reused for the namehash bucket. */
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}
  672 
/* Resolve (or allocate) a tag id for tagname in the global rule tag set. */
static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}
  678 
  679 #ifdef ALTQ
/* Resolve (or allocate) a queue id for qname in the ALTQ queue tag set. */
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}
  685 
/* Drop one reference on an ALTQ queue id. */
static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}
  691 
/*
 * Begin an ALTQ transaction: purge any leftover inactive queue lists
 * and, on success, hand out a fresh ticket for the inactive side.
 * The rules write lock must be held.
 */
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			/* NOTE(review): only the last altq_remove() error
			 * survives the loop; earlier ones are overwritten. */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}
  720 
/*
 * Roll back an ALTQ transaction: if the ticket matches the open
 * inactive side, purge both inactive queue lists and close the
 * transaction.  A stale ticket is silently ignored (returns 0).
 * The rules write lock must be held.
 */
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			/* NOTE(review): only the last altq_remove() error
			 * survives the loop; earlier ones are overwritten. */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}
  748 
/*
 * Commit the inactive ALTQ lists built under 'ticket': swap them with
 * the active lists, attach and (if ALTQ is running) enable the new
 * disciplines, then detach, destroy and free the old set.
 *
 * Returns EBUSY on ticket mismatch or if no transaction is open.
 * NOTE(review): if altq_pfattach()/pf_enable_altq() fails mid-loop this
 * returns early with the lists already swapped, the old lists unpurged
 * and V_altqs_inactive_open still set — confirm callers tolerate that.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		/* Entries marked IF_REMOVED have no interface to attach to. */
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			/* Preserve the first failure seen while tearing down. */
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	/* Per-queue entries own no discipline; just release qid references. */
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}
  807 
  808 static int
  809 pf_enable_altq(struct pf_altq *altq)
  810 {
  811         struct ifnet            *ifp;
  812         struct tb_profile        tb;
  813         int                      error = 0;
  814 
  815         if ((ifp = ifunit(altq->ifname)) == NULL)
  816                 return (EINVAL);
  817 
  818         if (ifp->if_snd.altq_type != ALTQT_NONE)
  819                 error = altq_enable(&ifp->if_snd);
  820 
  821         /* set tokenbucket regulator */
  822         if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
  823                 tb.rate = altq->ifbandwidth;
  824                 tb.depth = altq->tbrsize;
  825                 error = tbr_set(&ifp->if_snd, &tb);
  826         }
  827 
  828         return (error);
  829 }
  830 
  831 static int
  832 pf_disable_altq(struct pf_altq *altq)
  833 {
  834         struct ifnet            *ifp;
  835         struct tb_profile        tb;
  836         int                      error;
  837 
  838         if ((ifp = ifunit(altq->ifname)) == NULL)
  839                 return (EINVAL);
  840 
  841         /*
  842          * when the discipline is no longer referenced, it was overridden
  843          * by a new one.  if so, just return.
  844          */
  845         if (altq->altq_disc != ifp->if_snd.altq_disc)
  846                 return (0);
  847 
  848         error = altq_disable(&ifp->if_snd);
  849 
  850         if (error == 0) {
  851                 /* clear tokenbucket regulator */
  852                 tb.rate = 0;
  853                 error = tbr_set(&ifp->if_snd, &tb);
  854         }
  855 
  856         return (error);
  857 }
  858 
/*
 * Re-instantiate one copied altq entry into the inactive set while
 * handling an interface arrival/departure.  If the named interface does
 * not exist (or is the interface being removed), the entry is merely
 * flagged IF_REMOVED; otherwise the discipline is re-created with
 * altq_add().
 *
 * On a non-zero return 'altq' has been freed here — the caller must not
 * touch it again.
 * NOTE(review): the ticket is validated only after altq_add() has run —
 * confirm an EBUSY here cannot leak the freshly added discipline.
 */
static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}
  883 
/*
 * React to an interface attach (remove == 0) or detach (remove != 0):
 * rebuild the ALTQ configuration through a begin/copy/commit cycle so
 * that entries referring to the affected interface are activated or
 * deactivated.  Any open userland transaction is rolled back first.
 */
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		/* On failure the callee has already freed a2. */
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	/* Now copy the per-queue entries the same way. */
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		/* Point the queue at its interface's newly copied discipline. */
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
				IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
  958 #endif /* ALTQ */
  959 
  960 static int
  961 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
  962 {
  963         struct pf_kruleset      *rs;
  964         struct pf_krule         *rule;
  965 
  966         PF_RULES_WASSERT();
  967 
  968         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
  969                 return (EINVAL);
  970         rs = pf_find_or_create_kruleset(anchor);
  971         if (rs == NULL)
  972                 return (EINVAL);
  973         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
  974                 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
  975                 rs->rules[rs_num].inactive.rcount--;
  976         }
  977         *ticket = ++rs->rules[rs_num].inactive.ticket;
  978         rs->rules[rs_num].inactive.open = 1;
  979         return (0);
  980 }
  981 
  982 static int
  983 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
  984 {
  985         struct pf_kruleset      *rs;
  986         struct pf_krule         *rule;
  987 
  988         PF_RULES_WASSERT();
  989 
  990         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
  991                 return (EINVAL);
  992         rs = pf_find_kruleset(anchor);
  993         if (rs == NULL || !rs->rules[rs_num].inactive.open ||
  994             rs->rules[rs_num].inactive.ticket != ticket)
  995                 return (0);
  996         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
  997                 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
  998                 rs->rules[rs_num].inactive.rcount--;
  999         }
 1000         rs->rules[rs_num].inactive.open = 0;
 1001         return (0);
 1002 }
 1003 
/*
 * Helpers for folding rule fields into an MD5 context.  'ctx' must name
 * an MD5_CTX * in scope at the expansion site.  The HTONL/HTONS
 * variants stage the value in the caller-supplied temporary 'stor' in
 * network byte order, keeping the digest byte-order independent.
 */
#define PF_MD5_UPD(st, elm)                                             \
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

/* Hash a NUL-terminated string field (terminator excluded). */
#define PF_MD5_UPD_STR(st, elm)                                         \
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
		(stor) = htonl((st)->elm);                              \
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
		(stor) = htons((st)->elm);                              \
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
 1019 
/*
 * Fold a rule address (its type-specific variant plus ports, negation
 * and port operator) into 'ctx'.  The update order defines the digest,
 * so it must remain stable for checksum comparisons to be meaningful.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}
	/* Other address types contribute only addr.type to the hash. */

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
 1044 
/*
 * Fold the match-relevant fields of a rule into 'ctx'.  Used both for
 * rule-identity comparison (pf_krule_compare) and for the ruleset
 * checksum (pf_setup_pfsync_matching); the field order defines the
 * digest and must not be changed casually.  Counters and other mutable
 * state are deliberately excluded.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
 1084 
 1085 static bool
 1086 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
 1087 {
 1088         MD5_CTX         ctx[2];
 1089         u_int8_t        digest[2][PF_MD5_DIGEST_LENGTH];
 1090 
 1091         MD5Init(&ctx[0]);
 1092         MD5Init(&ctx[1]);
 1093         pf_hash_rule(&ctx[0], a);
 1094         pf_hash_rule(&ctx[1], b);
 1095         MD5Final(digest[0], &ctx[0]);
 1096         MD5Final(digest[1], &ctx[1]);
 1097 
 1098         return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
 1099 }
 1100 
/*
 * Commit an open rule transaction: swap the inactive ruleset into
 * place, optionally migrate counters from matching old rules, then
 * purge the old (now inactive) rules and close the transaction.
 *
 * Returns EINVAL on a bad ruleset number, EBUSY on ticket mismatch or
 * a closed transaction, or an error from pf_setup_pfsync_matching().
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *tail;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	/*
	 * For each new rule, linearly scan the old list for a rule with
	 * the same hash (O(n*m)) and fold its counters into the new one.
	 */
	if (V_pf_status.keep_counters) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			tail = TAILQ_FIRST(old_rules);
			while ((tail != NULL) && ! pf_krule_compare(tail, rule))
				tail = TAILQ_NEXT(tail, entries);
			if (tail != NULL) {
				pf_counter_u64_critical_enter();
				pf_counter_u64_add_protected(&rule->evaluations,
				    pf_counter_u64_fetch(&tail->evaluations));
				pf_counter_u64_add_protected(&rule->packets[0],
				    pf_counter_u64_fetch(&tail->packets[0]));
				pf_counter_u64_add_protected(&rule->packets[1],
				    pf_counter_u64_fetch(&tail->packets[1]));
				pf_counter_u64_add_protected(&rule->bytes[0],
				    pf_counter_u64_fetch(&tail->bytes[0]));
				pf_counter_u64_add_protected(&rule->bytes[1],
				    pf_counter_u64_fetch(&tail->bytes[1]));
				pf_counter_u64_critical_exit();
			}
		}
	}

	/* Old set becomes the new inactive side, to be purged below. */
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}
 1184 
/*
 * Compute the MD5 checksum over all inactive rulesets (PF_RULESET_SCRUB
 * excluded) and store it in V_pf_status.pf_chksum; also rebuild each
 * ruleset's inactive ptr_array, indexed by rule number.
 *
 * Returns ENOMEM if a ptr_array allocation fails; arrays built so far
 * are left allocated (freed on the next call or at commit).
 * NOTE(review): ptr_array is sized from inactive.rcount but indexed by
 * rule->nr — relies on every nr being < rcount; confirm that invariant
 * holds where rules are inserted.
 */
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* Drop any stale array from a previous transaction. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}
 1224 
 1225 static int
 1226 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
 1227     sa_family_t af)
 1228 {
 1229         int error = 0;
 1230 
 1231         switch (addr->type) {
 1232         case PF_ADDR_TABLE:
 1233                 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
 1234                 if (addr->p.tbl == NULL)
 1235                         error = ENOMEM;
 1236                 break;
 1237         case PF_ADDR_DYNIFTL:
 1238                 error = pfi_dynaddr_setup(addr, af);
 1239                 break;
 1240         }
 1241 
 1242         return (error);
 1243 }
 1244 
 1245 static void
 1246 pf_addr_copyout(struct pf_addr_wrap *addr)
 1247 {
 1248 
 1249         switch (addr->type) {
 1250         case PF_ADDR_DYNIFTL:
 1251                 pfi_dynaddr_copyout(addr);
 1252                 break;
 1253         case PF_ADDR_TABLE:
 1254                 pf_tbladdr_copyout(addr);
 1255                 break;
 1256         }
 1257 }
 1258 
 1259 static void
 1260 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
 1261 {
 1262         int     secs = time_uptime, diff;
 1263 
 1264         bzero(out, sizeof(struct pf_src_node));
 1265 
 1266         bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
 1267         bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
 1268 
 1269         if (in->rule.ptr != NULL)
 1270                 out->rule.nr = in->rule.ptr->nr;
 1271 
 1272         for (int i = 0; i < 2; i++) {
 1273                 out->bytes[i] = counter_u64_fetch(in->bytes[i]);
 1274                 out->packets[i] = counter_u64_fetch(in->packets[i]);
 1275         }
 1276 
 1277         out->states = in->states;
 1278         out->conn = in->conn;
 1279         out->af = in->af;
 1280         out->ruletype = in->ruletype;
 1281 
 1282         out->creation = secs - in->creation;
 1283         if (out->expire > secs)
 1284                 out->expire -= secs;
 1285         else
 1286                 out->expire = 0;
 1287 
 1288         /* Adjust the connection rate estimate. */
 1289         diff = secs - in->conn_rate.last;
 1290         if (diff >= in->conn_rate.seconds)
 1291                 out->conn_rate.count = 0;
 1292         else
 1293                 out->conn_rate.count -=
 1294                     in->conn_rate.count * diff /
 1295                     in->conn_rate.seconds;
 1296 }
 1297 
 1298 #ifdef ALTQ
 1299 /*
 1300  * Handle export of struct pf_kaltq to user binaries that may be using any
 1301  * version of struct pf_altq.
 1302  */
 1303 static int
 1304 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
 1305 {
 1306         u_int32_t version;
 1307 
 1308         if (ioc_size == sizeof(struct pfioc_altq_v0))
 1309                 version = 0;
 1310         else
 1311                 version = pa->version;
 1312 
 1313         if (version > PFIOC_ALTQ_VERSION)
 1314                 return (EINVAL);
 1315 
 1316 #define ASSIGN(x) exported_q->x = q->x
 1317 #define COPY(x) \
 1318         bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
 1319 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
 1320 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
 1321 
 1322         switch (version) {
 1323         case 0: {
 1324                 struct pf_altq_v0 *exported_q =
 1325                     &((struct pfioc_altq_v0 *)pa)->altq;
 1326 
 1327                 COPY(ifname);
 1328 
 1329                 ASSIGN(scheduler);
 1330                 ASSIGN(tbrsize);
 1331                 exported_q->tbrsize = SATU16(q->tbrsize);
 1332                 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
 1333 
 1334                 COPY(qname);
 1335                 COPY(parent);
 1336                 ASSIGN(parent_qid);
 1337                 exported_q->bandwidth = SATU32(q->bandwidth);
 1338                 ASSIGN(priority);
 1339                 ASSIGN(local_flags);
 1340 
 1341                 ASSIGN(qlimit);
 1342                 ASSIGN(flags);
 1343 
 1344                 if (q->scheduler == ALTQT_HFSC) {
 1345 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
 1346 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
 1347                             SATU32(q->pq_u.hfsc_opts.x)
 1348                         
 1349                         ASSIGN_OPT_SATU32(rtsc_m1);
 1350                         ASSIGN_OPT(rtsc_d);
 1351                         ASSIGN_OPT_SATU32(rtsc_m2);
 1352 
 1353                         ASSIGN_OPT_SATU32(lssc_m1);
 1354                         ASSIGN_OPT(lssc_d);
 1355                         ASSIGN_OPT_SATU32(lssc_m2);
 1356 
 1357                         ASSIGN_OPT_SATU32(ulsc_m1);
 1358                         ASSIGN_OPT(ulsc_d);
 1359                         ASSIGN_OPT_SATU32(ulsc_m2);
 1360 
 1361                         ASSIGN_OPT(flags);
 1362                         
 1363 #undef ASSIGN_OPT
 1364 #undef ASSIGN_OPT_SATU32
 1365                 } else
 1366                         COPY(pq_u);
 1367 
 1368                 ASSIGN(qid);
 1369                 break;
 1370         }
 1371         case 1: {
 1372                 struct pf_altq_v1 *exported_q =
 1373                     &((struct pfioc_altq_v1 *)pa)->altq;
 1374 
 1375                 COPY(ifname);
 1376 
 1377                 ASSIGN(scheduler);
 1378                 ASSIGN(tbrsize);
 1379                 ASSIGN(ifbandwidth);
 1380 
 1381                 COPY(qname);
 1382                 COPY(parent);
 1383                 ASSIGN(parent_qid);
 1384                 ASSIGN(bandwidth);
 1385                 ASSIGN(priority);
 1386                 ASSIGN(local_flags);
 1387 
 1388                 ASSIGN(qlimit);
 1389                 ASSIGN(flags);
 1390                 COPY(pq_u);
 1391 
 1392                 ASSIGN(qid);
 1393                 break;
 1394         }
 1395         default:
 1396                 panic("%s: unhandled struct pfioc_altq version", __func__);
 1397                 break;
 1398         }
 1399 
 1400 #undef ASSIGN
 1401 #undef COPY
 1402 #undef SATU16
 1403 #undef SATU32
 1404 
 1405         return (0);
 1406 }
 1407 
/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.  Narrow v0 fields widen
 * implicitly on assignment.  Returns EINVAL for an unknown version.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	/* A v0-sized ioctl argument can only carry the v0 structure. */
	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}
 1512 
 1513 static struct pf_altq *
 1514 pf_altq_get_nth_active(u_int32_t n)
 1515 {
 1516         struct pf_altq          *altq;
 1517         u_int32_t                nr;
 1518 
 1519         nr = 0;
 1520         TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
 1521                 if (nr == n)
 1522                         return (altq);
 1523                 nr++;
 1524         }
 1525 
 1526         TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
 1527                 if (nr == n)
 1528                         return (altq);
 1529                 nr++;
 1530         }
 1531 
 1532         return (NULL);
 1533 }
 1534 #endif /* ALTQ */
 1535 
 1536 struct pf_krule *
 1537 pf_krule_alloc(void)
 1538 {
 1539         struct pf_krule *rule;
 1540 
 1541         rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
 1542         mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
 1543         return (rule);
 1544 }
 1545 
/*
 * Release a rule allocated by pf_krule_alloc(): unhook it from the
 * all-rules list if linked (taking the rules write lock only when the
 * caller does not already hold it), free its counters, destroy the
 * pool mutex and free the rule itself.  NULL is a no-op.
 */
void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		/* Avoid recursing on the lock if the caller holds it. */
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}
 1580 
 1581 static void
 1582 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
 1583     struct pf_pooladdr *pool)
 1584 {
 1585 
 1586         bzero(pool, sizeof(*pool));
 1587         bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
 1588         strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
 1589 }
 1590 
 1591 static int
 1592 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
 1593     struct pf_kpooladdr *kpool)
 1594 {
 1595         int ret;
 1596 
 1597         bzero(kpool, sizeof(*kpool));
 1598         bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
 1599         ret = pf_user_strcpy(kpool->ifname, pool->ifname,
 1600             sizeof(kpool->ifname));
 1601         return (ret);
 1602 }
 1603 
 1604 static void
 1605 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
 1606 {
 1607         bzero(pool, sizeof(*pool));
 1608 
 1609         bcopy(&kpool->key, &pool->key, sizeof(pool->key));
 1610         bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
 1611 
 1612         pool->tblidx = kpool->tblidx;
 1613         pool->proxy_port[0] = kpool->proxy_port[0];
 1614         pool->proxy_port[1] = kpool->proxy_port[1];
 1615         pool->opts = kpool->opts;
 1616 }
 1617 
 1618 static void
 1619 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
 1620 {
 1621         _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
 1622         _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
 1623 
 1624         bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
 1625         bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
 1626 
 1627         kpool->tblidx = pool->tblidx;
 1628         kpool->proxy_port[0] = pool->proxy_port[0];
 1629         kpool->proxy_port[1] = pool->proxy_port[1];
 1630         kpool->opts = pool->opts;
 1631 }
 1632 
 1633 static void
 1634 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
 1635 {
 1636 
 1637         bzero(rule, sizeof(*rule));
 1638 
 1639         bcopy(&krule->src, &rule->src, sizeof(rule->src));
 1640         bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
 1641 
 1642         for (int i = 0; i < PF_SKIP_COUNT; ++i) {
 1643                 if (rule->skip[i].ptr == NULL)
 1644                         rule->skip[i].nr = -1;
 1645                 else
 1646                         rule->skip[i].nr = krule->skip[i].ptr->nr;
 1647         }
 1648 
 1649         strlcpy(rule->label, krule->label[0], sizeof(rule->label));
 1650         strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
 1651         strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
 1652         strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
 1653         strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
 1654         strlcpy(rule->match_tagname, krule->match_tagname,
 1655             sizeof(rule->match_tagname));
 1656         strlcpy(rule->overload_tblname, krule->overload_tblname,
 1657             sizeof(rule->overload_tblname));
 1658 
 1659         pf_kpool_to_pool(&krule->rpool, &rule->rpool);
 1660 
 1661         rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
 1662         for (int i = 0; i < 2; i++) {
 1663                 rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
 1664                 rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
 1665         }
 1666 
 1667         /* kif, anchor, overload_tbl are not copied over. */
 1668 
 1669         rule->os_fingerprint = krule->os_fingerprint;
 1670 
 1671         rule->rtableid = krule->rtableid;
 1672         bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
 1673         rule->max_states = krule->max_states;
 1674         rule->max_src_nodes = krule->max_src_nodes;
 1675         rule->max_src_states = krule->max_src_states;
 1676         rule->max_src_conn = krule->max_src_conn;
 1677         rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
 1678         rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
 1679         rule->qid = krule->qid;
 1680         rule->pqid = krule->pqid;
 1681         rule->nr = krule->nr;
 1682         rule->prob = krule->prob;
 1683         rule->cuid = krule->cuid;
 1684         rule->cpid = krule->cpid;
 1685 
 1686         rule->return_icmp = krule->return_icmp;
 1687         rule->return_icmp6 = krule->return_icmp6;
 1688         rule->max_mss = krule->max_mss;
 1689         rule->tag = krule->tag;
 1690         rule->match_tag = krule->match_tag;
 1691         rule->scrub_flags = krule->scrub_flags;
 1692 
 1693         bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
 1694         bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
 1695 
 1696         rule->rule_flag = krule->rule_flag;
 1697         rule->action = krule->action;
 1698         rule->direction = krule->direction;
 1699         rule->log = krule->log;
 1700         rule->logif = krule->logif;
 1701         rule->quick = krule->quick;
 1702         rule->ifnot = krule->ifnot;
 1703         rule->match_tag_not = krule->match_tag_not;
 1704         rule->natpass = krule->natpass;
 1705 
 1706         rule->keep_state = krule->keep_state;
 1707         rule->af = krule->af;
 1708         rule->proto = krule->proto;
 1709         rule->type = krule->type;
 1710         rule->code = krule->code;
 1711         rule->flags = krule->flags;
 1712         rule->flagset = krule->flagset;
 1713         rule->min_ttl = krule->min_ttl;
 1714         rule->allow_opts = krule->allow_opts;
 1715         rule->rt = krule->rt;
 1716         rule->return_ttl = krule->return_ttl;
 1717         rule->tos = krule->tos;
 1718         rule->set_tos = krule->set_tos;
 1719         rule->anchor_relative = krule->anchor_relative;
 1720         rule->anchor_wildcard = krule->anchor_wildcard;
 1721 
 1722         rule->flush = krule->flush;
 1723         rule->prio = krule->prio;
 1724         rule->set_prio[0] = krule->set_prio[0];
 1725         rule->set_prio[1] = krule->set_prio[1];
 1726 
 1727         bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
 1728 
 1729         rule->u_states_cur = counter_u64_fetch(krule->states_cur);
 1730         rule->u_states_tot = counter_u64_fetch(krule->states_tot);
 1731         rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
 1732 }
 1733 
static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

	/*
	 * Import a userland rule (struct pf_rule, the old ioctl ABI)
	 * into a kernel rule.  Returns 0 on success or an errno value;
	 * on failure the krule may be partially filled, so callers
	 * (e.g. DIOCADDRULE) free it with pf_krule_free().
	 */

	/* Reject address families this kernel was not built with. */
#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	/* Validate both address specifications before copying anything. */
	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	/*
	 * Strings come from userspace; copy them with pf_user_strcpy()
	 * and propagate its error.  The old ABI carries a single label,
	 * which becomes the first kernel label.
	 */
	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
 1850 
 1851 static bool
 1852 pf_label_match(const struct pf_krule *rule, const char *label)
 1853 {
 1854         int i = 0;
 1855 
 1856         while (*rule->label[i]) {
 1857                 if (strcmp(rule->label[i], label) == 0)
 1858                         return (true);
 1859                 i++;
 1860         }
 1861 
 1862         return (false);
 1863 }
 1864 
 1865 static unsigned int
 1866 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
 1867 {
 1868         struct pf_kstate *match;
 1869         int more = 0;
 1870         unsigned int killed = 0;
 1871 
 1872         /* Call with unlocked hashrow */
 1873 
 1874         match = pf_find_state_all(key, dir, &more);
 1875         if (match && !more) {
 1876                 pf_unlink_state(match, 0);
 1877                 killed++;
 1878         }
 1879 
 1880         return (killed);
 1881 }
 1882 
static int
pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
{
	struct pf_kstate	*s;
	struct pf_state_key	*sk;
	struct pf_addr		*srcaddr, *dstaddr;
	struct pf_state_key_cmp	 match_key;
	int			 idx, killed = 0;
	unsigned int		 dir;
	u_int16_t		 srcport, dstport;
	struct pfi_kkif		*kif;

	/*
	 * Scan one ID-hash row and unlink every state matching the
	 * criteria in psk; returns the number of states killed.  After
	 * each kill the scan restarts from the row head, since the list
	 * iterator cannot be trusted once the current entry has been
	 * unlinked.
	 */
relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		/* For floating states look at the original kif. */
		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

		/*
		 * Derive src/dst as seen from the initiator's point of
		 * view: for outbound states the wire key stores them
		 * reversed.
		 */
		sk = s->key[PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		/* Each criterion only filters when it was supplied. */
		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (!  PF_MATCHA(psk->psk_rt_addr.neg,
		    &psk->psk_rt_addr.addr.v.a.addr,
		    &psk->psk_rt_addr.addr.v.a.mask,
		    &s->rt_addr, sk->af))
			continue;

		if (psk->psk_src.port_op != 0 &&
		    ! pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    ! pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] &&
		    ! pf_label_match(s->rule.ptr, psk->psk_label))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    kif->pfik_name))
			continue;

		if (psk->psk_kill_match) {
			/* Create the key to find matching states, with lock
			 * held. */

			bzero(&match_key, sizeof(match_key));

			/*
			 * The mate is looked up in the opposite direction
			 * using the other (stack vs. wire) key, with
			 * addresses and ports swapped.
			 */
			if (s->direction == PF_OUT) {
				dir = PF_IN;
				idx = PF_SK_STACK;
			} else {
				dir = PF_OUT;
				idx = PF_SK_WIRE;
			}

			match_key.af = s->key[idx]->af;
			match_key.proto = s->key[idx]->proto;
			PF_ACPY(&match_key.addr[0],
			    &s->key[idx]->addr[1], match_key.af);
			match_key.port[0] = s->key[idx]->port[1];
			PF_ACPY(&match_key.addr[1],
			    &s->key[idx]->addr[0], match_key.af);
			match_key.port[1] = s->key[idx]->port[0];
		}

		/*
		 * NOTE(review): pf_kill_matching_state() is called after
		 * pf_unlink_state(PF_ENTER_LOCKED), i.e. presumably with
		 * the row lock released again -- matches that function's
		 * "call with unlocked hashrow" contract; confirm.
		 */
		pf_unlink_state(s, PF_ENTER_LOCKED);
		killed++;

		if (psk->psk_kill_match)
			killed += pf_kill_matching_state(&match_key, dir);

		goto relock_DIOCKILLSTATES;
	}
	PF_HASHROW_UNLOCK(ih);

	return (killed);
}
 1988 
 1989 static int
 1990 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
 1991     struct pf_kstate_kill *kill)
 1992 {
 1993         int ret;
 1994 
 1995         bzero(kill, sizeof(*kill));
 1996 
 1997         bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
 1998         kill->psk_af = psk->psk_af;
 1999         kill->psk_proto = psk->psk_proto;
 2000         bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
 2001         bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
 2002         ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
 2003             sizeof(kill->psk_ifname));
 2004         if (ret != 0)
 2005                 return (ret);
 2006         ret = pf_user_strcpy(kill->psk_label, psk->psk_label,
 2007             sizeof(kill->psk_label));
 2008         if (ret != 0)
 2009                 return (ret);
 2010 
 2011         return (0);
 2012 }
 2013 
static int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
    struct thread *td)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*tail;
	struct pf_kpooladdr	*pa;
	struct pfi_kkif		*kif = NULL;
	int			 rs_num;
	int			 error = 0;

	/*
	 * Common backend for DIOCADDRULE and DIOCADDRULENV: validate
	 * @rule and append it to the inactive ruleset identified by
	 * @ticket in the anchor named by @anchor.  On ANY error the rule
	 * is freed here (callers rely on this and must not touch it
	 * afterwards); on success ownership passes to the ruleset.
	 */
	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
		error = EINVAL;
		goto errout_unlocked;
	}

#define	ERROUT(x)	ERROUT_FUNCTION(errout, x)

	/*
	 * Perform all M_WAITOK allocations up front, before taking the
	 * rules write lock.
	 */
	if (rule->ifname[0])
		kif = pf_kkif_create(M_WAITOK);
	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
	}
	rule->states_cur = counter_u64_alloc(M_WAITOK);
	rule->states_tot = counter_u64_alloc(M_WAITOK);
	rule->src_nodes = counter_u64_alloc(M_WAITOK);
	/* Record the creating process's credentials for rule listing. */
	rule->cuid = td->td_ucred->cr_ruid;
	rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
	TAILQ_INIT(&rule->rpool.list);

	PF_RULES_WLOCK();
#ifdef PF_WANT_32_TO_64_COUNTER
	/* Track the rule on the global list for counter maintenance. */
	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
	MPASS(!rule->allrulelinked);
	rule->allrulelinked = true;
	V_pf_allrulecount++;
#endif
	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		ERROUT(EINVAL);
	rs_num = pf_get_ruleset_number(rule->action);
	if (rs_num >= PF_RULESET_MAX)
		ERROUT(EINVAL);
	/*
	 * Both tickets must match the open transaction; a mismatch
	 * means the inactive ruleset or the pool address buffer changed
	 * under the caller.
	 */
	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
		    ruleset->rules[rs_num].inactive.ticket));
		ERROUT(EBUSY);
	}
	if (pool_ticket != V_ticket_pabuf) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pool_ticket: %d != %d\n", pool_ticket,
		    V_ticket_pabuf));
		ERROUT(EBUSY);
	}

	/* Number the rule after the current tail of the inactive set. */
	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
	    pf_krulequeue);
	if (tail)
		rule->nr = tail->nr + 1;
	else
		rule->nr = 0;
	if (rule->ifname[0]) {
		/* Ownership of kif passes to pfi_kkif_attach(). */
		rule->kif = pfi_kkif_attach(kif, rule->ifname);
		kif = NULL;
		pfi_kkif_ref(rule->kif);
	} else
		rule->kif = NULL;

	/*
	 * The checks below accumulate into 'error' rather than bailing
	 * out immediately; the result is acted upon once, further down.
	 */
	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
		error = EBUSY;

#ifdef ALTQ
	/* set queue IDs */
	if (rule->qname[0] != 0) {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != 0) {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			rule->pqid = rule->qid;
	}
#endif
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	if (rule->rt && !rule->direction)
		error = EINVAL;
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
		error = ENOMEM;
	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
		error = ENOMEM;
	if (pf_kanchor_setup(rule, ruleset, anchor_call))
		error = EINVAL;
	if (rule->scrub_flags & PFSTATE_SETPRIO &&
	    (rule->set_prio[0] > PF_PRIO_MAX ||
	    rule->set_prio[1] > PF_PRIO_MAX))
		error = EINVAL;
	/* Resolve table references in the buffered pool addresses. */
	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
		if (pa->addr.type == PF_ADDR_TABLE) {
			pa->addr.p.tbl = pfr_attach_table(ruleset,
			    pa->addr.v.tblname);
			if (pa->addr.p.tbl == NULL)
				error = ENOMEM;
		}

	rule->overload_tbl = NULL;
	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	/* Move the buffered pool addresses onto the rule. */
	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
	/*
	 * Translation rules (outside an anchor) and route-to rules
	 * require at least one pool address.
	 */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_NOPFROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		pf_free_rule(rule);
		rule = NULL;
		ERROUT(error);
	}

	/* Commit: link the rule into the inactive ruleset. */
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
	    rule, entries);
	ruleset->rules[rs_num].inactive.rcount++;

	PF_RULES_WUNLOCK();

	return (0);

#undef ERROUT
errout:
	PF_RULES_WUNLOCK();
errout_unlocked:
	pf_kkif_free(kif);
	pf_krule_free(rule);
	return (error);
}
 2173 
 2174 static int
 2175 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
 2176 {
 2177         int                      error = 0;
 2178         PF_RULES_RLOCK_TRACKER;
 2179 
 2180 #define ERROUT_IOCTL(target, x)                                 \
 2181     do {                                                                \
 2182             error = (x);                                                \
 2183             SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);  \
 2184             goto target;                                                \
 2185     } while (0)
 2186 
 2187 
 2188         /* XXX keep in sync with switch() below */
 2189         if (securelevel_gt(td->td_ucred, 2))
 2190                 switch (cmd) {
 2191                 case DIOCGETRULES:
 2192                 case DIOCGETRULE:
 2193                 case DIOCGETRULENV:
 2194                 case DIOCGETADDRS:
 2195                 case DIOCGETADDR:
 2196                 case DIOCGETSTATE:
 2197                 case DIOCGETSTATENV:
 2198                 case DIOCSETSTATUSIF:
 2199                 case DIOCGETSTATUS:
 2200                 case DIOCGETSTATUSNV:
 2201                 case DIOCCLRSTATUS:
 2202                 case DIOCNATLOOK:
 2203                 case DIOCSETDEBUG:
 2204                 case DIOCGETSTATES:
 2205                 case DIOCGETSTATESV2:
 2206                 case DIOCGETTIMEOUT:
 2207                 case DIOCCLRRULECTRS:
 2208                 case DIOCGETLIMIT:
 2209                 case DIOCGETALTQSV0:
 2210                 case DIOCGETALTQSV1:
 2211                 case DIOCGETALTQV0:
 2212                 case DIOCGETALTQV1:
 2213                 case DIOCGETQSTATSV0:
 2214                 case DIOCGETQSTATSV1:
 2215                 case DIOCGETRULESETS:
 2216                 case DIOCGETRULESET:
 2217                 case DIOCRGETTABLES:
 2218                 case DIOCRGETTSTATS:
 2219                 case DIOCRCLRTSTATS:
 2220                 case DIOCRCLRADDRS:
 2221                 case DIOCRADDADDRS:
 2222                 case DIOCRDELADDRS:
 2223                 case DIOCRSETADDRS:
 2224                 case DIOCRGETADDRS:
 2225                 case DIOCRGETASTATS:
 2226                 case DIOCRCLRASTATS:
 2227                 case DIOCRTSTADDRS:
 2228                 case DIOCOSFPGET:
 2229                 case DIOCGETSRCNODES:
 2230                 case DIOCCLRSRCNODES:
 2231                 case DIOCGETSYNCOOKIES:
 2232                 case DIOCIGETIFACES:
 2233                 case DIOCGIFSPEEDV0:
 2234                 case DIOCGIFSPEEDV1:
 2235                 case DIOCSETIFFLAG:
 2236                 case DIOCCLRIFFLAG:
 2237                         break;
 2238                 case DIOCRCLRTABLES:
 2239                 case DIOCRADDTABLES:
 2240                 case DIOCRDELTABLES:
 2241                 case DIOCRSETTFLAGS:
 2242                         if (((struct pfioc_table *)addr)->pfrio_flags &
 2243                             PFR_FLAG_DUMMY)
 2244                                 break; /* dummy operation ok */
 2245                         return (EPERM);
 2246                 default:
 2247                         return (EPERM);
 2248                 }
 2249 
 2250         if (!(flags & FWRITE))
 2251                 switch (cmd) {
 2252                 case DIOCGETRULES:
 2253                 case DIOCGETADDRS:
 2254                 case DIOCGETADDR:
 2255                 case DIOCGETSTATE:
 2256                 case DIOCGETSTATENV:
 2257                 case DIOCGETSTATUS:
 2258                 case DIOCGETSTATUSNV:
 2259                 case DIOCGETSTATES:
 2260                 case DIOCGETSTATESV2:
 2261                 case DIOCGETTIMEOUT:
 2262                 case DIOCGETLIMIT:
 2263                 case DIOCGETALTQSV0:
 2264                 case DIOCGETALTQSV1:
 2265                 case DIOCGETALTQV0:
 2266                 case DIOCGETALTQV1:
 2267                 case DIOCGETQSTATSV0:
 2268                 case DIOCGETQSTATSV1:
 2269                 case DIOCGETRULESETS:
 2270                 case DIOCGETRULESET:
 2271                 case DIOCNATLOOK:
 2272                 case DIOCRGETTABLES:
 2273                 case DIOCRGETTSTATS:
 2274                 case DIOCRGETADDRS:
 2275                 case DIOCRGETASTATS:
 2276                 case DIOCRTSTADDRS:
 2277                 case DIOCOSFPGET:
 2278                 case DIOCGETSRCNODES:
 2279                 case DIOCGETSYNCOOKIES:
 2280                 case DIOCIGETIFACES:
 2281                 case DIOCGIFSPEEDV1:
 2282                 case DIOCGIFSPEEDV0:
 2283                 case DIOCGETRULENV:
 2284                         break;
 2285                 case DIOCRCLRTABLES:
 2286                 case DIOCRADDTABLES:
 2287                 case DIOCRDELTABLES:
 2288                 case DIOCRCLRTSTATS:
 2289                 case DIOCRCLRADDRS:
 2290                 case DIOCRADDADDRS:
 2291                 case DIOCRDELADDRS:
 2292                 case DIOCRSETADDRS:
 2293                 case DIOCRSETTFLAGS:
 2294                         if (((struct pfioc_table *)addr)->pfrio_flags &
 2295                             PFR_FLAG_DUMMY) {
 2296                                 flags |= FWRITE; /* need write lock for dummy */
 2297                                 break; /* dummy operation ok */
 2298                         }
 2299                         return (EACCES);
 2300                 case DIOCGETRULE:
 2301                         if (((struct pfioc_rule *)addr)->action ==
 2302                             PF_GET_CLR_CNTR)
 2303                                 return (EACCES);
 2304                         break;
 2305                 default:
 2306                         return (EACCES);
 2307                 }
 2308 
 2309         CURVNET_SET(TD_TO_VNET(td));
 2310 
 2311         switch (cmd) {
 2312         case DIOCSTART:
 2313                 sx_xlock(&pf_ioctl_lock);
 2314                 if (V_pf_status.running)
 2315                         error = EEXIST;
 2316                 else {
 2317                         int cpu;
 2318 
 2319                         hook_pf();
 2320                         V_pf_status.running = 1;
 2321                         V_pf_status.since = time_second;
 2322 
 2323                         CPU_FOREACH(cpu)
 2324                                 V_pf_stateid[cpu] = time_second;
 2325 
 2326                         DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 2327                 }
 2328                 break;
 2329 
 2330         case DIOCSTOP:
 2331                 sx_xlock(&pf_ioctl_lock);
 2332                 if (!V_pf_status.running)
 2333                         error = ENOENT;
 2334                 else {
 2335                         V_pf_status.running = 0;
 2336                         dehook_pf();
 2337                         V_pf_status.since = time_second;
 2338                         DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
 2339                 }
 2340                 break;
 2341 
	/*
	 * DIOCADDRULENV: add a rule supplied as a packed nvlist.
	 * Expects {ticket, pool_ticket, rule, [anchor], [anchor_call]};
	 * the "rule" nvlist is converted to a struct pf_krule and
	 * inserted via pf_ioctl_addrule(), which takes ownership of
	 * (and on failure frees) the rule.
	 */
	case DIOCADDRULENV: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t	*nvl = NULL;
		void		*nvlpacked = NULL;
		struct pf_krule *rule = NULL;
		const char	*anchor = "", *anchor_call = "";
		uint32_t	 ticket = 0, pool_ticket = 0;

#define ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)

		/* Bound the userland-supplied size before allocating. */
		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		/* All three of ticket, pool_ticket and rule are mandatory. */
		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EINVAL);
		ticket = nvlist_get_number(nvl, "ticket");

		if (! nvlist_exists_number(nvl, "pool_ticket"))
			ERROUT(EINVAL);
		pool_ticket = nvlist_get_number(nvl, "pool_ticket");

		if (! nvlist_exists_nvlist(nvl, "rule"))
			ERROUT(EINVAL);

		rule = pf_krule_alloc();
		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
		    rule);
		if (error)
			ERROUT(error);

		/* Anchor paths are optional; default to the empty string. */
		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		/* Surface any error the nvlist accumulated internally. */
		if ((error = nvlist_error(nvl)))
			ERROUT(error);

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
		    anchor_call, td);

		nvlist_destroy(nvl);
		free(nvlpacked, M_NVLIST);
		break;
#undef ERROUT
DIOCADDRULENV_error:
		pf_krule_free(rule);
		nvlist_destroy(nvl);
		free(nvlpacked, M_NVLIST);

		break;
	}
	/*
	 * DIOCADDRULE: add a rule supplied in the traditional binary
	 * pfioc_rule format.  Converts it to the kernel representation
	 * and inserts it via pf_ioctl_addrule(), which takes ownership
	 * of (and on failure frees) the rule.
	 */
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_krule		*rule;

		rule = pf_krule_alloc();
		error = pf_rule_to_krule(&pr->rule, rule);
		if (error != 0) {
			pf_krule_free(rule);
			break;
		}

		/* Force NUL-termination of the userland anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
		    pr->anchor, pr->anchor_call, td);
		break;
	}
 2422 
	/*
	 * DIOCGETRULES: report the number of rules in the requested
	 * active ruleset (as the nr the next rule would get) and the
	 * ruleset's current ticket, for a subsequent DIOCGETRULE walk.
	 */
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*tail;
		int			 rs_num;

		/* Force NUL-termination of the userland anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;

		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		/* Rule count = nr of the last rule + 1 (rules are renumbered
		 * sequentially), or 0 for an empty ruleset. */
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_krulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}
 2454 
	/*
	 * DIOCGETRULE: copy out rule number pr->nr of the requested
	 * active ruleset.  The caller's ticket must match the ruleset's
	 * current ticket (EBUSY otherwise, i.e. the ruleset changed since
	 * DIOCGETRULES).  With action PF_GET_CLR_CNTR the rule's counters
	 * are zeroed after the copy.
	 */
	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*rule;
		int			 rs_num;

		/* Force NUL-termination of the userland anchor path. */
		pr->anchor[sizeof(pr->anchor) - 1] = 0;

		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		/* Linear search for the rule with the requested number. */
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}

		pf_krule_to_rule(rule, &pr->rule);

		if (pf_kanchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);

		if (pr->action == PF_GET_CLR_CNTR) {
			pf_counter_u64_zero(&rule->evaluations);
			/* Both directions of packet/byte counters. */
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}
 2511 
	/*
	 * DIOCGETRULENV: nvlist variant of DIOCGETRULE.  Request is a
	 * packed nvlist {anchor, ruleset, ticket, nr, [clear_counter]};
	 * the reply nvlist {nr, rule, anchor info} is packed into the
	 * caller's buffer.  nv->size == 0 is a size probe: nv->len is set
	 * to the required length and 0 is returned without a copyout.
	 */
	case DIOCGETRULENV: {
		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
		nvlist_t		*nvrule = NULL;
		nvlist_t		*nvl = NULL;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*rule;
		void			*nvlpacked = NULL;
		int			 rs_num, nr;
		bool			 clear_counter = false;

#define ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		/* Copy the request in */
		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		/* NOTE(review): M_WAITOK malloc cannot return NULL; this
		 * check looks redundant — confirm before removing. */
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ruleset"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);

		if (nvlist_exists_bool(nvl, "clear_counter"))
			clear_counter = nvlist_get_bool(nvl, "clear_counter");

		/* Clearing counters mutates state: require write access. */
		if (clear_counter && !(flags & FWRITE))
			ERROUT(EACCES);

		nr = nvlist_get_number(nvl, "nr");

		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOENT);
		}

		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			ERROUT(EINVAL);
		}

		/* Stale ticket means the ruleset changed under the caller. */
		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		if ((error = nvlist_error(nvl))) {
			PF_RULES_WUNLOCK();
			ERROUT(error);
		}

		/* Linear search for the rule with the requested number. */
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		nvrule = pf_krule_to_nvrule(rule);

		/* The request nvlist is no longer needed; reuse nvl for
		 * the reply. */
		nvlist_destroy(nvl);
		nvl = nvlist_create(0);
		if (nvl == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOMEM);
		}
		nvlist_add_number(nvl, "nr", nr);
		nvlist_add_nvlist(nvl, "rule", nvrule);
		nvlist_destroy(nvrule);
		nvrule = NULL;
		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		/* Replace the request buffer with the packed reply. */
		free(nvlpacked, M_NVLIST);
		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOMEM);
		}

		if (nv->size == 0) {
			/* Size probe only: report required length. */
			PF_RULES_WUNLOCK();
			ERROUT(0);
		}
		else if (nv->size < nv->len) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOSPC);
		}

		if (clear_counter) {
			pf_counter_u64_zero(&rule->evaluations);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();

		error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
DIOCGETRULENV_error:
		/* nvlist_destroy() and free() both tolerate NULL. */
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvrule);
		nvlist_destroy(nvl);

		break;
	}
 2642 
 2643         case DIOCCHANGERULE: {
 2644                 struct pfioc_rule       *pcr = (struct pfioc_rule *)addr;
 2645                 struct pf_kruleset      *ruleset;
 2646                 struct pf_krule         *oldrule = NULL, *newrule = NULL;
 2647                 struct pfi_kkif         *kif = NULL;
 2648                 struct pf_kpooladdr     *pa;
 2649                 u_int32_t                nr = 0;
 2650                 int                      rs_num;
 2651 
 2652                 pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
 2653 
 2654                 if (pcr->action < PF_CHANGE_ADD_HEAD ||
 2655                     pcr->action > PF_CHANGE_GET_TICKET) {
 2656                         error = EINVAL;
 2657                         break;
 2658                 }
 2659                 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
 2660                         error = EINVAL;
 2661                         break;
 2662                 }
 2663 
 2664                 if (pcr->action != PF_CHANGE_REMOVE) {
 2665                         newrule = pf_krule_alloc();
 2666                         error = pf_rule_to_krule(&pcr->rule, newrule);
 2667                         if (error != 0) {
 2668                                 free(newrule, M_PFRULE);
 2669                                 break;
 2670                         }
 2671 
 2672                         if (newrule->ifname[0])
 2673                                 kif = pf_kkif_create(M_WAITOK);
 2674                         pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
 2675                         for (int i = 0; i < 2; i++) {
 2676                                 pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
 2677                                 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
 2678                         }
 2679                         newrule->states_cur = counter_u64_alloc(M_WAITOK);
 2680                         newrule->states_tot = counter_u64_alloc(M_WAITOK);
 2681                         newrule->src_nodes = counter_u64_alloc(M_WAITOK);
 2682                         newrule->cuid = td->td_ucred->cr_ruid;
 2683                         newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
 2684                         TAILQ_INIT(&newrule->rpool.list);
 2685                 }
 2686 #define ERROUT(x)       ERROUT_IOCTL(DIOCCHANGERULE_error, x)
 2687 
 2688                 PF_RULES_WLOCK();
 2689 #ifdef PF_WANT_32_TO_64_COUNTER
 2690                 if (newrule != NULL) {
 2691                         LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
 2692                         newrule->allrulelinked = true;
 2693                         V_pf_allrulecount++;
 2694                 }
 2695 #endif
 2696 
 2697                 if (!(pcr->action == PF_CHANGE_REMOVE ||
 2698                     pcr->action == PF_CHANGE_GET_TICKET) &&
 2699                     pcr->pool_ticket != V_ticket_pabuf)
 2700                         ERROUT(EBUSY);
 2701 
 2702                 ruleset = pf_find_kruleset(pcr->anchor);
 2703                 if (ruleset == NULL)
 2704                         ERROUT(EINVAL);
 2705 
 2706                 rs_num = pf_get_ruleset_number(pcr->rule.action);
 2707                 if (rs_num >= PF_RULESET_MAX)
 2708                         ERROUT(EINVAL);
 2709 
 2710                 if (pcr->action == PF_CHANGE_GET_TICKET) {
 2711                         pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
 2712                         ERROUT(0);
 2713                 } else if (pcr->ticket !=
 2714                             ruleset->rules[rs_num].active.ticket)
 2715                                 ERROUT(EINVAL);
 2716 
 2717                 if (pcr->action != PF_CHANGE_REMOVE) {
 2718                         if (newrule->ifname[0]) {
 2719                                 newrule->kif = pfi_kkif_attach(kif,
 2720                                     newrule->ifname);
 2721                                 kif = NULL;
 2722                                 pfi_kkif_ref(newrule->kif);
 2723                         } else
 2724                                 newrule->kif = NULL;
 2725 
 2726                         if (newrule->rtableid > 0 &&
 2727                             newrule->rtableid >= rt_numfibs)
 2728                                 error = EBUSY;
 2729 
 2730 #ifdef ALTQ
 2731                         /* set queue IDs */
 2732                         if (newrule->qname[0] != 0) {
 2733                                 if ((newrule->qid =
 2734                                     pf_qname2qid(newrule->qname)) == 0)
 2735                                         error = EBUSY;
 2736                                 else if (newrule->pqname[0] != 0) {
 2737                                         if ((newrule->pqid =
 2738                                             pf_qname2qid(newrule->pqname)) == 0)
 2739                                                 error = EBUSY;
 2740                                 } else
 2741                                         newrule->pqid = newrule->qid;
 2742                         }
 2743 #endif /* ALTQ */
 2744                         if (newrule->tagname[0])
 2745                                 if ((newrule->tag =
 2746                                     pf_tagname2tag(newrule->tagname)) == 0)
 2747                                         error = EBUSY;
 2748                         if (newrule->match_tagname[0])
 2749                                 if ((newrule->match_tag = pf_tagname2tag(
 2750                                     newrule->match_tagname)) == 0)
 2751                                         error = EBUSY;
 2752                         if (newrule->rt && !newrule->direction)
 2753                                 error = EINVAL;
 2754                         if (!newrule->log)
 2755                                 newrule->logif = 0;
 2756                         if (newrule->logif >= PFLOGIFS_MAX)
 2757                                 error = EINVAL;
 2758                         if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
 2759                                 error = ENOMEM;
 2760                         if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
 2761                                 error = ENOMEM;
 2762                         if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
 2763                                 error = EINVAL;
 2764                         TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
 2765                                 if (pa->addr.type == PF_ADDR_TABLE) {
 2766                                         pa->addr.p.tbl =
 2767                                             pfr_attach_table(ruleset,
 2768                                             pa->addr.v.tblname);
 2769                                         if (pa->addr.p.tbl == NULL)
 2770                                                 error = ENOMEM;
 2771                                 }
 2772 
 2773                         newrule->overload_tbl = NULL;
 2774                         if (newrule->overload_tblname[0]) {
 2775                                 if ((newrule->overload_tbl = pfr_attach_table(
 2776                                     ruleset, newrule->overload_tblname)) ==
 2777                                     NULL)
 2778                                         error = EINVAL;
 2779                                 else
 2780                                         newrule->overload_tbl->pfrkt_flags |=
 2781                                             PFR_TFLAG_ACTIVE;
 2782                         }
 2783 
 2784                         pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
 2785                         if (((((newrule->action == PF_NAT) ||
 2786                             (newrule->action == PF_RDR) ||
 2787                             (newrule->action == PF_BINAT) ||
 2788                             (newrule->rt > PF_NOPFROUTE)) &&
 2789                             !newrule->anchor)) &&
 2790                             (TAILQ_FIRST(&newrule->rpool.list) == NULL))
 2791                                 error = EINVAL;
 2792 
 2793                         if (error) {
 2794                                 pf_free_rule(newrule);
 2795                                 PF_RULES_WUNLOCK();
 2796                                 break;
 2797                         }
 2798 
 2799                         newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
 2800                 }
 2801                 pf_empty_kpool(&V_pf_pabuf);
 2802 
 2803                 if (pcr->action == PF_CHANGE_ADD_HEAD)
 2804                         oldrule = TAILQ_FIRST(
 2805                             ruleset->rules[rs_num].active.ptr);
 2806                 else if (pcr->action == PF_CHANGE_ADD_TAIL)
 2807                         oldrule = TAILQ_LAST(
 2808                             ruleset->rules[rs_num].active.ptr, pf_krulequeue);
 2809                 else {
 2810                         oldrule = TAILQ_FIRST(
 2811                             ruleset->rules[rs_num].active.ptr);
 2812                         while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
 2813                                 oldrule = TAILQ_NEXT(oldrule, entries);
 2814                         if (oldrule == NULL) {
 2815                                 if (newrule != NULL)
 2816                                         pf_free_rule(newrule);
 2817                                 PF_RULES_WUNLOCK();
 2818                                 error = EINVAL;
 2819                                 break;
 2820                         }
 2821                 }
 2822 
 2823                 if (pcr->action == PF_CHANGE_REMOVE) {
 2824                         pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
 2825                             oldrule);
 2826                         ruleset->rules[rs_num].active.rcount--;
 2827                 } else {
 2828                         if (oldrule == NULL)
 2829                                 TAILQ_INSERT_TAIL(
 2830                                     ruleset->rules[rs_num].active.ptr,
 2831                                     newrule, entries);
 2832                         else if (pcr->action == PF_CHANGE_ADD_HEAD ||
 2833                             pcr->action == PF_CHANGE_ADD_BEFORE)
 2834                                 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
 2835                         else
 2836                                 TAILQ_INSERT_AFTER(
 2837                                     ruleset->rules[rs_num].active.ptr,
 2838                                     oldrule, newrule, entries);
 2839                         ruleset->rules[rs_num].active.rcount++;
 2840                 }
 2841 
 2842                 nr = 0;
 2843                 TAILQ_FOREACH(oldrule,
 2844                     ruleset->rules[rs_num].active.ptr, entries)
 2845                         oldrule->nr = nr++;
 2846 
 2847                 ruleset->rules[rs_num].active.ticket++;
 2848 
 2849                 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
 2850                 pf_remove_if_empty_kruleset(ruleset);
 2851 
 2852                 PF_RULES_WUNLOCK();
 2853                 break;
 2854 
 2855 #undef ERROUT
 2856 DIOCCHANGERULE_error:
 2857                 PF_RULES_WUNLOCK();
 2858                 pf_krule_free(newrule);
 2859                 pf_kkif_free(kif);
 2860                 break;
 2861         }
 2862 
	/*
	 * DIOCCLRSTATES: clear (flush) states matching the caller's kill
	 * criteria; reports the number of states cleared in psk_killed.
	 */
	case DIOCCLRSTATES: {
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		struct pf_kstate_kill	 kill;

		/* Validate/convert the userland request first. */
		error = pf_state_kill_to_kstate_kill(psk, &kill);
		if (error)
			break;

		psk->psk_killed = pf_clear_states(&kill);
		break;
	}
 2874 
	/* DIOCCLRSTATESNV: nvlist variant of DIOCCLRSTATES. */
	case DIOCCLRSTATESNV: {
		error = pf_clearstates_nv((struct pfioc_nv *)addr);
		break;
	}
 2879 
	/*
	 * DIOCKILLSTATES: kill states matching the caller's criteria;
	 * the kill count accumulates into psk_killed.
	 */
	case DIOCKILLSTATES: {
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		struct pf_kstate_kill	 kill;

		/* Validate/convert the userland request first. */
		error = pf_state_kill_to_kstate_kill(psk, &kill);
		if (error)
			break;

		psk->psk_killed = 0;
		pf_killstates(&kill, &psk->psk_killed);
		break;
	}
 2892 
	/* DIOCKILLSTATESNV: nvlist variant of DIOCKILLSTATES. */
	case DIOCKILLSTATESNV: {
		error = pf_killstates_nv((struct pfioc_nv *)addr);
		break;
	}
 2897 
	/*
	 * DIOCADDSTATE: import a state entry in pfsync wire format.
	 * Delegated to the pfsync import hook; EOPNOTSUPP when pfsync
	 * is not loaded.
	 */
	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		/* Reject timeout indices outside the known range. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		if (V_pfsync_state_import_ptr != NULL) {
			PF_RULES_RLOCK();
			error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
			PF_RULES_RUNLOCK();
		} else
			error = EOPNOTSUPP;
		break;
	}
 2914 
	/*
	 * DIOCGETSTATE: look up a single state by (id, creatorid) and
	 * export it in pfsync wire format.  ENOENT if not found.
	 */
	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_kstate	*s;

		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pfsync_state_export(&ps->state, s);
		/* pf_find_state_byid returned the state locked. */
		PF_STATE_UNLOCK(s);
		break;
	}
 2929 
	/* DIOCGETSTATENV: nvlist variant of DIOCGETSTATE. */
	case DIOCGETSTATENV: {
		error = pf_getstate((struct pfioc_nv *)addr);
		break;
	}
 2934 
	/*
	 * DIOCGETSTATES: copy out all states in pfsync wire format.
	 * ps_len <= 0 is a size probe (returns an estimate based on the
	 * zone's current allocation count).  States are gathered one
	 * idhash row at a time into a growable temporary slice so the
	 * row lock is never held across copyout().
	 */
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_kstate	*s;
		struct pfsync_state	*pstore, *p;
		int			 i, nr;
		size_t			 slice_count = 16, count;
		void			*out;

		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		out = ps->ps_states;
		pstore = mallocarray(slice_count,
		    sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATES_retry:
			p = pstore;

			if (LIST_EMPTY(&ih->states))
				continue;

			PF_HASHROW_LOCK(ih);
			/* First pass: count live states in this row. */
			count = 0;
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;
				count++;
			}

			if (count > slice_count) {
				/* Slice too small: grow it (dropping the row
				 * lock for the M_WAITOK allocation) and
				 * re-examine the row from scratch. */
				PF_HASHROW_UNLOCK(ih);
				free(pstore, M_TEMP);
				slice_count = count * 2;
				pstore = mallocarray(slice_count,
				    sizeof(struct pfsync_state), M_TEMP,
				    M_WAITOK | M_ZERO);
				goto DIOCGETSTATES_retry;
			}

			/* Stop when the userland buffer would overflow. */
			if ((nr+count) * sizeof(*p) > ps->ps_len) {
				PF_HASHROW_UNLOCK(ih);
				goto DIOCGETSTATES_full;
			}

			/* Second pass: export the row into the slice. */
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;

				pfsync_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
			/* copyout may sleep — done after dropping the lock. */
			error = copyout(pstore, out,
			    sizeof(struct pfsync_state) * count);
			if (error)
				break;
			out = ps->ps_states + nr;
		}
DIOCGETSTATES_full:
		/* Report the number of bytes actually produced. */
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		free(pstore, M_TEMP);

		break;
	}
 3007 
	/*
	 * DIOCGETSTATESV2: like DIOCGETSTATES but exports the versioned
	 * struct pf_state_export format.  Rejects requests for a newer
	 * version than this kernel supports; otherwise follows the same
	 * size-probe / per-row slice / retry scheme as DIOCGETSTATES.
	 */
	case DIOCGETSTATESV2: {
		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
		struct pf_kstate	*s;
		struct pf_state_export	*pstore, *p;
		int i, nr;
		size_t slice_count = 16, count;
		void *out;

		if (ps->ps_req_version > PF_STATE_VERSION) {
			error = ENOTSUP;
			break;
		}

		if (ps->ps_len <= 0) {
			/* Size probe: estimate from the zone's live count. */
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pf_state_export) * nr;
			break;
		}

		out = ps->ps_states;
		pstore = mallocarray(slice_count,
		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATESV2_retry:
			p = pstore;

			if (LIST_EMPTY(&ih->states))
				continue;

			PF_HASHROW_LOCK(ih);
			/* First pass: count live states in this row. */
			count = 0;
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;
				count++;
			}

			if (count > slice_count) {
				/* Grow the slice outside the row lock and
				 * re-examine the row from scratch. */
				PF_HASHROW_UNLOCK(ih);
				free(pstore, M_TEMP);
				slice_count = count * 2;
				pstore = mallocarray(slice_count,
				    sizeof(struct pf_state_export), M_TEMP,
				    M_WAITOK | M_ZERO);
				goto DIOCGETSTATESV2_retry;
			}

			/* Stop when the userland buffer would overflow. */
			if ((nr+count) * sizeof(*p) > ps->ps_len) {
				PF_HASHROW_UNLOCK(ih);
				goto DIOCGETSTATESV2_full;
			}

			/* Second pass: export the row into the slice. */
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;

				pf_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
			/* copyout may sleep — done after dropping the lock. */
			error = copyout(pstore, out,
			    sizeof(struct pf_state_export) * count);
			if (error)
				break;
			out = ps->ps_states + nr;
		}
DIOCGETSTATESV2_full:
		/* Report the number of bytes actually produced. */
		ps->ps_len = nr * sizeof(struct pf_state_export);
		free(pstore, M_TEMP);

		break;
	}
 3085 
	case DIOCGETSTATUS: {
		/*
		 * Export a snapshot of the global pf status (running flag,
		 * counters, ruleset checksum, status-interface stats) into
		 * the caller's struct pf_status.  Taken under the rules
		 * read lock so the snapshot is consistent with concurrent
		 * ruleset updates.
		 */
		struct pf_status *s = (struct pf_status *)addr;

		PF_RULES_RLOCK();
		s->running = V_pf_status.running;
		s->since   = V_pf_status.since;
		s->debug   = V_pf_status.debug;
		s->hostid  = V_pf_status.hostid;
		s->states  = V_pf_status.states;
		s->src_nodes = V_pf_status.src_nodes;

		/* Flatten the per-CPU counter(9) objects into plain u64s. */
		for (int i = 0; i < PFRES_MAX; i++)
			s->counters[i] =
			    counter_u64_fetch(V_pf_status.counters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			s->lcounters[i] =
			    counter_u64_fetch(V_pf_status.lcounters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			s->fcounters[i] =
			    pf_counter_u64_fetch(&V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			s->scounters[i] =
			    counter_u64_fetch(V_pf_status.scounters[i]);

		bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
		bcopy(V_pf_status.pf_chksum, s->pf_chksum,
		    PF_MD5_DIGEST_LENGTH);

		/* Fill in per-interface byte/packet stats for s->ifname. */
		pfi_update_status(s->ifname, s);
		PF_RULES_RUNLOCK();
		break;
	}
 3118 
 3119         case DIOCGETSTATUSNV: {
 3120                 error = pf_getstatus((struct pfioc_nv *)addr);
 3121                 break;
 3122         }
 3123 
	case DIOCSETSTATUSIF: {
		/*
		 * Set (or, with an empty name, clear) the "status
		 * interface" whose per-interface stats DIOCGETSTATUS
		 * reports.
		 *
		 * NOTE(review): the clear path bzero()s V_pf_status.ifname
		 * without taking PF_RULES_WLOCK, while the set path does
		 * take it — presumably a benign race, but worth confirming.
		 */
		struct pfioc_if *pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(V_pf_status.ifname, IFNAMSIZ);
			break;
		}
		PF_RULES_WLOCK();
		/* pf_user_strcpy() rejects unterminated userland strings. */
		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
		PF_RULES_WUNLOCK();
		break;
	}
 3136 
	case DIOCCLRSTATUS: {
		/*
		 * Zero all global pf counters, reset the "since" timestamp
		 * and clear the status interface's per-interface stats.
		 *
		 * NOTE(review): lcounters are zeroed up to KLCNT_MAX here
		 * while DIOCGETSTATUS copies out only LCNT_MAX of them —
		 * presumably the kernel-side array is the larger superset;
		 * confirm against the pfvar.h definitions.
		 */
		PF_RULES_WLOCK();
		for (int i = 0; i < PFRES_MAX; i++)
			counter_u64_zero(V_pf_status.counters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			pf_counter_u64_zero(&V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			counter_u64_zero(V_pf_status.scounters[i]);
		for (int i = 0; i < KLCNT_MAX; i++)
			counter_u64_zero(V_pf_status.lcounters[i]);
		V_pf_status.since = time_second;
		if (*V_pf_status.ifname)
			pfi_update_status(V_pf_status.ifname, NULL);
		PF_RULES_WUNLOCK();
		break;
	}
 3153 
	case DIOCNATLOOK: {
		/*
		 * Look up the NAT translation for a given address/port
		 * pair: find the matching state entry and report the
		 * translated source/destination back to userland.
		 */
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_kstate	*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/*
		 * Reject lookups missing a protocol or address, and
		 * port-less TCP/UDP lookups, before touching state.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			bzero(&key, sizeof(key));
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			/* m counts how many states matched the key. */
			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* XXXGL: not locked read */
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
			} else
				error = ENOENT;
		}
		break;
	}
 3198 
	case DIOCSETTIMEOUT: {
		/*
		 * Set one of the default-rule state timeouts (PFTM_*).
		 * The previous value is returned to the caller via
		 * pt->seconds.
		 */
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		/* A zero purge interval would stall purging; clamp to 1s. */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		/*
		 * If the purge interval shrank, kick the purge thread so
		 * it re-evaluates its sleep instead of finishing the old,
		 * longer interval first.
		 */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}
 3219 
 3220         case DIOCGETTIMEOUT: {
 3221                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
 3222 
 3223                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
 3224                         error = EINVAL;
 3225                         break;
 3226                 }
 3227                 PF_RULES_RLOCK();
 3228                 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
 3229                 PF_RULES_RUNLOCK();
 3230                 break;
 3231         }
 3232 
 3233         case DIOCGETLIMIT: {
 3234                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
 3235 
 3236                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
 3237                         error = EINVAL;
 3238                         break;
 3239                 }
 3240                 PF_RULES_RLOCK();
 3241                 pl->limit = V_pf_limits[pl->index].limit;
 3242                 PF_RULES_RUNLOCK();
 3243                 break;
 3244         }
 3245 
	case DIOCSETLIMIT: {
		/*
		 * Set the hard limit of one pf memory pool (UMA zone cap).
		 * The previous limit is handed back via pl->limit.
		 * Validation happens under the write lock because the
		 * zone pointer itself is protected by it.
		 */
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		PF_RULES_WLOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    V_pf_limits[pl->index].zone == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
		old_limit = V_pf_limits[pl->index].limit;
		V_pf_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		PF_RULES_WUNLOCK();
		break;
	}
 3264 
 3265         case DIOCSETDEBUG: {
 3266                 u_int32_t       *level = (u_int32_t *)addr;
 3267 
 3268                 PF_RULES_WLOCK();
 3269                 V_pf_status.debug = *level;
 3270                 PF_RULES_WUNLOCK();
 3271                 break;
 3272         }
 3273 
	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		/*
		 * Zero evaluation/packet/byte counters on every active
		 * filter rule of the main ruleset.  The [2] index is the
		 * in/out direction pair.
		 */
		struct pf_kruleset	*ruleset = &pf_main_ruleset;
		struct pf_krule		*rule;

		PF_RULES_WLOCK();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			pf_counter_u64_zero(&rule->evaluations);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
		}
		PF_RULES_WUNLOCK();
		break;
	}
 3291 
	case DIOCGIFSPEEDV0:
	case DIOCGIFSPEEDV1: {
		/*
		 * Report an interface's link speed.  The v0 ABI only has
		 * a 32-bit field, so baudrate32 is clamped to UINT_MAX;
		 * v1 callers additionally get the full 64-bit value.
		 */
		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
		struct pf_ifspeed_v1	ps;
		struct ifnet		*ifp;

		if (psp->ifname[0] == '\0') {
			error = EINVAL;
			break;
		}

		/* Copy to a bounded local buffer before ifunit() lookup. */
		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
		if (error != 0)
			break;
		ifp = ifunit(ps.ifname);
		if (ifp != NULL) {
			psp->baudrate32 =
			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
			if (cmd == DIOCGIFSPEEDV1)
				psp->baudrate = ifp->if_baudrate;
		} else {
			error = EINVAL;
		}
		break;
	}
 3317 
 3318 #ifdef ALTQ
	case DIOCSTARTALTQ: {
		/*
		 * Enable ALTQ on every interface on the active list and
		 * mark ALTQ globally running.  If enabling any interface
		 * fails the loop stops, but already-enabled interfaces
		 * are left enabled.
		 */
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}
 3337 
	case DIOCSTOPALTQ: {
		/*
		 * Mirror of DIOCSTARTALTQ: disable ALTQ on every active
		 * interface and clear the global running flag on success.
		 */
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}
 3356 
	case DIOCADDALTQV0:
	case DIOCADDALTQV1: {
		/*
		 * Stage one ALTQ entry (an interface discipline or a
		 * queue on one) onto the inactive lists, as part of the
		 * ticketed two-phase ruleset commit.  The caller's ticket
		 * must match the current inactive-ALTQ ticket.
		 */
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq, *a;
		struct ifnet		*ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
		/* Converts from the v0 or v1 user ABI based on cmd size. */
		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
		if (error)
			break;
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		/* A missing interface is tolerated and flagged, not fatal. */
		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(ifp, altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		/* Queues and interface disciplines live on separate lists. */
		if (altq->qname[0] != 0)
			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		else
			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
		/* version error check done on import above */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_WUNLOCK();
		break;
	}
 3418 
	case DIOCGETALTQSV0:
	case DIOCGETALTQSV1: {
		/*
		 * Report the total number of active ALTQ entries
		 * (interface disciplines plus queues) and the ticket a
		 * subsequent DIOCGETALTQ iteration must present.
		 */
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
			pa->nr++;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}
 3434 
	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		/*
		 * Fetch the pa->nr'th active ALTQ entry.  EBUSY is used
		 * both for a stale ticket and an out-of-range index
		 * (historical interface quirk, not ENOENT).
		 */
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		/* Export using the ABI version implied by the ioctl size. */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}
 3456 
	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;
 3462 
	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		/*
		 * Copy scheduler statistics for the pq->nr'th active ALTQ
		 * entry out to the caller's buffer.
		 *
		 * NOTE(review): altq is dereferenced by altq_getqstats()
		 * after PF_RULES_RUNLOCK() is dropped — presumably safe
		 * while the active ticket is unchanged, but confirm.
		 */
		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq		*altq;
		int			 nbytes;
		u_int32_t		 version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		/* Stats are meaningless if the interface went away. */
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
 3501 #endif /* ALTQ */
 3502 
 3503         case DIOCBEGINADDRS: {
 3504                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
 3505 
 3506                 PF_RULES_WLOCK();
 3507                 pf_empty_kpool(&V_pf_pabuf);
 3508                 pp->ticket = ++V_ticket_pabuf;
 3509                 PF_RULES_WUNLOCK();
 3510                 break;
 3511         }
 3512 
 3513         case DIOCADDADDR: {
 3514                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
 3515                 struct pf_kpooladdr     *pa;
 3516                 struct pfi_kkif         *kif = NULL;
 3517 
 3518 #ifndef INET
 3519                 if (pp->af == AF_INET) {
 3520                         error = EAFNOSUPPORT;
 3521                         break;
 3522                 }
 3523 #endif /* INET */
 3524 #ifndef INET6
 3525                 if (pp->af == AF_INET6) {
 3526                         error = EAFNOSUPPORT;
 3527                         break;
 3528                 }
 3529 #endif /* INET6 */
 3530                 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
 3531                     pp->addr.addr.type != PF_ADDR_DYNIFTL &&
 3532                     pp->addr.addr.type != PF_ADDR_TABLE) {
 3533                         error = EINVAL;
 3534                         break;
 3535                 }
 3536                 if (pp->addr.addr.p.dyn != NULL) {
 3537                         error = EINVAL;
 3538                         break;
 3539                 }
 3540                 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
 3541                 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
 3542                 if (error != 0)
 3543                         break;
 3544                 if (pa->ifname[0])
 3545                         kif = pf_kkif_create(M_WAITOK);
 3546                 PF_RULES_WLOCK();
 3547                 if (pp->ticket != V_ticket_pabuf) {
 3548                         PF_RULES_WUNLOCK();
 3549                         if (pa->ifname[0])
 3550                                 pf_kkif_free(kif);
 3551                         free(pa, M_PFRULE);
 3552                         error = EBUSY;
 3553                         break;
 3554                 }
 3555                 if (pa->ifname[0]) {
 3556                         pa->kif = pfi_kkif_attach(kif, pa->ifname);
 3557                         kif = NULL;
 3558                         pfi_kkif_ref(pa->kif);
 3559                 } else
 3560                         pa->kif = NULL;
 3561                 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
 3562                     pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
 3563                         if (pa->ifname[0])
 3564                                 pfi_kkif_unref(pa->kif);
 3565                         PF_RULES_WUNLOCK();
 3566                         free(pa, M_PFRULE);
 3567                         break;
 3568                 }
 3569                 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
 3570                 PF_RULES_WUNLOCK();
 3571                 break;
 3572         }
 3573 
	case DIOCGETADDRS: {
		/*
		 * Count the addresses in the pool attached to the rule
		 * identified by (anchor, ticket, r_action, r_num), so the
		 * caller can iterate them with DIOCGETADDR.
		 */
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;

		/* Defensive NUL-termination of the userland anchor path. */
		pp->anchor[sizeof(pp->anchor) - 1] = 0;
		pp->nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}
 3595 
 3596         case DIOCGETADDR: {
 3597                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
 3598                 struct pf_kpool         *pool;
 3599                 struct pf_kpooladdr     *pa;
 3600                 u_int32_t                nr = 0;
 3601 
 3602                 pp->anchor[sizeof(pp->anchor) - 1] = 0;
 3603 
 3604                 PF_RULES_RLOCK();
 3605                 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
 3606                     pp->r_num, 0, 1, 1);
 3607                 if (pool == NULL) {
 3608                         PF_RULES_RUNLOCK();
 3609                         error = EBUSY;
 3610                         break;
 3611                 }
 3612                 pa = TAILQ_FIRST(&pool->list);
 3613                 while ((pa != NULL) && (nr < pp->nr)) {
 3614                         pa = TAILQ_NEXT(pa, entries);
 3615                         nr++;
 3616                 }
 3617                 if (pa == NULL) {
 3618                         PF_RULES_RUNLOCK();
 3619                         error = EBUSY;
 3620                         break;
 3621                 }
 3622                 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
 3623                 pf_addr_copyout(&pp->addr.addr);
 3624                 PF_RULES_RUNLOCK();
 3625                 break;
 3626         }
 3627 
	case DIOCCHANGEADDR: {
		/*
		 * Add, replace or remove a single address in a live
		 * rule's pool.  Allocation and validation run unlocked;
		 * the list surgery happens under the rules write lock,
		 * with ERROUT/goto unwinding for the locked error paths.
		 */
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_kruleset	*ruleset;
		struct pfi_kkif		*kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = 0;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		/* Kernel pointer fields must not be set from userland. */
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			/* Pre-allocate while we may still sleep (M_WAITOK). */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				/* pfi_kkif_attach() consumes kif. */
				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		/* Locate the insertion/removal anchor point. */
		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			/* ADD_BEFORE/ADD_AFTER/REMOVE address by index. */
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		/*
		 * Reset the round-robin cursor.
		 * NOTE(review): if REMOVE deleted the last address,
		 * pool->cur is NULL here and the PF_ACPY dereferences it —
		 * verify an empty pool cannot reach this point.
		 */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		/* Locked error path: release everything we staged. */
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}
 3762 
	case DIOCGETRULESETS: {
		/*
		 * Count the direct child anchors of the ruleset named by
		 * pr->path, so the caller can enumerate them one by one
		 * with DIOCGETRULESET.
		 */
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			/* Main ruleset: count top-level anchors globally. */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}
 3790 
	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;
		u_int32_t		 nr = 0;

		/*
		 * Return in pr->name the name of the pr->nr-th child anchor
		 * of the ruleset named by pr->path (companion to
		 * DIOCGETRULESETS, which returns the count).
		 */
		pr->path[sizeof(pr->path) - 1] = 0;	/* force NUL-termination */

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			/* Main ruleset: walk only top-level anchors. */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		/* Index out of range (e.g. anchors changed since the count). */
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}
 3828 
 3829         case DIOCRCLRTABLES: {
 3830                 struct pfioc_table *io = (struct pfioc_table *)addr;
 3831 
 3832                 if (io->pfrio_esize != 0) {
 3833                         error = ENODEV;
 3834                         break;
 3835                 }
 3836                 PF_RULES_WLOCK();
 3837                 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
 3838                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
 3839                 PF_RULES_WUNLOCK();
 3840                 break;
 3841         }
 3842 
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		/* Userland must pass an array of struct pfr_table. */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		/* Copy the table list in before taking any pf locks. */
		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
 3874 
	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		/* Userland must pass an array of struct pfr_table. */
		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		/* Copy the table list in before taking any pf locks. */
		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
 3906 
	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		/* Never hand back more tables than currently exist. */
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		/*
		 * M_NOWAIT: we must not sleep while holding the rules read
		 * lock; mallocarray() returns NULL (instead of panicking)
		 * on multiplication overflow in that case.
		 */
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* Copy out only after dropping the lock. */
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}
 3943 
	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		/*
		 * Lock order: table-stats lock before the rules read lock;
		 * DIOCRCLRTSTATS below acquires them in the same order.
		 */
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			error = EINVAL;
			break;
		}
		/* Never hand back more entries than currently exist. */
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		/* M_NOWAIT: must not sleep while holding the locks above. */
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		/* Copy out only after dropping the locks. */
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}
 3983 
	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/* We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so. */
			/* Note: error stays 0 here; only the size is clamped. */
			io->pfrio_size = pf_ioctl_maxcount;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		/* Same lock order as DIOCRGETTSTATS: stats lock, then rules. */
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
 4021 
	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		/* Clamp the request to the current number of tables. */
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		/*
		 * NOTE(review): the lock is dropped here so copyin() may
		 * sleep; the table set can change before the WLOCK below is
		 * taken — presumably pfr_set_tflags() tolerates stale
		 * entries; confirm.
		 */
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}
 4060 
 4061         case DIOCRCLRADDRS: {
 4062                 struct pfioc_table *io = (struct pfioc_table *)addr;
 4063 
 4064                 if (io->pfrio_esize != 0) {
 4065                         error = ENODEV;
 4066                         break;
 4067                 }
 4068                 PF_RULES_WLOCK();
 4069                 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
 4070                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
 4071                 PF_RULES_WUNLOCK();
 4072                 break;
 4073         }
 4074 
	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must pass an array of struct pfr_addr. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		/* With FEEDBACK, per-address status is copied back out. */
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
 4108 
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must pass an array of struct pfr_addr. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		/* With FEEDBACK, per-address status is copied back out. */
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
 4142 
	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		/*
		 * The buffer is shared: pfrio_size entries are copied in,
		 * up to pfrio_size2 entries may be reported back, so size
		 * the kernel copy for the larger of the two.
		 */
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		/* With FEEDBACK, per-address status is copied back out. */
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
 4181 
	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		/* M_WAITOK is fine here: allocation precedes the lock. */
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* Copy out only after dropping the lock. */
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
 4209 
	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		/* M_WAITOK is fine here: allocation precedes the lock. */
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* Copy out only after dropping the lock. */
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}
 4237 
	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must pass an array of struct pfr_addr. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		/* With FEEDBACK, per-address status is copied back out. */
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
 4271 
	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must pass an array of struct pfr_addr. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		/* Read-only membership test: the read lock suffices. */
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		/* Per-address match results are copied back out. */
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
 4305 
	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		/* Userland must pass an array of struct pfr_addr. */
		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		/*
		 * Define the table inside an inactive (transaction) ruleset;
		 * pfrio_ticket must match the DIOCXBEGIN ticket for it.
		 */
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}
 4337 
 4338         case DIOCOSFPADD: {
 4339                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
 4340                 PF_RULES_WLOCK();
 4341                 error = pf_osfp_add(io);
 4342                 PF_RULES_WUNLOCK();
 4343                 break;
 4344         }
 4345 
 4346         case DIOCOSFPGET: {
 4347                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
 4348                 PF_RULES_RLOCK();
 4349                 error = pf_osfp_get(io);
 4350                 PF_RULES_RUNLOCK();
 4351                 break;
 4352         }
 4353 
	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioes, *ioe;
		size_t			 totlen;
		int			 i;

		/*
		 * Begin a transaction over a batch of rulesets/tables (and
		 * ALTQ) so they can later be committed atomically with
		 * DIOCXCOMMIT.  One ticket is handed back per element.
		 */
		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* All elements are begun under a single write lock. */
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			/* Anchor name comes from userland; terminate it. */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				/* ALTQ transactions are global: no anchor. */
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		/* Return the tickets to userland. */
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}
 4427 
	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		size_t			 totlen;
		int			 i;

		/*
		 * Abort a transaction started by DIOCXBEGIN, discarding the
		 * inactive rulesets/tables named by the per-element tickets.
		 */
		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		/* Bound the request before sizing the kernel copy. */
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* All elements are rolled back under a single write lock. */
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			/* Anchor name comes from userland; terminate it. */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				/* ALTQ transactions are global: no anchor. */
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}
 4500 
 4501         case DIOCXCOMMIT: {
 4502                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
 4503                 struct pfioc_trans_e    *ioe, *ioes;
 4504                 struct pf_kruleset      *rs;
 4505                 size_t                   totlen;
 4506                 int                      i;
 4507 
 4508                 if (io->esize != sizeof(*ioe)) {
 4509                         error = ENODEV;
 4510                         break;
 4511                 }
 4512 
 4513                 if (io->size < 0 ||
 4514                     io->size > pf_ioctl_maxcount ||
 4515                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
 4516                         error = EINVAL;
 4517                         break;
 4518                 }
 4519 
 4520                 totlen = sizeof(struct pfioc_trans_e) * io->size;
 4521                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
 4522                     M_TEMP, M_WAITOK);
 4523                 error = copyin(io->array, ioes, totlen);
 4524                 if (error) {
 4525                         free(ioes, M_TEMP);
 4526                         break;
 4527                 }
 4528                 PF_RULES_WLOCK();
 4529                 /* First makes sure everything will succeed. */
 4530                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
 4531                         ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
 4532                         switch (ioe->rs_num) {
 4533 #ifdef ALTQ
 4534                         case PF_RULESET_ALTQ:
 4535                                 if (ioe->anchor[0]) {
 4536                                         PF_RULES_WUNLOCK();
 4537                                         free(ioes, M_TEMP);
 4538                                         error = EINVAL;
 4539                                         goto fail;
 4540                                 }
 4541                                 if (!V_altqs_inactive_open || ioe->ticket !=
 4542                                     V_ticket_altqs_inactive) {
 4543                                         PF_RULES_WUNLOCK();
 4544                                         free(ioes, M_TEMP);
 4545                                         error = EBUSY;
 4546                                         goto fail;
 4547                                 }
 4548                                 break;
 4549 #endif /* ALTQ */
 4550                         case PF_RULESET_TABLE:
 4551                                 rs = pf_find_kruleset(ioe->anchor);
 4552                                 if (rs == NULL || !rs->topen || ioe->ticket !=
 4553                                     rs->tticket) {
 4554                                         PF_RULES_WUNLOCK();
 4555                                         free(ioes, M_TEMP);
 4556                                         error = EBUSY;
 4557                                         goto fail;
 4558                                 }
 4559                                 break;
 4560                         default:
 4561                                 if (ioe->rs_num < 0 || ioe->rs_num >=
 4562                                     PF_RULESET_MAX) {
 4563                                         PF_RULES_WUNLOCK();
 4564                                         free(ioes, M_TEMP);
 4565                                         error = EINVAL;
 4566                                         goto fail;
 4567                                 }
 4568                                 rs = pf_find_kruleset(ioe->anchor);
 4569                                 if (rs == NULL ||
 4570                                     !rs->rules[ioe->rs_num].inactive.open ||
 4571                                     rs->rules[ioe->rs_num].inactive.ticket !=
 4572                                     ioe->ticket) {
 4573                                         PF_RULES_WUNLOCK();
 4574                                         free(ioes, M_TEMP);
 4575                                         error = EBUSY;
 4576                                         goto fail;
 4577                                 }
 4578                                 break;
 4579                         }
 4580                 }
 4581                 /* Now do the commit - no errors should happen here. */
 4582                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
 4583                         switch (ioe->rs_num) {
 4584 #ifdef ALTQ
 4585                         case PF_RULESET_ALTQ:
 4586                                 if ((error = pf_commit_altq(ioe->ticket))) {
 4587                                         PF_RULES_WUNLOCK();
 4588                                         free(ioes, M_TEMP);
 4589                                         goto fail; /* really bad */
 4590                                 }
 4591                                 break;
 4592 #endif /* ALTQ */
 4593                         case PF_RULESET_TABLE:
 4594                             {
 4595                                 struct pfr_table table;
 4596 
 4597                                 bzero(&table, sizeof(table));
 4598                                 (void)strlcpy(table.pfrt_anchor, ioe->anchor,
 4599                                     sizeof(table.pfrt_anchor));
 4600                                 if ((error = pfr_ina_commit(&table,
 4601                                     ioe->ticket, NULL, NULL, 0))) {
 4602                                         PF_RULES_WUNLOCK();
 4603                                         free(ioes, M_TEMP);
 4604                                         goto fail; /* really bad */
 4605                                 }
 4606                                 break;
 4607                             }
 4608                         default:
 4609                                 if ((error = pf_commit_rules(ioe->ticket,
 4610                                     ioe->rs_num, ioe->anchor))) {
 4611                                         PF_RULES_WUNLOCK();
 4612                                         free(ioes, M_TEMP);
 4613                                         goto fail; /* really bad */
 4614                                 }
 4615                                 break;
 4616                         }
 4617                 }
 4618                 PF_RULES_WUNLOCK();
 4619                 free(ioes, M_TEMP);
 4620                 break;
 4621         }
 4622 
 4623         case DIOCGETSRCNODES: {
 4624                 struct pfioc_src_nodes  *psn = (struct pfioc_src_nodes *)addr;
 4625                 struct pf_srchash       *sh;
 4626                 struct pf_ksrc_node     *n;
 4627                 struct pf_src_node      *p, *pstore;
 4628                 uint32_t                 i, nr = 0;
 4629 
 4630                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
 4631                                 i++, sh++) {
 4632                         PF_HASHROW_LOCK(sh);
 4633                         LIST_FOREACH(n, &sh->nodes, entry)
 4634                                 nr++;
 4635                         PF_HASHROW_UNLOCK(sh);
 4636                 }
 4637 
 4638                 psn->psn_len = min(psn->psn_len,
 4639                     sizeof(struct pf_src_node) * nr);
 4640 
 4641                 if (psn->psn_len == 0) {
 4642                         psn->psn_len = sizeof(struct pf_src_node) * nr;
 4643                         break;
 4644                 }
 4645 
 4646                 nr = 0;
 4647 
 4648                 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
 4649                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
 4650                     i++, sh++) {
 4651                     PF_HASHROW_LOCK(sh);
 4652                     LIST_FOREACH(n, &sh->nodes, entry) {
 4653 
 4654                         if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
 4655                                 break;
 4656 
 4657                         pf_src_node_copy(n, p);
 4658 
 4659                         p++;
 4660                         nr++;
 4661                     }
 4662                     PF_HASHROW_UNLOCK(sh);
 4663                 }
 4664                 error = copyout(pstore, psn->psn_src_nodes,
 4665                     sizeof(struct pf_src_node) * nr);
 4666                 if (error) {
 4667                         free(pstore, M_TEMP);
 4668                         break;
 4669                 }
 4670                 psn->psn_len = sizeof(struct pf_src_node) * nr;
 4671                 free(pstore, M_TEMP);
 4672                 break;
 4673         }
 4674 
 4675         case DIOCCLRSRCNODES: {
 4676                 pf_clear_srcnodes(NULL);
 4677                 pf_purge_expired_src_nodes();
 4678                 break;
 4679         }
 4680 
 4681         case DIOCKILLSRCNODES:
 4682                 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
 4683                 break;
 4684 
 4685         case DIOCKEEPCOUNTERS:
 4686                 error = pf_keepcounters((struct pfioc_nv *)addr);
 4687                 break;
 4688 
 4689         case DIOCGETSYNCOOKIES:
 4690                 error = pf_get_syncookies((struct pfioc_nv *)addr);
 4691                 break;
 4692 
 4693         case DIOCSETSYNCOOKIES:
 4694                 error = pf_set_syncookies((struct pfioc_nv *)addr);
 4695                 break;
 4696 
 4697         case DIOCSETHOSTID: {
 4698                 u_int32_t       *hostid = (u_int32_t *)addr;
 4699 
 4700                 PF_RULES_WLOCK();
 4701                 if (*hostid == 0)
 4702                         V_pf_status.hostid = arc4random();
 4703                 else
 4704                         V_pf_status.hostid = *hostid;
 4705                 PF_RULES_WUNLOCK();
 4706                 break;
 4707         }
 4708 
 4709         case DIOCOSFPFLUSH:
 4710                 PF_RULES_WLOCK();
 4711                 pf_osfp_flush();
 4712                 PF_RULES_WUNLOCK();
 4713                 break;
 4714 
 4715         case DIOCIGETIFACES: {
 4716                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
 4717                 struct pfi_kif *ifstore;
 4718                 size_t bufsiz;
 4719 
 4720                 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
 4721                         error = ENODEV;
 4722                         break;
 4723                 }
 4724 
 4725                 if (io->pfiio_size < 0 ||
 4726                     io->pfiio_size > pf_ioctl_maxcount ||
 4727                     WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
 4728                         error = EINVAL;
 4729                         break;
 4730                 }
 4731 
 4732                 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
 4733 
 4734                 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
 4735                 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
 4736                     M_TEMP, M_WAITOK | M_ZERO);
 4737 
 4738                 PF_RULES_RLOCK();
 4739                 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
 4740                 PF_RULES_RUNLOCK();
 4741                 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
 4742                 free(ifstore, M_TEMP);
 4743                 break;
 4744         }
 4745 
 4746         case DIOCSETIFFLAG: {
 4747                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
 4748 
 4749                 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
 4750 
 4751                 PF_RULES_WLOCK();
 4752                 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
 4753                 PF_RULES_WUNLOCK();
 4754                 break;
 4755         }
 4756 
 4757         case DIOCCLRIFFLAG: {
 4758                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
 4759 
 4760                 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
 4761 
 4762                 PF_RULES_WLOCK();
 4763                 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
 4764                 PF_RULES_WUNLOCK();
 4765                 break;
 4766         }
 4767 
 4768         default:
 4769                 error = ENODEV;
 4770                 break;
 4771         }
 4772 fail:
 4773         if (sx_xlocked(&pf_ioctl_lock))
 4774                 sx_xunlock(&pf_ioctl_lock);
 4775         CURVNET_RESTORE();
 4776 
 4777 #undef ERROUT_IOCTL
 4778 
 4779         return (error);
 4780 }
 4781 
 4782 void
 4783 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st)
 4784 {
 4785         bzero(sp, sizeof(struct pfsync_state));
 4786 
 4787         /* copy from state key */
 4788         sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
 4789         sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
 4790         sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
 4791         sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
 4792         sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
 4793         sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
 4794         sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
 4795         sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
 4796         sp->proto = st->key[PF_SK_WIRE]->proto;
 4797         sp->af = st->key[PF_SK_WIRE]->af;
 4798 
 4799         /* copy from state */
 4800         strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
 4801         bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
 4802         sp->creation = htonl(time_uptime - st->creation);
 4803         sp->expire = pf_state_expires(st);
 4804         if (sp->expire <= time_uptime)
 4805                 sp->expire = htonl(0);
 4806         else
 4807                 sp->expire = htonl(sp->expire - time_uptime);
 4808 
 4809         sp->direction = st->direction;
 4810         sp->log = st->log;
 4811         sp->timeout = st->timeout;
 4812         sp->state_flags = st->state_flags;
 4813         if (st->src_node)
 4814                 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
 4815         if (st->nat_src_node)
 4816                 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
 4817 
 4818         sp->id = st->id;
 4819         sp->creatorid = st->creatorid;
 4820         pf_state_peer_hton(&st->src, &sp->src);
 4821         pf_state_peer_hton(&st->dst, &sp->dst);
 4822 
 4823         if (st->rule.ptr == NULL)
 4824                 sp->rule = htonl(-1);
 4825         else
 4826                 sp->rule = htonl(st->rule.ptr->nr);
 4827         if (st->anchor.ptr == NULL)
 4828                 sp->anchor = htonl(-1);
 4829         else
 4830                 sp->anchor = htonl(st->anchor.ptr->nr);
 4831         if (st->nat_rule.ptr == NULL)
 4832                 sp->nat_rule = htonl(-1);
 4833         else
 4834                 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
 4835 
 4836         pf_state_counter_hton(st->packets[0], sp->packets[0]);
 4837         pf_state_counter_hton(st->packets[1], sp->packets[1]);
 4838         pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
 4839         pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
 4840 }
 4841 
 4842 void
 4843 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
 4844 {
 4845         bzero(sp, sizeof(*sp));
 4846 
 4847         sp->version = PF_STATE_VERSION;
 4848 
 4849         /* copy from state key */
 4850         sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
 4851         sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
 4852         sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
 4853         sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
 4854         sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
 4855         sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
 4856         sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
 4857         sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
 4858         sp->proto = st->key[PF_SK_WIRE]->proto;
 4859         sp->af = st->key[PF_SK_WIRE]->af;
 4860 
 4861         /* copy from state */
 4862         strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
 4863         strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
 4864             sizeof(sp->orig_ifname));
 4865         bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
 4866         sp->creation = htonl(time_uptime - st->creation);
 4867         sp->expire = pf_state_expires(st);
 4868         if (sp->expire <= time_uptime)
 4869                 sp->expire = htonl(0);
 4870         else
 4871                 sp->expire = htonl(sp->expire - time_uptime);
 4872 
 4873         sp->direction = st->direction;
 4874         sp->log = st->log;
 4875         sp->timeout = st->timeout;
 4876         sp->state_flags = st->state_flags;
 4877         if (st->src_node)
 4878                 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
 4879         if (st->nat_src_node)
 4880                 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
 4881 
 4882         sp->id = st->id;
 4883         sp->creatorid = st->creatorid;
 4884         pf_state_peer_hton(&st->src, &sp->src);
 4885         pf_state_peer_hton(&st->dst, &sp->dst);
 4886 
 4887         if (st->rule.ptr == NULL)
 4888                 sp->rule = htonl(-1);
 4889         else
 4890                 sp->rule = htonl(st->rule.ptr->nr);
 4891         if (st->anchor.ptr == NULL)
 4892                 sp->anchor = htonl(-1);
 4893         else
 4894                 sp->anchor = htonl(st->anchor.ptr->nr);
 4895         if (st->nat_rule.ptr == NULL)
 4896                 sp->nat_rule = htonl(-1);
 4897         else
 4898                 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
 4899 
 4900         sp->packets[0] = st->packets[0];
 4901         sp->packets[1] = st->packets[1];
 4902         sp->bytes[0] = st->bytes[0];
 4903         sp->bytes[1] = st->bytes[1];
 4904 }
 4905 
 4906 static void
 4907 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
 4908 {
 4909         struct pfr_ktable *kt;
 4910 
 4911         KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
 4912 
 4913         kt = aw->p.tbl;
 4914         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
 4915                 kt = kt->pfrkt_root;
 4916         aw->p.tbl = NULL;
 4917         aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
 4918                 kt->pfrkt_cnt : -1;
 4919 }
 4920 
 4921 static int
 4922 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
 4923     size_t number, char **names)
 4924 {
 4925         nvlist_t        *nvc;
 4926 
 4927         nvc = nvlist_create(0);
 4928         if (nvc == NULL)
 4929                 return (ENOMEM);
 4930 
 4931         for (int i = 0; i < number; i++) {
 4932                 nvlist_append_number_array(nvc, "counters",
 4933                     counter_u64_fetch(counters[i]));
 4934                 nvlist_append_string_array(nvc, "names",
 4935                     names[i]);
 4936                 nvlist_append_number_array(nvc, "ids",
 4937                     i);
 4938         }
 4939         nvlist_add_nvlist(nvl, name, nvc);
 4940         nvlist_destroy(nvc);
 4941 
 4942         return (0);
 4943 }
 4944 
/*
 * DIOCGETSTATUS handler: pack the global pf status (V_pf_status) and the
 * status interface's packet/byte counters into an nvlist and copy it out
 * into the buffer described by 'nv'.  When nv->size is 0 only the
 * required length is reported back in nv->len.
 *
 * Returns 0 on success, ENOMEM on allocation failure and ENOSPC when the
 * caller's buffer is too small (nv->len is updated so the caller can
 * retry with a larger buffer).
 */
static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t        *nvl = NULL, *nvc = NULL;
	void            *nvlpacked = NULL;
	int              error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	PF_RULES_RLOCK_TRACKER;

#define ERROUT(x)      ERROUT_FUNCTION(errout, x)

	/* Hold the rules read lock so the status stays consistent. */
	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", V_pf_status.since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/*
	 * fcounters: built inline rather than via pf_add_status_counters()
	 * because they are pf_counter_u64, not counter_u64.
	 */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names",
		    pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters (reuses the FCNT name table deliberately) */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	/* Size probe: report the required length and succeed. */
	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	/* Drop the lock before the (possibly faulting) copyout. */
	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}
 5048 
 5049 /*
 5050  * XXX - Check for version mismatch!!!
 5051  */
 5052 static void
 5053 pf_clear_all_states(void)
 5054 {
 5055         struct pf_kstate        *s;
 5056         u_int i;
 5057 
 5058         for (i = 0; i <= pf_hashmask; i++) {
 5059                 struct pf_idhash *ih = &V_pf_idhash[i];
 5060 relock:
 5061                 PF_HASHROW_LOCK(ih);
 5062                 LIST_FOREACH(s, &ih->states, entry) {
 5063                         s->timeout = PFTM_PURGE;
 5064                         /* Don't send out individual delete messages. */
 5065                         s->state_flags |= PFSTATE_NOSYNC;
 5066                         pf_unlink_state(s, PF_ENTER_LOCKED);
 5067                         goto relock;
 5068                 }
 5069                 PF_HASHROW_UNLOCK(ih);
 5070         }
 5071 }
 5072 
 5073 static int
 5074 pf_clear_tables(void)
 5075 {
 5076         struct pfioc_table io;
 5077         int error;
 5078 
 5079         bzero(&io, sizeof(io));
 5080 
 5081         error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
 5082             io.pfrio_flags);
 5083 
 5084         return (error);
 5085 }
 5086 
/*
 * Detach states from source nodes and mark the nodes for expiry by the
 * purge thread.  With n == NULL all source nodes are cleared; otherwise
 * only the given node is (its hash slot is expected to be locked by the
 * caller — see the XXX below).
 */
static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_kstate *s;
	int i;

	/* First drop all state -> source-node references. */
	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		/* Mark every node expired so the purge run frees it. */
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}
 5124 
/*
 * DIOCKILLSRCNODES handler: unlink every source node whose source and
 * redirection addresses match the (possibly negated) address/mask pairs
 * in 'psnk', detach any states still referencing them, and report the
 * number of nodes killed in psnk->psnk_killed.
 */
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list         kill;

	LIST_INIT(&kill);
	/* Pass 1: collect matching nodes onto the local kill list. */
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				/* Marker consumed by pass 2 below. */
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	/* Pass 2: clear state references to the nodes marked above. */
	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}
 5168 
/*
 * DIOCKEEPCOUNTERS handler: read a boolean "keep_counters" from the
 * caller-supplied packed nvlist and store it in V_pf_status.  When set,
 * rule counters are preserved across ruleset reloads.
 *
 * Returns 0 on success, ENOMEM for oversized requests, EBADMSG for a
 * malformed nvlist, or a copyin(9) error.
 */
static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t	*nvl = NULL;
	void		*nvlpacked = NULL;
	int		 error = 0;

#define ERROUT(x)       ERROUT_FUNCTION(on_error, x)

	/* Cap the request like every other nvlist ioctl. */
	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (! nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	/* nvlist_destroy() and free() both accept NULL. */
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}
 5203 
/*
 * DIOCCLRSTATES helper: unlink (expire) states, optionally restricted to
 * those on kill->psk_ifname.  With psk_kill_match set, for every killed
 * state the state matching its reversed key on the opposite side is
 * killed as well (the other direction of a translated connection).
 *
 * Returns the number of states killed.
 */
static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp  match_key;
	struct pf_kstate        *s;
	struct pfi_kkif *kif;
	int              idx;
	unsigned int     killed = 0, dir;

	for (unsigned int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname,
			    kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				/*
				 * Build the mirror image (addresses and
				 * ports swapped) of this state's key on the
				 * opposite side, to find the companion
				 * state of the same connection.
				 */
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				PF_ACPY(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				PF_ACPY(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			/*
			 * pf_unlink_state() released the row lock; restart
			 * the scan of this row from the top.
			 */
			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	/* Ask pfsync peers to flush their copies as well. */
	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

	return (killed);
}
 5270 
 5271 static void
 5272 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
 5273 {
 5274         struct pf_kstate        *s;
 5275 
 5276         if (kill->psk_pfcmp.id) {
 5277                 if (kill->psk_pfcmp.creatorid == 0)
 5278                         kill->psk_pfcmp.creatorid = V_pf_status.hostid;
 5279                 if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
 5280                     kill->psk_pfcmp.creatorid))) {
 5281                         pf_unlink_state(s, PF_ENTER_LOCKED);
 5282                         *killed = 1;
 5283                 }
 5284                 return;
 5285         }
 5286 
 5287         for (unsigned int i = 0; i <= pf_hashmask; i++)
 5288                 *killed += pf_killstates_row(kill, &V_pf_idhash[i]);
 5289 
 5290         return;
 5291 }
 5292 
 5293 static int
 5294 pf_killstates_nv(struct pfioc_nv *nv)
 5295 {
 5296         struct pf_kstate_kill    kill;
 5297         nvlist_t                *nvl = NULL;
 5298         void                    *nvlpacked = NULL;
 5299         int                      error = 0;
 5300         unsigned int             killed = 0;
 5301 
 5302 #define ERROUT(x)       ERROUT_FUNCTION(on_error, x)
 5303 
 5304         if (nv->len > pf_ioctl_maxcount)
 5305                 ERROUT(ENOMEM);
 5306 
 5307         nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 5308         if (nvlpacked == NULL)
 5309                 ERROUT(ENOMEM);
 5310 
 5311         error = copyin(nv->data, nvlpacked, nv->len);
 5312         if (error)
 5313                 ERROUT(error);
 5314 
 5315         nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 5316         if (nvl == NULL)
 5317                 ERROUT(EBADMSG);
 5318 
 5319         error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
 5320         if (error)
 5321                 ERROUT(error);
 5322 
 5323         pf_killstates(&kill, &killed);
 5324 
 5325         free(nvlpacked, M_NVLIST);
 5326         nvlpacked = NULL;
 5327         nvlist_destroy(nvl);
 5328         nvl = nvlist_create(0);
 5329         if (nvl == NULL)
 5330                 ERROUT(ENOMEM);
 5331 
 5332         nvlist_add_number(nvl, "killed", killed);
 5333 
 5334         nvlpacked = nvlist_pack(nvl, &nv->len);
 5335         if (nvlpacked == NULL)
 5336                 ERROUT(ENOMEM);
 5337 
 5338         if (nv->size == 0)
 5339                 ERROUT(0);
 5340         else if (nv->size < nv->len)
 5341                 ERROUT(ENOSPC);
 5342 
 5343         error = copyout(nvlpacked, nv->data, nv->len);
 5344 
 5345 on_error:
 5346         nvlist_destroy(nvl);
 5347         free(nvlpacked, M_NVLIST);
 5348         return (error);
 5349 }
 5350 
/*
 * DIOCCLRSTATESNV handler: flush the states described by an
 * nvlist-encoded pf_kstate_kill and report the number killed.
 *
 * Mirrors pf_killstates_nv() but dispatches to pf_clear_states().
 * All exits funnel through on_error, which frees both buffers.
 */
static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
        struct pf_kstate_kill    kill;
        nvlist_t                *nvl = NULL;
        void                    *nvlpacked = NULL;
        int                      error = 0;
        unsigned int             killed;

#define ERROUT(x)       ERROUT_FUNCTION(on_error, x)

        if (nv->len > pf_ioctl_maxcount)
                ERROUT(ENOMEM);

        nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        error = copyin(nv->data, nvlpacked, nv->len);
        if (error)
                ERROUT(error);

        nvl = nvlist_unpack(nvlpacked, nv->len, 0);
        if (nvl == NULL)
                ERROUT(EBADMSG);

        error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
        if (error)
                ERROUT(error);

        killed = pf_clear_states(&kill);

        /* Release the request and build the reply in the same slots. */
        free(nvlpacked, M_NVLIST);
        nvlpacked = NULL;
        nvlist_destroy(nvl);
        nvl = nvlist_create(0);
        if (nvl == NULL)
                ERROUT(ENOMEM);

        nvlist_add_number(nvl, "killed", killed);

        nvlpacked = nvlist_pack(nvl, &nv->len);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        /* size == 0 is a pure size probe: report len, no copyout. */
        if (nv->size == 0)
                ERROUT(0);
        else if (nv->size < nv->len)
                ERROUT(ENOSPC);

        error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
        nvlist_destroy(nvl);
        free(nvlpacked, M_NVLIST);
        return (error);
}
 5409 
/*
 * DIOCGETSTATENV handler: look up a single state by (id, creatorid)
 * from an nvlist request and return it as a packed nvlist under the
 * "state" key.
 *
 * pf_find_state_byid() returns the state locked; it stays locked until
 * the errout label so the exported snapshot is consistent.
 */
static int
pf_getstate(struct pfioc_nv *nv)
{
        nvlist_t                *nvl = NULL, *nvls;
        void                    *nvlpacked = NULL;
        struct pf_kstate        *s = NULL;
        int                      error = 0;
        uint64_t                 id, creatorid;

#define ERROUT(x)       ERROUT_FUNCTION(errout, x)

        if (nv->len > pf_ioctl_maxcount)
                ERROUT(ENOMEM);

        nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        error = copyin(nv->data, nvlpacked, nv->len);
        if (error)
                ERROUT(error);

        nvl = nvlist_unpack(nvlpacked, nv->len, 0);
        if (nvl == NULL)
                ERROUT(EBADMSG);

        /* PFNV_CHK() presumably bails out to errout on failure. */
        PFNV_CHK(pf_nvuint64(nvl, "id", &id));
        PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

        s = pf_find_state_byid(id, creatorid);
        if (s == NULL)
                ERROUT(ENOENT);

        /* Release the request and build the reply in the same slots. */
        free(nvlpacked, M_NVLIST);
        nvlpacked = NULL;
        nvlist_destroy(nvl);
        nvl = nvlist_create(0);
        if (nvl == NULL)
                ERROUT(ENOMEM);

        nvls = pf_state_to_nvstate(s);
        if (nvls == NULL)
                ERROUT(ENOMEM);

        /* nvlist_add_nvlist() copies nvls, so destroy our reference. */
        nvlist_add_nvlist(nvl, "state", nvls);
        nvlist_destroy(nvls);

        nvlpacked = nvlist_pack(nvl, &nv->len);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        /* size == 0 is a pure size probe: report len, no copyout. */
        if (nv->size == 0)
                ERROUT(0);
        else if (nv->size < nv->len)
                ERROUT(ENOSPC);

        error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
        if (s != NULL)
                PF_STATE_UNLOCK(s);
        free(nvlpacked, M_NVLIST);
        nvlist_destroy(nvl);
        return (error);
}
 5476 
 5477 /*
 5478  * XXX - Check for version mismatch!!!
 5479  */
 5480 
 5481 /*
 5482  * Duplicate pfctl -Fa operation to get rid of as much as we can.
 5483  */
 5484 static int
 5485 shutdown_pf(void)
 5486 {
 5487         int error = 0;
 5488         u_int32_t t[5];
 5489         char nn = '\0';
 5490 
 5491         do {
 5492                 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
 5493                     != 0) {
 5494                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
 5495                         break;
 5496                 }
 5497                 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
 5498                     != 0) {
 5499                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
 5500                         break;          /* XXX: rollback? */
 5501                 }
 5502                 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
 5503                     != 0) {
 5504                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
 5505                         break;          /* XXX: rollback? */
 5506                 }
 5507                 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
 5508                     != 0) {
 5509                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
 5510                         break;          /* XXX: rollback? */
 5511                 }
 5512                 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
 5513                     != 0) {
 5514                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
 5515                         break;          /* XXX: rollback? */
 5516                 }
 5517 
 5518                 /* XXX: these should always succeed here */
 5519                 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
 5520                 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
 5521                 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
 5522                 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
 5523                 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
 5524 
 5525                 if ((error = pf_clear_tables()) != 0)
 5526                         break;
 5527 
 5528 #ifdef ALTQ
 5529                 if ((error = pf_begin_altq(&t[0])) != 0) {
 5530                         DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
 5531                         break;
 5532                 }
 5533                 pf_commit_altq(t[0]);
 5534 #endif
 5535 
 5536                 pf_clear_all_states();
 5537 
 5538                 pf_clear_srcnodes(NULL);
 5539 
 5540                 /* status does not use malloced mem so no need to cleanup */
 5541                 /* fingerprints and interfaces have their own cleanup code */
 5542         } while(0);
 5543 
 5544         return (error);
 5545 }
 5546 
 5547 static pfil_return_t
 5548 pf_check_return(int chk, struct mbuf **m)
 5549 {
 5550 
 5551         switch (chk) {
 5552         case PF_PASS:
 5553                 if (*m == NULL)
 5554                         return (PFIL_CONSUMED);
 5555                 else
 5556                         return (PFIL_PASS);
 5557                 break;
 5558         default:
 5559                 if (*m != NULL) {
 5560                         m_freem(*m);
 5561                         *m = NULL;
 5562                 }
 5563                 return (PFIL_DROPPED);
 5564         }
 5565 }
 5566 
 5567 #ifdef INET
 5568 static pfil_return_t
 5569 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
 5570     void *ruleset __unused, struct inpcb *inp)
 5571 {
 5572         int chk;
 5573 
 5574         chk = pf_test(PF_IN, flags, ifp, m, inp);
 5575 
 5576         return (pf_check_return(chk, m));
 5577 }
 5578 
 5579 static pfil_return_t
 5580 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
 5581     void *ruleset __unused,  struct inpcb *inp)
 5582 {
 5583         int chk;
 5584 
 5585         chk = pf_test(PF_OUT, flags, ifp, m, inp);
 5586 
 5587         return (pf_check_return(chk, m));
 5588 }
 5589 #endif
 5590 
 5591 #ifdef INET6
/*
 * pfil hook: run the inbound IPv6 ruleset and map pf's verdict to a
 * pfil return value.
 */
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused,  struct inpcb *inp)
{
        int chk;

        /*
         * In case of loopback traffic IPv6 uses the real interface in
         * order to support scoped addresses. In order to support stateful
         * filtering we have to change this to lo0 as it is the case in
         * IPv4.
         */
        CURVNET_SET(ifp->if_vnet);
        chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
        CURVNET_RESTORE();

        return (pf_check_return(chk, m));
}
 5609 
 5610 static pfil_return_t
 5611 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
 5612     void *ruleset __unused,  struct inpcb *inp)
 5613 {
 5614         int chk;
 5615 
 5616         CURVNET_SET(ifp->if_vnet);
 5617         chk = pf_test6(PF_OUT, flags, ifp, m, inp);
 5618         CURVNET_RESTORE();
 5619 
 5620         return (pf_check_return(chk, m));
 5621 }
 5622 #endif /* INET6 */
 5623 
/*
 * Per-vnet handles for the pfil hooks registered by hook_pf() and
 * removed again by dehook_pf().
 */
#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define V_pf_ip4_in_hook        VNET(pf_ip4_in_hook)
#define V_pf_ip4_out_hook       VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define V_pf_ip6_in_hook        VNET(pf_ip6_in_hook)
#define V_pf_ip6_out_hook       VNET(pf_ip6_out_hook)
#endif
 5636 
/*
 * Register and link pf's pfil hooks for IPv4 and IPv6, in and out.
 * Idempotent: a no-op if the hooks are already installed.
 *
 * Note that pha/pla are reused between registrations; only the fields
 * that differ are reassigned before each pfil_add_hook()/pfil_link().
 */
static void
hook_pf(void)
{
        struct pfil_hook_args pha;
        struct pfil_link_args pla;
        int ret;

        if (V_pf_pfil_hooked)
                return;

        pha.pa_version = PFIL_VERSION;
        pha.pa_modname = "pf";
        pha.pa_ruleset = NULL;

        pla.pa_version = PFIL_VERSION;

#ifdef INET
        pha.pa_type = PFIL_TYPE_IP4;
        pha.pa_func = pf_check_in;
        pha.pa_flags = PFIL_IN;
        pha.pa_rulname = "default-in";
        V_pf_ip4_in_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet_pfil_head;
        pla.pa_hook = V_pf_ip4_in_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
        pha.pa_func = pf_check_out;
        pha.pa_flags = PFIL_OUT;
        pha.pa_rulname = "default-out";
        V_pf_ip4_out_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet_pfil_head;
        pla.pa_hook = V_pf_ip4_out_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
#endif
#ifdef INET6
        pha.pa_type = PFIL_TYPE_IP6;
        pha.pa_func = pf_check6_in;
        pha.pa_flags = PFIL_IN;
        pha.pa_rulname = "default-in6";
        V_pf_ip6_in_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet6_pfil_head;
        pla.pa_hook = V_pf_ip6_in_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
        pha.pa_func = pf_check6_out;
        pha.pa_rulname = "default-out6";
        pha.pa_flags = PFIL_OUT;
        V_pf_ip6_out_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet6_pfil_head;
        pla.pa_hook = V_pf_ip6_out_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
#endif

        V_pf_pfil_hooked = 1;
}
 5698 
/*
 * Remove all pfil hooks installed by hook_pf().  Idempotent: a no-op
 * if the hooks are not currently installed.
 */
static void
dehook_pf(void)
{

        if (V_pf_pfil_hooked == 0)
                return;

#ifdef INET
        pfil_remove_hook(V_pf_ip4_in_hook);
        pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
        pfil_remove_hook(V_pf_ip6_in_hook);
        pfil_remove_hook(V_pf_ip6_out_hook);
#endif

        V_pf_pfil_hooked = 0;
}
 5717 
/*
 * Per-vnet initialization: create the tag zone, set up the rule (and,
 * with ALTQ, queue) tag hashes, then attach pf to this vnet and mark
 * it active.
 */
static void
pf_load_vnet(void)
{
        V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
            PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
        pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
            PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

        pfattach_vnet();
        V_pf_vnet_active = 1;
}
 5734 
 5735 static int
 5736 pf_load(void)
 5737 {
 5738         int error;
 5739 
 5740         rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
 5741         sx_init(&pf_ioctl_lock, "pf ioctl");
 5742         sx_init(&pf_end_lock, "pf end thread");
 5743 
 5744         pf_mtag_initialize();
 5745 
 5746         pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
 5747         if (pf_dev == NULL)
 5748                 return (ENOMEM);
 5749 
 5750         pf_end_threads = 0;
 5751         error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
 5752         if (error != 0)
 5753                 return (error);
 5754 
 5755         pfi_initialize();
 5756 
 5757         return (0);
 5758 }
 5759 
/*
 * Per-vnet teardown, the inverse of pf_load_vnet()/pfattach_vnet().
 *
 * Stops pf, unhooks from pfil, flushes rulesets/tables/states via
 * shutdown_pf(), tears down the software interrupt, and releases all
 * per-vnet resources.  Counters are freed last because earlier
 * shutdown steps still update them.
 */
static void
pf_unload_vnet(void)
{
        int ret;

        /* Stop accepting new work before dismantling anything. */
        V_pf_vnet_active = 0;
        V_pf_status.running = 0;
        dehook_pf();

        PF_RULES_WLOCK();
        pf_syncookies_cleanup();
        shutdown_pf();
        PF_RULES_WUNLOCK();

        ret = swi_remove(V_pf_swi_cookie);
        MPASS(ret == 0);
        ret = intr_event_destroy(V_pf_swi_ie);
        MPASS(ret == 0);

        pf_unload_vnet_purge();

        pf_normalize_cleanup();
        PF_RULES_WLOCK();
        pfi_cleanup_vnet();
        PF_RULES_WUNLOCK();
        pfr_cleanup();
        pf_osfp_flush();
        pf_cleanup();
        /* The mtag subsystem is global; only torn down with vnet0. */
        if (IS_DEFAULT_VNET(curvnet))
                pf_mtag_cleanup();

        pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
        pf_cleanup_tagset(&V_pf_qids);
#endif
        uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
        /* Unthread our markers from the all-kif/all-rule lists. */
        PF_RULES_WLOCK();
        LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

        MPASS(LIST_EMPTY(&V_pf_allkiflist));
        MPASS(V_pf_allkifcount == 0);

        LIST_REMOVE(&V_pf_default_rule, allrulelist);
        V_pf_allrulecount--;
        LIST_REMOVE(V_pf_rulemarker, allrulelist);

        /*
         * There are known pf rule leaks when running the test suite.
         */
#ifdef notyet
        MPASS(LIST_EMPTY(&V_pf_allrulelist));
        MPASS(V_pf_allrulecount == 0);
#endif

        PF_RULES_WUNLOCK();

        free(V_pf_kifmarker, PFI_MTYPE);
        free(V_pf_rulemarker, M_PFRULE);
#endif

        /* Free counters last as we updated them during shutdown. */
        pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
        for (int i = 0; i < 2; i++) {
                pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
                pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
        }
        counter_u64_free(V_pf_default_rule.states_cur);
        counter_u64_free(V_pf_default_rule.states_tot);
        counter_u64_free(V_pf_default_rule.src_nodes);

        for (int i = 0; i < PFRES_MAX; i++)
                counter_u64_free(V_pf_status.counters[i]);
        for (int i = 0; i < KLCNT_MAX; i++)
                counter_u64_free(V_pf_status.lcounters[i]);
        for (int i = 0; i < FCNT_MAX; i++)
                pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
        for (int i = 0; i < SCNT_MAX; i++)
                counter_u64_free(V_pf_status.scounters[i]);
}
 5841 
/*
 * Global module teardown, the inverse of pf_load().
 *
 * Signals the purge kthread to exit (pf_end_threads is bumped to 2 by
 * the thread itself when done) and waits for it, then destroys the
 * control device, cleans up the interface subsystem and the global
 * locks.
 */
static void
pf_unload(void)
{

        sx_xlock(&pf_end_lock);
        pf_end_threads = 1;
        while (pf_end_threads < 2) {
                wakeup_one(pf_purge_thread);
                sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
        }
        sx_xunlock(&pf_end_lock);

        if (pf_dev != NULL)
                destroy_dev(pf_dev);

        pfi_cleanup();

        rm_destroy(&pf_rules_lock);
        sx_destroy(&pf_ioctl_lock);
        sx_destroy(&pf_end_lock);
}
 5863 
/* VNET_SYSINIT wrapper: initialize pf for each (new) vnet. */
static void
vnet_pf_init(void *unused __unused)
{

        pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 
    vnet_pf_init, NULL);
 5872 
/*
 * VNET_SYSUNINIT wrapper: tear down pf in each vnet.  Global teardown
 * runs via SYSUNINIT(pf_unload) at SI_ORDER_SECOND, i.e. after all
 * per-vnet uninits at SI_ORDER_THIRD have completed.
 */
static void
vnet_pf_uninit(const void *unused __unused)
{

        pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);
 5882 
 5883 static int
 5884 pf_modevent(module_t mod, int type, void *data)
 5885 {
 5886         int error = 0;
 5887 
 5888         switch(type) {
 5889         case MOD_LOAD:
 5890                 error = pf_load();
 5891                 break;
 5892         case MOD_UNLOAD:
 5893                 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after
 5894                  * the vnet_pf_uninit()s */
 5895                 break;
 5896         default:
 5897                 error = EINVAL;
 5898                 break;
 5899         }
 5900 
 5901         return (error);
 5902 }
 5903 
/* Module glue: register pf with the kernel module system. */
static moduledata_t pf_mod = {
        "pf",
        pf_modevent,
        0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);

Cache object: 53d290d5e3d9848ae14b51cbf549b06f


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.