The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/netpfil/pf/pf_ioctl.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2001 Daniel Hartmeier
    5  * Copyright (c) 2002,2003 Henning Brauer
    6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  *
   13  *    - Redistributions of source code must retain the above copyright
   14  *      notice, this list of conditions and the following disclaimer.
   15  *    - Redistributions in binary form must reproduce the above
   16  *      copyright notice, this list of conditions and the following
   17  *      disclaimer in the documentation and/or other materials provided
   18  *      with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
   30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   31  * POSSIBILITY OF SUCH DAMAGE.
   32  *
   33  * Effort sponsored in part by the Defense Advanced Research Projects
   34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
   35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
   36  *
   37  *      $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD$");
   42 
   43 #include "opt_inet.h"
   44 #include "opt_inet6.h"
   45 #include "opt_bpf.h"
   46 #include "opt_pf.h"
   47 
   48 #include <sys/param.h>
   49 #include <sys/_bitset.h>
   50 #include <sys/bitset.h>
   51 #include <sys/bus.h>
   52 #include <sys/conf.h>
   53 #include <sys/endian.h>
   54 #include <sys/fcntl.h>
   55 #include <sys/filio.h>
   56 #include <sys/hash.h>
   57 #include <sys/interrupt.h>
   58 #include <sys/jail.h>
   59 #include <sys/kernel.h>
   60 #include <sys/kthread.h>
   61 #include <sys/lock.h>
   62 #include <sys/mbuf.h>
   63 #include <sys/module.h>
   64 #include <sys/nv.h>
   65 #include <sys/proc.h>
   66 #include <sys/sdt.h>
   67 #include <sys/smp.h>
   68 #include <sys/socket.h>
   69 #include <sys/sysctl.h>
   70 #include <sys/md5.h>
   71 #include <sys/ucred.h>
   72 
   73 #include <net/if.h>
   74 #include <net/if_var.h>
   75 #include <net/vnet.h>
   76 #include <net/route.h>
   77 #include <net/pfil.h>
   78 #include <net/pfvar.h>
   79 #include <net/if_pfsync.h>
   80 #include <net/if_pflog.h>
   81 
   82 #include <netinet/in.h>
   83 #include <netinet/ip.h>
   84 #include <netinet/ip_var.h>
   85 #include <netinet6/ip6_var.h>
   86 #include <netinet/ip_icmp.h>
   87 #include <netpfil/pf/pf_nv.h>
   88 
   89 #ifdef INET6
   90 #include <netinet/ip6.h>
   91 #endif /* INET6 */
   92 
   93 #ifdef ALTQ
   94 #include <net/altq/altq.h>
   95 #endif
   96 
   97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
   98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
   99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
  100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
  101 
  102 static struct pf_kpool  *pf_get_kpool(const char *, u_int32_t, u_int8_t,
  103                             u_int32_t, u_int8_t, u_int8_t, u_int8_t);
  104 
  105 static void              pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
  106 static void              pf_empty_kpool(struct pf_kpalist *);
  107 static int               pfioctl(struct cdev *, u_long, caddr_t, int,
  108                             struct thread *);
  109 static int               pf_begin_eth(uint32_t *, const char *);
  110 static void              pf_rollback_eth_cb(struct epoch_context *);
  111 static int               pf_rollback_eth(uint32_t, const char *);
  112 static int               pf_commit_eth(uint32_t, const char *);
  113 static void              pf_free_eth_rule(struct pf_keth_rule *);
  114 #ifdef ALTQ
  115 static int               pf_begin_altq(u_int32_t *);
  116 static int               pf_rollback_altq(u_int32_t);
  117 static int               pf_commit_altq(u_int32_t);
  118 static int               pf_enable_altq(struct pf_altq *);
  119 static int               pf_disable_altq(struct pf_altq *);
  120 static uint16_t          pf_qname2qid(const char *);
  121 static void              pf_qid_unref(uint16_t);
  122 #endif /* ALTQ */
  123 static int               pf_begin_rules(u_int32_t *, int, const char *);
  124 static int               pf_rollback_rules(u_int32_t, int, char *);
  125 static int               pf_setup_pfsync_matching(struct pf_kruleset *);
  126 static void              pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
  127 static void              pf_hash_rule(struct pf_krule *);
  128 static void              pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
  129 static int               pf_commit_rules(u_int32_t, int, char *);
  130 static int               pf_addr_setup(struct pf_kruleset *,
  131                             struct pf_addr_wrap *, sa_family_t);
  132 static void              pf_addr_copyout(struct pf_addr_wrap *);
  133 static void              pf_src_node_copy(const struct pf_ksrc_node *,
  134                             struct pf_src_node *);
  135 #ifdef ALTQ
  136 static int               pf_export_kaltq(struct pf_altq *,
  137                             struct pfioc_altq_v1 *, size_t);
  138 static int               pf_import_kaltq(struct pfioc_altq_v1 *,
  139                             struct pf_altq *, size_t);
  140 #endif /* ALTQ */
  141 
  142 VNET_DEFINE(struct pf_krule,    pf_default_rule);
  143 
  144 static __inline int             pf_krule_compare(struct pf_krule *,
  145                                     struct pf_krule *);
  146 
  147 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
  148 
  149 #ifdef ALTQ
  150 VNET_DEFINE_STATIC(int,         pf_altq_running);
  151 #define V_pf_altq_running       VNET(pf_altq_running)
  152 #endif
  153 
#define TAGID_MAX        50000

/*
 * A registered tag name.  Each entry is linked into two hash tables of
 * its owning pf_tagset: one keyed by name, one keyed by numeric tag id.
 */
struct pf_tagname {
        TAILQ_ENTRY(pf_tagname) namehash_entries;       /* name-hash linkage */
        TAILQ_ENTRY(pf_tagname) taghash_entries;        /* tag-id-hash linkage */
        char                    name[PF_TAG_NAME_SIZE];
        uint16_t                tag;    /* numeric id, 1..TAGID_MAX */
        int                     ref;    /* reference count */
};

/*
 * A set of tags: two hash tables over struct pf_tagname plus a bitmap
 * tracking which tag ids are still available (bit set => id free).
 */
struct pf_tagset {
        TAILQ_HEAD(, pf_tagname)        *namehash;      /* hashed by name */
        TAILQ_HEAD(, pf_tagname)        *taghash;       /* hashed by tag id */
        unsigned int                     mask;  /* hash size - 1 (power of 2) */
        uint32_t                         seed;  /* murmur3 hash seed */
        BITSET_DEFINE(, TAGID_MAX)       avail; /* free tag ids */
};
  170 
  171 VNET_DEFINE(struct pf_tagset, pf_tags);
  172 #define V_pf_tags       VNET(pf_tags)
  173 static unsigned int     pf_rule_tag_hashsize;
  174 #define PF_RULE_TAG_HASH_SIZE_DEFAULT   128
  175 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
  176     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
  177     "Size of pf(4) rule tag hashtable");
  178 
  179 #ifdef ALTQ
  180 VNET_DEFINE(struct pf_tagset, pf_qids);
  181 #define V_pf_qids       VNET(pf_qids)
  182 static unsigned int     pf_queue_tag_hashsize;
  183 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT  128
  184 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
  185     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
  186     "Size of pf(4) queue tag hashtable");
  187 #endif
  188 VNET_DEFINE(uma_zone_t,  pf_tag_z);
  189 #define V_pf_tag_z               VNET(pf_tag_z)
  190 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
  191 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
  192 
  193 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
  194 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
  195 #endif
  196 
  197 static void              pf_init_tagset(struct pf_tagset *, unsigned int *,
  198                             unsigned int);
  199 static void              pf_cleanup_tagset(struct pf_tagset *);
  200 static uint16_t          tagname2hashindex(const struct pf_tagset *, const char *);
  201 static uint16_t          tag2hashindex(const struct pf_tagset *, uint16_t);
  202 static u_int16_t         tagname2tag(struct pf_tagset *, const char *);
  203 static u_int16_t         pf_tagname2tag(const char *);
  204 static void              tag_unref(struct pf_tagset *, u_int16_t);
  205 
  206 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
  207 
  208 struct cdev *pf_dev;
  209 
  210 /*
  211  * XXX - These are new and need to be checked when moveing to a new version
  212  */
  213 static void              pf_clear_all_states(void);
  214 static unsigned int      pf_clear_states(const struct pf_kstate_kill *);
  215 static void              pf_killstates(struct pf_kstate_kill *,
  216                             unsigned int *);
  217 static int               pf_killstates_row(struct pf_kstate_kill *,
  218                             struct pf_idhash *);
  219 static int               pf_killstates_nv(struct pfioc_nv *);
  220 static int               pf_clearstates_nv(struct pfioc_nv *);
  221 static int               pf_getstate(struct pfioc_nv *);
  222 static int               pf_getstatus(struct pfioc_nv *);
  223 static int               pf_clear_tables(void);
  224 static void              pf_clear_srcnodes(struct pf_ksrc_node *);
  225 static void              pf_kill_srcnodes(struct pfioc_src_node_kill *);
  226 static int               pf_keepcounters(struct pfioc_nv *);
  227 static void              pf_tbladdr_copyout(struct pf_addr_wrap *);
  228 
  229 /*
  230  * Wrapper functions for pfil(9) hooks
  231  */
  232 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
  233     int flags, void *ruleset __unused, struct inpcb *inp);
  234 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
  235     int flags, void *ruleset __unused, struct inpcb *inp);
  236 #ifdef INET
  237 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
  238     int flags, void *ruleset __unused, struct inpcb *inp);
  239 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
  240     int flags, void *ruleset __unused, struct inpcb *inp);
  241 #endif
  242 #ifdef INET6
  243 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
  244     int flags, void *ruleset __unused, struct inpcb *inp);
  245 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
  246     int flags, void *ruleset __unused, struct inpcb *inp);
  247 #endif
  248 
  249 static void             hook_pf_eth(void);
  250 static void             hook_pf(void);
  251 static void             dehook_pf_eth(void);
  252 static void             dehook_pf(void);
  253 static int              shutdown_pf(void);
  254 static int              pf_load(void);
  255 static void             pf_unload(void);
  256 
/* Character device switch for the pf control device; pfioctl() handles all ioctls. */
static struct cdevsw pf_cdevsw = {
        .d_ioctl =      pfioctl,
        .d_name =       PF_NAME,
        .d_version =    D_VERSION,
};
  262 
  263 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
  264 #define V_pf_pfil_hooked        VNET(pf_pfil_hooked)
  265 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
  266 #define V_pf_pfil_eth_hooked    VNET(pf_pfil_eth_hooked)
  267 
  268 /*
  269  * We need a flag that is neither hooked nor running to know when
  270  * the VNET is "valid".  We primarily need this to control (global)
  271  * external event, e.g., eventhandlers.
  272  */
  273 VNET_DEFINE(int, pf_vnet_active);
  274 #define V_pf_vnet_active        VNET(pf_vnet_active)
  275 
  276 int pf_end_threads;
  277 struct proc *pf_purge_proc;
  278 
  279 struct rmlock                   pf_rules_lock;
  280 struct sx                       pf_ioctl_lock;
  281 struct sx                       pf_end_lock;
  282 
  283 /* pfsync */
  284 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
  285 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
  286 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
  287 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
  288 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
  289 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
  290 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
  291 
  292 /* pflog */
  293 pflog_packet_t                  *pflog_packet_ptr = NULL;
  294 
  295 /*
  296  * Copy a user-provided string, returning an error if truncation would occur.
  297  * Avoid scanning past "sz" bytes in the source string since there's no
  298  * guarantee that it's nul-terminated.
  299  */
  300 static int
  301 pf_user_strcpy(char *dst, const char *src, size_t sz)
  302 {
  303         if (strnlen(src, sz) == sz)
  304                 return (EINVAL);
  305         (void)strlcpy(dst, src, sz);
  306         return (0);
  307 }
  308 
/*
 * Per-VNET attach: initialize all pf state for the current vnet --
 * subsystem initializers, limits, the main ruleset, the default rule and
 * its counters, the default timeouts, status counters and the software
 * interrupt used by pf_intr().
 */
static void
pfattach_vnet(void)
{
        u_int32_t *my_timeout = V_pf_default_rule.timeout;

        bzero(&V_pf_status, sizeof(V_pf_status));

        pf_initialize();
        pfr_initialize();
        pfi_initialize_vnet();
        pf_normalize_init();
        pf_syncookies_init();

        V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
        V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

        RB_INIT(&V_pf_anchors);
        pf_init_kruleset(&pf_main_ruleset);

        pf_init_keth(V_pf_keth);

        /* default rule should never be garbage collected */
        V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
        V_pf_default_rule.action = PF_DROP;
#else
        V_pf_default_rule.action = PF_PASS;
#endif
        V_pf_default_rule.nr = -1;
        V_pf_default_rule.rtableid = -1;

        /* Counters for the default rule: evaluations plus per-direction packets/bytes. */
        pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
        for (int i = 0; i < 2; i++) {
                pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
                pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
        }
        V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

        V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
            M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
        /*
         * NOTE(review): marker entries inserted into the global kif/rule
         * lists -- presumably iteration anchors for the 32->64 counter
         * rollup walker; confirm against the consumer in pf.c.
         */
        V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
        V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
        PF_RULES_WLOCK();
        LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
        LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
        V_pf_allrulecount++;
        LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
        PF_RULES_WUNLOCK();
#endif

        /* initialize default timeouts */
        my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
        my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
        my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
        my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
        my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
        my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
        my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
        my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
        my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
        my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
        my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
        my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
        my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
        my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
        my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
        my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
        my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
        my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
        my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
        my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

        V_pf_status.debug = PF_DEBUG_URGENT;

        /* pfil hooks are installed later; start unhooked. */
        V_pf_pfil_hooked = false;
        V_pf_pfil_eth_hooked = false;

        /* XXX do our best to avoid a conflict */
        V_pf_status.hostid = arc4random();

        /* Status counters: reasons, limit hits, filter and state operations. */
        for (int i = 0; i < PFRES_MAX; i++)
                V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < KLCNT_MAX; i++)
                V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < FCNT_MAX; i++)
                pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
        for (int i = 0; i < SCNT_MAX; i++)
                V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

        if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
            INTR_MPSAFE, &V_pf_swi_cookie) != 0)
                /* XXXGL: leaked all above. */
                return;
}
  407 
  408 static struct pf_kpool *
  409 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
  410     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
  411     u_int8_t check_ticket)
  412 {
  413         struct pf_kruleset      *ruleset;
  414         struct pf_krule         *rule;
  415         int                      rs_num;
  416 
  417         ruleset = pf_find_kruleset(anchor);
  418         if (ruleset == NULL)
  419                 return (NULL);
  420         rs_num = pf_get_ruleset_number(rule_action);
  421         if (rs_num >= PF_RULESET_MAX)
  422                 return (NULL);
  423         if (active) {
  424                 if (check_ticket && ticket !=
  425                     ruleset->rules[rs_num].active.ticket)
  426                         return (NULL);
  427                 if (r_last)
  428                         rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
  429                             pf_krulequeue);
  430                 else
  431                         rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
  432         } else {
  433                 if (check_ticket && ticket !=
  434                     ruleset->rules[rs_num].inactive.ticket)
  435                         return (NULL);
  436                 if (r_last)
  437                         rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
  438                             pf_krulequeue);
  439                 else
  440                         rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
  441         }
  442         if (!r_last) {
  443                 while ((rule != NULL) && (rule->nr != rule_number))
  444                         rule = TAILQ_NEXT(rule, entries);
  445         }
  446         if (rule == NULL)
  447                 return (NULL);
  448 
  449         return (&rule->rpool);
  450 }
  451 
  452 static void
  453 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
  454 {
  455         struct pf_kpooladdr     *mv_pool_pa;
  456 
  457         while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
  458                 TAILQ_REMOVE(poola, mv_pool_pa, entries);
  459                 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
  460         }
  461 }
  462 
  463 static void
  464 pf_empty_kpool(struct pf_kpalist *poola)
  465 {
  466         struct pf_kpooladdr *pa;
  467 
  468         while ((pa = TAILQ_FIRST(poola)) != NULL) {
  469                 switch (pa->addr.type) {
  470                 case PF_ADDR_DYNIFTL:
  471                         pfi_dynaddr_remove(pa->addr.p.dyn);
  472                         break;
  473                 case PF_ADDR_TABLE:
  474                         /* XXX: this could be unfinished pooladdr on pabuf */
  475                         if (pa->addr.p.tbl != NULL)
  476                                 pfr_detach_table(pa->addr.p.tbl);
  477                         break;
  478                 }
  479                 if (pa->kif)
  480                         pfi_kkif_unref(pa->kif);
  481                 TAILQ_REMOVE(poola, pa, entries);
  482                 free(pa, M_PFRULE);
  483         }
  484 }
  485 
/*
 * Remove "rule" from its ruleset queue and park it on the per-vnet
 * unlinked-rules list for deferred freeing.  Caller must hold the rules
 * write lock and the unlinked-rules lock (asserted below).
 */
static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

        PF_RULES_WASSERT();
        PF_UNLNKDRULES_ASSERT();

        TAILQ_REMOVE(rulequeue, rule, entries);

        /* Mark the rule as referenced before queueing it for teardown. */
        rule->rule_ref |= PFRULE_REFS;
        TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}
  498 
/*
 * Locking wrapper around pf_unlink_rule_locked(): takes the
 * unlinked-rules lock for the caller (rules write lock still required).
 */
static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

        PF_RULES_WASSERT();

        PF_UNLNKDRULES_LOCK();
        pf_unlink_rule_locked(rulequeue, rule);
        PF_UNLNKDRULES_UNLOCK();
}
  509 
/*
 * Release all resources held by an ethernet rule -- tags, queue id,
 * interface and table references, counters, timestamp -- remove it from
 * its anchor and free it.  NULL is accepted and ignored.  Caller holds
 * the rules write lock.
 */
static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
        PF_RULES_WASSERT();

        if (rule == NULL)
                return;

        if (rule->tag)
                tag_unref(&V_pf_tags, rule->tag);
        if (rule->match_tag)
                tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
        pf_qid_unref(rule->qid);
#endif

        if (rule->bridge_to)
                pfi_kkif_unref(rule->bridge_to);
        if (rule->kif)
                pfi_kkif_unref(rule->kif);

        if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
                pfr_detach_table(rule->ipsrc.addr.p.tbl);
        if (rule->ipdst.addr.type == PF_ADDR_TABLE)
                pfr_detach_table(rule->ipdst.addr.p.tbl);

        counter_u64_free(rule->evaluations);
        /* Per-direction (in/out) packet and byte counters. */
        for (int i = 0; i < 2; i++) {
                counter_u64_free(rule->packets[i]);
                counter_u64_free(rule->bytes[i]);
        }
        uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
        pf_keth_anchor_remove(rule);

        free(rule, M_PFRULE);
}
  546 
/*
 * Release all resources held by a filter rule -- tags, queue ids,
 * source/destination dynamic addresses and tables, overload table,
 * interface reference and address pool -- remove it from its anchor and
 * free it.  Caller holds the rules write lock and the config lock.
 */
void
pf_free_rule(struct pf_krule *rule)
{

        PF_RULES_WASSERT();
        PF_CONFIG_ASSERT();

        if (rule->tag)
                tag_unref(&V_pf_tags, rule->tag);
        if (rule->match_tag)
                tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
        /* pqid may alias qid; avoid dropping the same reference twice. */
        if (rule->pqid != rule->qid)
                pf_qid_unref(rule->pqid);
        pf_qid_unref(rule->qid);
#endif
        switch (rule->src.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->src.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->src.addr.p.tbl);
                break;
        }
        switch (rule->dst.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->dst.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->dst.addr.p.tbl);
                break;
        }
        if (rule->overload_tbl)
                pfr_detach_table(rule->overload_tbl);
        if (rule->kif)
                pfi_kkif_unref(rule->kif);
        pf_kanchor_remove(rule);
        pf_empty_kpool(&rule->rpool.list);

        pf_krule_free(rule);
}
  588 
  589 static void
  590 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
  591     unsigned int default_size)
  592 {
  593         unsigned int i;
  594         unsigned int hashsize;
  595 
  596         if (*tunable_size == 0 || !powerof2(*tunable_size))
  597                 *tunable_size = default_size;
  598 
  599         hashsize = *tunable_size;
  600         ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
  601             M_WAITOK);
  602         ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
  603             M_WAITOK);
  604         ts->mask = hashsize - 1;
  605         ts->seed = arc4random();
  606         for (i = 0; i < hashsize; i++) {
  607                 TAILQ_INIT(&ts->namehash[i]);
  608                 TAILQ_INIT(&ts->taghash[i]);
  609         }
  610         BIT_FILL(TAGID_MAX, &ts->avail);
  611 }
  612 
  613 static void
  614 pf_cleanup_tagset(struct pf_tagset *ts)
  615 {
  616         unsigned int i;
  617         unsigned int hashsize;
  618         struct pf_tagname *t, *tmp;
  619 
  620         /*
  621          * Only need to clean up one of the hashes as each tag is hashed
  622          * into each table.
  623          */
  624         hashsize = ts->mask + 1;
  625         for (i = 0; i < hashsize; i++)
  626                 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
  627                         uma_zfree(V_pf_tag_z, t);
  628 
  629         free(ts->namehash, M_PFHASH);
  630         free(ts->taghash, M_PFHASH);
  631 }
  632 
  633 static uint16_t
  634 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
  635 {
  636         size_t len;
  637 
  638         len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
  639         return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
  640 }
  641 
/* Map a numeric tag id to an index in the tag-id hash table. */
static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

        return (tag & ts->mask);
}
  648 
  649 static u_int16_t
  650 tagname2tag(struct pf_tagset *ts, const char *tagname)
  651 {
  652         struct pf_tagname       *tag;
  653         u_int32_t                index;
  654         u_int16_t                new_tagid;
  655 
  656         PF_RULES_WASSERT();
  657 
  658         index = tagname2hashindex(ts, tagname);
  659         TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
  660                 if (strcmp(tagname, tag->name) == 0) {
  661                         tag->ref++;
  662                         return (tag->tag);
  663                 }
  664 
  665         /*
  666          * new entry
  667          *
  668          * to avoid fragmentation, we do a linear search from the beginning
  669          * and take the first free slot we find.
  670          */
  671         new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
  672         /*
  673          * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
  674          * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
  675          * set.  It may also return a bit number greater than TAGID_MAX due
  676          * to rounding of the number of bits in the vector up to a multiple
  677          * of the vector word size at declaration/allocation time.
  678          */
  679         if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
  680                 return (0);
  681 
  682         /* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
  683         BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
  684 
  685         /* allocate and fill new struct pf_tagname */
  686         tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
  687         if (tag == NULL)
  688                 return (0);
  689         strlcpy(tag->name, tagname, sizeof(tag->name));
  690         tag->tag = new_tagid;
  691         tag->ref = 1;
  692 
  693         /* Insert into namehash */
  694         TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
  695 
  696         /* Insert into taghash */
  697         index = tag2hashindex(ts, new_tagid);
  698         TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
  699 
  700         return (tag->tag);
  701 }
  702 
/*
 * Drop a reference on "tag".  When the last reference is released the
 * entry is unlinked from both hash tables, the tag id is returned to the
 * available bitmap and the entry is freed.  Caller holds the rules write
 * lock.
 */
static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
        struct pf_tagname       *t;
        uint16_t                 index;

        PF_RULES_WASSERT();

        index = tag2hashindex(ts, tag);
        TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
                if (tag == t->tag) {
                        if (--t->ref == 0) {
                                TAILQ_REMOVE(&ts->taghash[index], t,
                                    taghash_entries);
                                /* Recompute the index for the *name* hash. */
                                index = tagname2hashindex(ts, t->name);
                                TAILQ_REMOVE(&ts->namehash[index], t,
                                    namehash_entries);
                                /* Bits are 0-based for BIT_SET() */
                                BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
                                uma_zfree(V_pf_tag_z, t);
                        }
                        break;
                }
}
  727 
/* Convenience wrapper: resolve a tag name in the per-vnet rule tagset. */
static uint16_t
pf_tagname2tag(const char *tagname)
{
        return (tagname2tag(&V_pf_tags, tagname));
}
  733 
/*
 * Open a new Ethernet-rule transaction on 'anchor'.  Rules left on the
 * inactive list by a previous, uncommitted transaction are purged first;
 * the caller receives a fresh ticket in '*ticket' and the inactive list
 * is marked open for additions.
 * Returns EINVAL if the ruleset cannot be found or created.
 */
static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}
  759 
/*
 * NET_EPOCH callback scheduled by pf_commit_eth(): once no datapath
 * thread can still reference the previously active rules (now parked on
 * the inactive list), free them by rolling the transaction back under
 * the rules write lock.  Runs outside the original thread's context, so
 * the ruleset's vnet has to be entered explicitly.
 */
static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}
  776 
/*
 * Abort an Ethernet-rule transaction: free every rule queued on the
 * inactive list, close the transaction, and drop the ruleset if it is
 * now empty.  A missing ruleset is EINVAL; a stale ticket or an already
 * closed transaction is silently ignored (returns 0).
 */
static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}
  806 
/*
 * Flush the skip-step chain for criterion 'i': every rule from head[i]
 * up to (but not including) 'cur' matched identically on that criterion,
 * so each one's skip[i] pointer can jump straight to 'cur' — the first
 * rule where the criterion changes.  Relies on 'head' and 'cur' being in
 * scope at the expansion site (see pf_eth_calc_skip_steps()).
 */
#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries); \
		}						\
	} while (0)
  814 
/*
 * Compute skip steps for an Ethernet ruleset.  For each match criterion
 * (interface, direction, protocol, source and destination address) every
 * rule gets a pointer to the next rule that differs in that criterion,
 * letting the evaluation loop skip over runs of rules that cannot match.
 */
static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	/* cur is NULL here: terminate all remaining chains at end of list. */
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
  843 
/*
 * Commit an open Ethernet-rule transaction: swap the inactive (newly
 * loaded) rule list in as the active one.  The ticket must match the
 * open transaction, otherwise EBUSY.  The previously active rules land
 * on the inactive list and are freed later from an epoch callback, once
 * the datapath is guaranteed not to reference them anymore.
 */
static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	/* Publish the new list atomically; readers run under net epoch. */
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}
  874 
  875 #ifdef ALTQ
  876 static uint16_t
  877 pf_qname2qid(const char *qname)
  878 {
  879         return (tagname2tag(&V_pf_qids, qname));
  880 }
  881 
/*
 * Drop a reference on a queue id previously obtained via pf_qname2qid().
 */
static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}
  887 
/*
 * Open a new ALTQ transaction: purge anything left on the inactive
 * interface and queue lists (detaching disciplines from interfaces that
 * still exist, dropping queue-id references), then hand out a fresh
 * ticket and mark the transaction open.  If removing an old discipline
 * failed, that error is returned and the transaction is not opened.
 */
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}
  916 
/*
 * Abort an open ALTQ transaction: tear down disciplines created for the
 * inactive lists, drop queue-id references and free all entries.  A
 * stale ticket or closed transaction is a no-op (returns 0); otherwise
 * the last altq_remove() error, if any, is returned.
 */
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}
  944 
/*
 * Commit an open ALTQ transaction: swap the inactive lists in as active,
 * attach (and, if ALTQ is running, enable) the new disciplines, then
 * detach, destroy and free the old ones.  A stale ticket returns EBUSY;
 * an attach/enable failure aborts immediately, while errors during the
 * teardown of the old lists are collected and returned after cleanup
 * completes.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		/* Drop the queue-id reference taken at load time. */
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}
 1003 
 1004 static int
 1005 pf_enable_altq(struct pf_altq *altq)
 1006 {
 1007         struct ifnet            *ifp;
 1008         struct tb_profile        tb;
 1009         int                      error = 0;
 1010 
 1011         if ((ifp = ifunit(altq->ifname)) == NULL)
 1012                 return (EINVAL);
 1013 
 1014         if (ifp->if_snd.altq_type != ALTQT_NONE)
 1015                 error = altq_enable(&ifp->if_snd);
 1016 
 1017         /* set tokenbucket regulator */
 1018         if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
 1019                 tb.rate = altq->ifbandwidth;
 1020                 tb.depth = altq->tbrsize;
 1021                 error = tbr_set(&ifp->if_snd, &tb);
 1022         }
 1023 
 1024         return (error);
 1025 }
 1026 
/*
 * Disable the ALTQ discipline referenced by 'altq' and clear its
 * token-bucket regulator.  Returns EINVAL if the interface is gone, 0 if
 * the interface's discipline has since been replaced (nothing to do),
 * otherwise any error from altq_disable()/tbr_set().
 */
static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
 1054 
/*
 * Helper for pf_altq_ifnet_event(): re-evaluate one copied altq entry
 * against an interface arrival/departure.  If the interface is missing
 * (or is the one being removed), the entry is merely marked
 * PFALTQ_FLAG_IF_REMOVED; otherwise its discipline is re-created via
 * altq_add().  On error the entry is freed here and must not be touched
 * by the caller.
 */
static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		/* A stale ticket means the transaction was interrupted. */
		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}
 1079 
/*
 * Interface arrival/departure hook for ALTQ: rebuild the ALTQ
 * configuration from the currently active one so disciplines are
 * re-created on a newly arrived interface or marked removed on a
 * departing one.  Any userland transaction in progress is rolled back
 * first; the rebuilt set is committed on success or rolled back on
 * error.
 */
void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	/* Copy the queue entries, re-linking them to the copied disciplines. */
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
				IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
 1154 #endif /* ALTQ */
 1155 
 1156 static struct pf_krule_global *
 1157 pf_rule_tree_alloc(int flags)
 1158 {
 1159         struct pf_krule_global *tree;
 1160 
 1161         tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
 1162         if (tree == NULL)
 1163                 return (NULL);
 1164         RB_INIT(tree);
 1165         return (tree);
 1166 }
 1167 
/*
 * Release a rule tree head allocated by pf_rule_tree_alloc().  Only the
 * head is freed; the caller is responsible for the rules it indexed.
 * A NULL tree is accepted (free(9) on NULL is a no-op).
 */
static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}
 1174 
 1175 static int
 1176 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
 1177 {
 1178         struct pf_krule_global *tree;
 1179         struct pf_kruleset      *rs;
 1180         struct pf_krule         *rule;
 1181 
 1182         PF_RULES_WASSERT();
 1183 
 1184         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
 1185                 return (EINVAL);
 1186         tree = pf_rule_tree_alloc(M_NOWAIT);
 1187         if (tree == NULL)
 1188                 return (ENOMEM);
 1189         rs = pf_find_or_create_kruleset(anchor);
 1190         if (rs == NULL) {
 1191                 free(tree, M_TEMP);
 1192                 return (EINVAL);
 1193         }
 1194         pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
 1195         rs->rules[rs_num].inactive.tree = tree;
 1196 
 1197         while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
 1198                 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
 1199                 rs->rules[rs_num].inactive.rcount--;
 1200         }
 1201         *ticket = ++rs->rules[rs_num].inactive.ticket;
 1202         rs->rules[rs_num].inactive.open = 1;
 1203         return (0);
 1204 }
 1205 
/*
 * Abort an open rule transaction: unlink and free every rule queued on
 * the inactive list and close the transaction.  A missing ruleset, a
 * stale ticket, or an already-closed transaction is not an error.
 */
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
 1227 
/*
 * Helpers to fold rule fields into an MD5 context ('ctx' must be in
 * scope at the expansion site).  The HTONL/HTONS variants convert to
 * network byte order first, via the caller-supplied scratch variable,
 * so the resulting digests compare equal across hosts of different
 * endianness.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
 1243 
/*
 * Fold a rule address into the MD5 context.  Only the fields meaningful
 * for the given address type are hashed, so logically identical
 * addresses yield identical digests regardless of leftover union bytes.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
 1268 
/*
 * Fold all match-relevant fields of a rule into 'ctx'.  Used for the
 * pfsync ruleset checksum (see pf_setup_pfsync_matching()) and, via
 * pf_hash_rule(), for per-rule digests; multi-byte integers are hashed
 * in network byte order so digests are endian-independent.
 */
static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}
 1310 
/*
 * Compute the rule's standalone MD5 digest and store it in rule->md5sum.
 * The digest is the comparison key used by pf_krule_compare().
 */
static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX		ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}
 1320 
/*
 * Ordering function for rules keyed by MD5 digest: returns the memcmp()
 * ordering of the two rules' md5sum fields.  Rules must have been hashed
 * with pf_hash_rule() before being compared.
 */
static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}
 1327 
 1328 static int
 1329 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
 1330 {
 1331         struct pf_kruleset      *rs;
 1332         struct pf_krule         *rule, **old_array, *old_rule;
 1333         struct pf_krulequeue    *old_rules;
 1334         struct pf_krule_global  *old_tree;
 1335         int                      error;
 1336         u_int32_t                old_rcount;
 1337 
 1338         PF_RULES_WASSERT();
 1339 
 1340         if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
 1341                 return (EINVAL);
 1342         rs = pf_find_kruleset(anchor);
 1343         if (rs == NULL || !rs->rules[rs_num].inactive.open ||
 1344             ticket != rs->rules[rs_num].inactive.ticket)
 1345                 return (EBUSY);
 1346 
 1347         /* Calculate checksum for the main ruleset */
 1348         if (rs == &pf_main_ruleset) {
 1349                 error = pf_setup_pfsync_matching(rs);
 1350                 if (error != 0)
 1351                         return (error);
 1352         }
 1353 
 1354         /* Swap rules, keep the old. */
 1355         old_rules = rs->rules[rs_num].active.ptr;
 1356         old_rcount = rs->rules[rs_num].active.rcount;
 1357         old_array = rs->rules[rs_num].active.ptr_array;
 1358         old_tree = rs->rules[rs_num].active.tree;
 1359 
 1360         rs->rules[rs_num].active.ptr =
 1361             rs->rules[rs_num].inactive.ptr;
 1362         rs->rules[rs_num].active.ptr_array =
 1363             rs->rules[rs_num].inactive.ptr_array;
 1364         rs->rules[rs_num].active.tree =
 1365             rs->rules[rs_num].inactive.tree;
 1366         rs->rules[rs_num].active.rcount =
 1367             rs->rules[rs_num].inactive.rcount;
 1368 
 1369         /* Attempt to preserve counter information. */
 1370         if (V_pf_status.keep_counters && old_tree != NULL) {
 1371                 TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
 1372                     entries) {
 1373                         old_rule = RB_FIND(pf_krule_global, old_tree, rule);
 1374                         if (old_rule == NULL) {
 1375                                 continue;
 1376                         }
 1377                         pf_counter_u64_critical_enter();
 1378                         pf_counter_u64_add_protected(&rule->evaluations,
 1379                             pf_counter_u64_fetch(&old_rule->evaluations));
 1380                         pf_counter_u64_add_protected(&rule->packets[0],
 1381                             pf_counter_u64_fetch(&old_rule->packets[0]));
 1382                         pf_counter_u64_add_protected(&rule->packets[1],
 1383                             pf_counter_u64_fetch(&old_rule->packets[1]));
 1384                         pf_counter_u64_add_protected(&rule->bytes[0],
 1385                             pf_counter_u64_fetch(&old_rule->bytes[0]));
 1386                         pf_counter_u64_add_protected(&rule->bytes[1],
 1387                             pf_counter_u64_fetch(&old_rule->bytes[1]));
 1388                         pf_counter_u64_critical_exit();
 1389                 }
 1390         }
 1391 
 1392         rs->rules[rs_num].inactive.ptr = old_rules;
 1393         rs->rules[rs_num].inactive.ptr_array = old_array;
 1394         rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
 1395         rs->rules[rs_num].inactive.rcount = old_rcount;
 1396 
 1397         rs->rules[rs_num].active.ticket =
 1398             rs->rules[rs_num].inactive.ticket;
 1399         pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
 1400 
 1401         /* Purge the old rule list. */
 1402         PF_UNLNKDRULES_LOCK();
 1403         while ((rule = TAILQ_FIRST(old_rules)) != NULL)
 1404                 pf_unlink_rule_locked(old_rules, rule);
 1405         PF_UNLNKDRULES_UNLOCK();
 1406         if (rs->rules[rs_num].inactive.ptr_array)
 1407                 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
 1408         rs->rules[rs_num].inactive.ptr_array = NULL;
 1409         rs->rules[rs_num].inactive.rcount = 0;
 1410         rs->rules[rs_num].inactive.open = 0;
 1411         pf_remove_if_empty_kruleset(rs);
 1412         free(old_tree, M_TEMP);
 1413 
 1414         return (0);
 1415 }
 1416 
/*
 * Prepare the main ruleset for a commit: compute the pfsync ruleset
 * checksum over all inactive rule queues (scrub rules excluded) and
 * rebuild each queue's ptr_array lookup table indexed by rule number.
 * Returns ENOMEM if a lookup array cannot be allocated.
 */
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* Drop any stale lookup table before rebuilding it. */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}
 1456 
 1457 static int
 1458 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
 1459 {
 1460         int error = 0;
 1461 
 1462         switch (addr->type) {
 1463         case PF_ADDR_TABLE:
 1464                 addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
 1465                 if (addr->p.tbl == NULL)
 1466                         error = ENOMEM;
 1467                 break;
 1468         default:
 1469                 error = EINVAL;
 1470         }
 1471 
 1472         return (error);
 1473 }
 1474 
/*
 * Resolve the dynamic parts of a rule address: attach the named table or
 * set up interface-driven dynamic addresses for address family 'af'.
 * Other address types (e.g. plain address/mask) need no setup and fall
 * through returning 0.
 */
static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}
 1494 
/*
 * Translate a rule address back into its exportable form before copyout:
 * dynamic interface addresses and table references need conversion,
 * other address types are already copyout-safe and are left untouched.
 */
static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}
 1508 
 1509 static void
 1510 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
 1511 {
 1512         int     secs = time_uptime, diff;
 1513 
 1514         bzero(out, sizeof(struct pf_src_node));
 1515 
 1516         bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
 1517         bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
 1518 
 1519         if (in->rule.ptr != NULL)
 1520                 out->rule.nr = in->rule.ptr->nr;
 1521 
 1522         for (int i = 0; i < 2; i++) {
 1523                 out->bytes[i] = counter_u64_fetch(in->bytes[i]);
 1524                 out->packets[i] = counter_u64_fetch(in->packets[i]);
 1525         }
 1526 
 1527         out->states = in->states;
 1528         out->conn = in->conn;
 1529         out->af = in->af;
 1530         out->ruletype = in->ruletype;
 1531 
 1532         out->creation = secs - in->creation;
 1533         if (out->expire > secs)
 1534                 out->expire -= secs;
 1535         else
 1536                 out->expire = 0;
 1537 
 1538         /* Adjust the connection rate estimate. */
 1539         diff = secs - in->conn_rate.last;
 1540         if (diff >= in->conn_rate.seconds)
 1541                 out->conn_rate.count = 0;
 1542         else
 1543                 out->conn_rate.count -=
 1544                     in->conn_rate.count * diff /
 1545                     in->conn_rate.seconds;
 1546 }
 1547 
 1548 #ifdef ALTQ
 1549 /*
 1550  * Handle export of struct pf_kaltq to user binaries that may be using any
 1551  * version of struct pf_altq.
 1552  */
 1553 static int
 1554 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
 1555 {
 1556         u_int32_t version;
 1557 
 1558         if (ioc_size == sizeof(struct pfioc_altq_v0))
 1559                 version = 0;
 1560         else
 1561                 version = pa->version;
 1562 
 1563         if (version > PFIOC_ALTQ_VERSION)
 1564                 return (EINVAL);
 1565 
 1566 #define ASSIGN(x) exported_q->x = q->x
 1567 #define COPY(x) \
 1568         bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
 1569 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
 1570 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
 1571 
 1572         switch (version) {
 1573         case 0: {
 1574                 struct pf_altq_v0 *exported_q =
 1575                     &((struct pfioc_altq_v0 *)pa)->altq;
 1576 
 1577                 COPY(ifname);
 1578 
 1579                 ASSIGN(scheduler);
 1580                 ASSIGN(tbrsize);
 1581                 exported_q->tbrsize = SATU16(q->tbrsize);
 1582                 exported_q->ifbandwidth = SATU32(q->ifbandwidth);
 1583 
 1584                 COPY(qname);
 1585                 COPY(parent);
 1586                 ASSIGN(parent_qid);
 1587                 exported_q->bandwidth = SATU32(q->bandwidth);
 1588                 ASSIGN(priority);
 1589                 ASSIGN(local_flags);
 1590 
 1591                 ASSIGN(qlimit);
 1592                 ASSIGN(flags);
 1593 
 1594                 if (q->scheduler == ALTQT_HFSC) {
 1595 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
 1596 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
 1597                             SATU32(q->pq_u.hfsc_opts.x)
 1598                         
 1599                         ASSIGN_OPT_SATU32(rtsc_m1);
 1600                         ASSIGN_OPT(rtsc_d);
 1601                         ASSIGN_OPT_SATU32(rtsc_m2);
 1602 
 1603                         ASSIGN_OPT_SATU32(lssc_m1);
 1604                         ASSIGN_OPT(lssc_d);
 1605                         ASSIGN_OPT_SATU32(lssc_m2);
 1606 
 1607                         ASSIGN_OPT_SATU32(ulsc_m1);
 1608                         ASSIGN_OPT(ulsc_d);
 1609                         ASSIGN_OPT_SATU32(ulsc_m2);
 1610 
 1611                         ASSIGN_OPT(flags);
 1612                         
 1613 #undef ASSIGN_OPT
 1614 #undef ASSIGN_OPT_SATU32
 1615                 } else
 1616                         COPY(pq_u);
 1617 
 1618                 ASSIGN(qid);
 1619                 break;
 1620         }
 1621         case 1: {
 1622                 struct pf_altq_v1 *exported_q =
 1623                     &((struct pfioc_altq_v1 *)pa)->altq;
 1624 
 1625                 COPY(ifname);
 1626 
 1627                 ASSIGN(scheduler);
 1628                 ASSIGN(tbrsize);
 1629                 ASSIGN(ifbandwidth);
 1630 
 1631                 COPY(qname);
 1632                 COPY(parent);
 1633                 ASSIGN(parent_qid);
 1634                 ASSIGN(bandwidth);
 1635                 ASSIGN(priority);
 1636                 ASSIGN(local_flags);
 1637 
 1638                 ASSIGN(qlimit);
 1639                 ASSIGN(flags);
 1640                 COPY(pq_u);
 1641 
 1642                 ASSIGN(qid);
 1643                 break;
 1644         }
 1645         default:
 1646                 panic("%s: unhandled struct pfioc_altq version", __func__);
 1647                 break;
 1648         }
 1649 
 1650 #undef ASSIGN
 1651 #undef COPY
 1652 #undef SATU16
 1653 #undef SATU32
 1654 
 1655         return (0);
 1656 }
 1657 
 1658 /*
 1659  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 1660  * that may be using any version of it.
 1661  */
 1662 static int
 1663 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
 1664 {
 1665         u_int32_t version;
 1666 
 1667         if (ioc_size == sizeof(struct pfioc_altq_v0))
 1668                 version = 0;
 1669         else
 1670                 version = pa->version;
 1671 
 1672         if (version > PFIOC_ALTQ_VERSION)
 1673                 return (EINVAL);
 1674 
 1675 #define ASSIGN(x) q->x = imported_q->x
 1676 #define COPY(x) \
 1677         bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
 1678 
 1679         switch (version) {
 1680         case 0: {
 1681                 struct pf_altq_v0 *imported_q =
 1682                     &((struct pfioc_altq_v0 *)pa)->altq;
 1683 
 1684                 COPY(ifname);
 1685 
 1686                 ASSIGN(scheduler);
 1687                 ASSIGN(tbrsize); /* 16-bit -> 32-bit */
 1688                 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
 1689 
 1690                 COPY(qname);
 1691                 COPY(parent);
 1692                 ASSIGN(parent_qid);
 1693                 ASSIGN(bandwidth); /* 32-bit -> 64-bit */
 1694                 ASSIGN(priority);
 1695                 ASSIGN(local_flags);
 1696 
 1697                 ASSIGN(qlimit);
 1698                 ASSIGN(flags);
 1699 
 1700                 if (imported_q->scheduler == ALTQT_HFSC) {
 1701 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
 1702 
 1703                         /*
 1704                          * The m1 and m2 parameters are being copied from
 1705                          * 32-bit to 64-bit.
 1706                          */
 1707                         ASSIGN_OPT(rtsc_m1);
 1708                         ASSIGN_OPT(rtsc_d);
 1709                         ASSIGN_OPT(rtsc_m2);
 1710 
 1711                         ASSIGN_OPT(lssc_m1);
 1712                         ASSIGN_OPT(lssc_d);
 1713                         ASSIGN_OPT(lssc_m2);
 1714 
 1715                         ASSIGN_OPT(ulsc_m1);
 1716                         ASSIGN_OPT(ulsc_d);
 1717                         ASSIGN_OPT(ulsc_m2);
 1718 
 1719                         ASSIGN_OPT(flags);
 1720                         
 1721 #undef ASSIGN_OPT
 1722                 } else
 1723                         COPY(pq_u);
 1724 
 1725                 ASSIGN(qid);
 1726                 break;
 1727         }
 1728         case 1: {
 1729                 struct pf_altq_v1 *imported_q =
 1730                     &((struct pfioc_altq_v1 *)pa)->altq;
 1731 
 1732                 COPY(ifname);
 1733 
 1734                 ASSIGN(scheduler);
 1735                 ASSIGN(tbrsize);
 1736                 ASSIGN(ifbandwidth);
 1737 
 1738                 COPY(qname);
 1739                 COPY(parent);
 1740                 ASSIGN(parent_qid);
 1741                 ASSIGN(bandwidth);
 1742                 ASSIGN(priority);
 1743                 ASSIGN(local_flags);
 1744 
 1745                 ASSIGN(qlimit);
 1746                 ASSIGN(flags);
 1747                 COPY(pq_u);
 1748 
 1749                 ASSIGN(qid);
 1750                 break;
 1751         }
 1752         default:        
 1753                 panic("%s: unhandled struct pfioc_altq version", __func__);
 1754                 break;
 1755         }
 1756 
 1757 #undef ASSIGN
 1758 #undef COPY
 1759 
 1760         return (0);
 1761 }
 1762 
 1763 static struct pf_altq *
 1764 pf_altq_get_nth_active(u_int32_t n)
 1765 {
 1766         struct pf_altq          *altq;
 1767         u_int32_t                nr;
 1768 
 1769         nr = 0;
 1770         TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
 1771                 if (nr == n)
 1772                         return (altq);
 1773                 nr++;
 1774         }
 1775 
 1776         TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
 1777                 if (nr == n)
 1778                         return (altq);
 1779                 nr++;
 1780         }
 1781 
 1782         return (NULL);
 1783 }
 1784 #endif /* ALTQ */
 1785 
 1786 struct pf_krule *
 1787 pf_krule_alloc(void)
 1788 {
 1789         struct pf_krule *rule;
 1790 
 1791         rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
 1792         mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
 1793         rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
 1794             M_WAITOK | M_ZERO);
 1795         return (rule);
 1796 }
 1797 
/*
 * Tear down a rule created by pf_krule_alloc(): unlink it from the
 * global rule list (if linked), release its counters, timestamp and
 * pool mutex, and free the rule itself.  Passing NULL is a no-op.
 */
void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	/*
	 * Take the rules write lock only if the caller does not already
	 * hold it, and release it only if we took it ourselves.
	 */
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}
 1833 
 1834 static void
 1835 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
 1836     struct pf_pooladdr *pool)
 1837 {
 1838 
 1839         bzero(pool, sizeof(*pool));
 1840         bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
 1841         strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
 1842 }
 1843 
 1844 static int
 1845 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
 1846     struct pf_kpooladdr *kpool)
 1847 {
 1848         int ret;
 1849 
 1850         bzero(kpool, sizeof(*kpool));
 1851         bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
 1852         ret = pf_user_strcpy(kpool->ifname, pool->ifname,
 1853             sizeof(kpool->ifname));
 1854         return (ret);
 1855 }
 1856 
 1857 static void
 1858 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
 1859 {
 1860         bzero(pool, sizeof(*pool));
 1861 
 1862         bcopy(&kpool->key, &pool->key, sizeof(pool->key));
 1863         bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
 1864 
 1865         pool->tblidx = kpool->tblidx;
 1866         pool->proxy_port[0] = kpool->proxy_port[0];
 1867         pool->proxy_port[1] = kpool->proxy_port[1];
 1868         pool->opts = kpool->opts;
 1869 }
 1870 
 1871 static void
 1872 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
 1873 {
 1874         _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
 1875         _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
 1876 
 1877         bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
 1878         bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
 1879 
 1880         kpool->tblidx = pool->tblidx;
 1881         kpool->proxy_port[0] = pool->proxy_port[0];
 1882         kpool->proxy_port[1] = pool->proxy_port[1];
 1883         kpool->opts = pool->opts;
 1884 }
 1885 
 1886 static void
 1887 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
 1888 {
 1889 
 1890         bzero(rule, sizeof(*rule));
 1891 
 1892         bcopy(&krule->src, &rule->src, sizeof(rule->src));
 1893         bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
 1894 
 1895         for (int i = 0; i < PF_SKIP_COUNT; ++i) {
 1896                 if (rule->skip[i].ptr == NULL)
 1897                         rule->skip[i].nr = -1;
 1898                 else
 1899                         rule->skip[i].nr = krule->skip[i].ptr->nr;
 1900         }
 1901 
 1902         strlcpy(rule->label, krule->label[0], sizeof(rule->label));
 1903         strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
 1904         strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
 1905         strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
 1906         strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
 1907         strlcpy(rule->match_tagname, krule->match_tagname,
 1908             sizeof(rule->match_tagname));
 1909         strlcpy(rule->overload_tblname, krule->overload_tblname,
 1910             sizeof(rule->overload_tblname));
 1911 
 1912         pf_kpool_to_pool(&krule->rpool, &rule->rpool);
 1913 
 1914         rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
 1915         for (int i = 0; i < 2; i++) {
 1916                 rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
 1917                 rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
 1918         }
 1919 
 1920         /* kif, anchor, overload_tbl are not copied over. */
 1921 
 1922         rule->os_fingerprint = krule->os_fingerprint;
 1923 
 1924         rule->rtableid = krule->rtableid;
 1925         bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
 1926         rule->max_states = krule->max_states;
 1927         rule->max_src_nodes = krule->max_src_nodes;
 1928         rule->max_src_states = krule->max_src_states;
 1929         rule->max_src_conn = krule->max_src_conn;
 1930         rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
 1931         rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
 1932         rule->qid = krule->qid;
 1933         rule->pqid = krule->pqid;
 1934         rule->nr = krule->nr;
 1935         rule->prob = krule->prob;
 1936         rule->cuid = krule->cuid;
 1937         rule->cpid = krule->cpid;
 1938 
 1939         rule->return_icmp = krule->return_icmp;
 1940         rule->return_icmp6 = krule->return_icmp6;
 1941         rule->max_mss = krule->max_mss;
 1942         rule->tag = krule->tag;
 1943         rule->match_tag = krule->match_tag;
 1944         rule->scrub_flags = krule->scrub_flags;
 1945 
 1946         bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
 1947         bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
 1948 
 1949         rule->rule_flag = krule->rule_flag;
 1950         rule->action = krule->action;
 1951         rule->direction = krule->direction;
 1952         rule->log = krule->log;
 1953         rule->logif = krule->logif;
 1954         rule->quick = krule->quick;
 1955         rule->ifnot = krule->ifnot;
 1956         rule->match_tag_not = krule->match_tag_not;
 1957         rule->natpass = krule->natpass;
 1958 
 1959         rule->keep_state = krule->keep_state;
 1960         rule->af = krule->af;
 1961         rule->proto = krule->proto;
 1962         rule->type = krule->type;
 1963         rule->code = krule->code;
 1964         rule->flags = krule->flags;
 1965         rule->flagset = krule->flagset;
 1966         rule->min_ttl = krule->min_ttl;
 1967         rule->allow_opts = krule->allow_opts;
 1968         rule->rt = krule->rt;
 1969         rule->return_ttl = krule->return_ttl;
 1970         rule->tos = krule->tos;
 1971         rule->set_tos = krule->set_tos;
 1972         rule->anchor_relative = krule->anchor_relative;
 1973         rule->anchor_wildcard = krule->anchor_wildcard;
 1974 
 1975         rule->flush = krule->flush;
 1976         rule->prio = krule->prio;
 1977         rule->set_prio[0] = krule->set_prio[0];
 1978         rule->set_prio[1] = krule->set_prio[1];
 1979 
 1980         bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
 1981 
 1982         rule->u_states_cur = counter_u64_fetch(krule->states_cur);
 1983         rule->u_states_tot = counter_u64_fetch(krule->states_tot);
 1984         rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
 1985 }
 1986 
/*
 * Import a user-supplied rule into its kernel representation.  Address
 * specifications are validated and all strings are copied with
 * pf_user_strcpy() so malformed user input is rejected rather than
 * trusted.  Returns 0 on success or an errno value on failure
 * (EAFNOSUPPORT for an address family the kernel was built without,
 * or an error from address/string validation).
 */
static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	/* The legacy interface only carries a single label. */
	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
 2103 
 2104 static int
 2105 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
 2106     struct pf_kstate_kill *kill)
 2107 {
 2108         int ret;
 2109 
 2110         bzero(kill, sizeof(*kill));
 2111 
 2112         bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
 2113         kill->psk_af = psk->psk_af;
 2114         kill->psk_proto = psk->psk_proto;
 2115         bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
 2116         bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
 2117         ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
 2118             sizeof(kill->psk_ifname));
 2119         if (ret != 0)
 2120                 return (ret);
 2121         ret = pf_user_strcpy(kill->psk_label, psk->psk_label,
 2122             sizeof(kill->psk_label));
 2123         if (ret != 0)
 2124                 return (ret);
 2125 
 2126         return (0);
 2127 }
 2128 
/*
 * Insert a fully imported rule into the inactive ruleset identified by
 * anchor/ticket, as part of a DIOCXBEGIN/DIOCXCOMMIT transaction.
 *
 * On success the rule is appended to the inactive list and RB tree and
 * ownership passes to the ruleset; 0 is returned.  On any failure the
 * rule (and the speculatively created kif) are freed here and an errno
 * value is returned -- the caller must not touch the rule afterwards.
 */
static int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
    struct thread *td)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*tail;
	struct pf_kpooladdr	*pa;
	struct pfi_kkif		*kif = NULL;
	int			 rs_num;
	int			 error = 0;

	/* The ICMP return type lives in the high byte of return_icmp. */
	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
		error = EINVAL;
		goto errout_unlocked;
	}

#define ERROUT(x)       ERROUT_FUNCTION(errout, x)

	/*
	 * Perform all sleeping allocations up front, before any locks
	 * are taken (M_WAITOK may sleep).
	 */
	if (rule->ifname[0])
		kif = pf_kkif_create(M_WAITOK);
	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
	}
	rule->states_cur = counter_u64_alloc(M_WAITOK);
	rule->states_tot = counter_u64_alloc(M_WAITOK);
	rule->src_nodes = counter_u64_alloc(M_WAITOK);
	rule->cuid = td->td_ucred->cr_ruid;
	rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
	TAILQ_INIT(&rule->rpool.list);

	PF_CONFIG_LOCK();
	PF_RULES_WLOCK();
#ifdef PF_WANT_32_TO_64_COUNTER
	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
	MPASS(!rule->allrulelinked);
	rule->allrulelinked = true;
	V_pf_allrulecount++;
#endif
	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		ERROUT(EINVAL);
	rs_num = pf_get_ruleset_number(rule->action);
	if (rs_num >= PF_RULESET_MAX)
		ERROUT(EINVAL);
	/* Both tickets must match the transaction the caller opened. */
	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
		    ruleset->rules[rs_num].inactive.ticket));
		ERROUT(EBUSY);
	}
	if (pool_ticket != V_ticket_pabuf) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pool_ticket: %d != %d\n", pool_ticket,
		    V_ticket_pabuf));
		ERROUT(EBUSY);
	}
	/*
	 * XXXMJG hack: there is no mechanism to ensure they started the
	 * transaction. Ticket checked above may happen to match by accident,
	 * even if nobody called DIOCXBEGIN, let alone this process.
	 * Partially work around it by checking if the RB tree got allocated,
	 * see pf_begin_rules.
	 */
	if (ruleset->rules[rs_num].inactive.tree == NULL) {
		ERROUT(EINVAL);
	}

	/* New rules are numbered after the current tail. */
	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
	    pf_krulequeue);
	if (tail)
		rule->nr = tail->nr + 1;
	else
		rule->nr = 0;
	if (rule->ifname[0]) {
		rule->kif = pfi_kkif_attach(kif, rule->ifname);
		kif = NULL;
		pfi_kkif_ref(rule->kif);
	} else
		rule->kif = NULL;

	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
		error = EBUSY;

#ifdef ALTQ
	/* set queue IDs */
	if (rule->qname[0] != 0) {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != 0) {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			rule->pqid = rule->qid;
	}
#endif
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	if (rule->rt && !rule->direction)
		error = EINVAL;
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
		error = ENOMEM;
	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
		error = ENOMEM;
	if (pf_kanchor_setup(rule, ruleset, anchor_call))
		error = EINVAL;
	if (rule->scrub_flags & PFSTATE_SETPRIO &&
	    (rule->set_prio[0] > PF_PRIO_MAX ||
	    rule->set_prio[1] > PF_PRIO_MAX))
		error = EINVAL;
	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
		if (pa->addr.type == PF_ADDR_TABLE) {
			pa->addr.p.tbl = pfr_attach_table(ruleset,
			    pa->addr.v.tblname);
			if (pa->addr.p.tbl == NULL)
				error = ENOMEM;
		}

	rule->overload_tbl = NULL;
	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	/* Take ownership of the pool addresses buffered by the caller. */
	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
	/* NAT/RDR/BINAT and route-to rules require a non-empty pool. */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_NOPFROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		pf_free_rule(rule);
		rule = NULL;
		ERROUT(error);
	}

	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
	    rule, entries);
	ruleset->rules[rs_num].inactive.rcount++;

	/*
	 * Hashing and the RB duplicate check run without the rules lock;
	 * on a duplicate the rule is unlinked again under the lock.
	 */
	PF_RULES_WUNLOCK();
	pf_hash_rule(rule);
	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
		PF_RULES_WLOCK();
		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
		ruleset->rules[rs_num].inactive.rcount--;
		pf_free_rule(rule);
		rule = NULL;
		ERROUT(EEXIST);
	}
	PF_CONFIG_UNLOCK();

	return (0);

#undef ERROUT
errout:
	PF_RULES_WUNLOCK();
	PF_CONFIG_UNLOCK();
errout_unlocked:
	pf_kkif_free(kif);
	pf_krule_free(rule);
	return (error);
}
 2310 
 2311 static bool
 2312 pf_label_match(const struct pf_krule *rule, const char *label)
 2313 {
 2314         int i = 0;
 2315 
 2316         while (*rule->label[i]) {
 2317                 if (strcmp(rule->label[i], label) == 0)
 2318                         return (true);
 2319                 i++;
 2320         }
 2321 
 2322         return (false);
 2323 }
 2324 
 2325 static unsigned int
 2326 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
 2327 {
 2328         struct pf_kstate *s;
 2329         int more = 0;
 2330 
 2331         s = pf_find_state_all(key, dir, &more);
 2332         if (s == NULL)
 2333                 return (0);
 2334 
 2335         if (more) {
 2336                 PF_STATE_UNLOCK(s);
 2337                 return (0);
 2338         }
 2339 
 2340         pf_unlink_state(s);
 2341         return (1);
 2342 }
 2343 
/*
 * Kill all states in one idhash row that match the filter in *psk.
 * Because pf_unlink_state() invalidates the list iterator, the row is
 * rescanned from the top after every kill (relock_DIOCKILLSTATES).
 * Returns the number of states killed, including any reverse-direction
 * states removed via psk_kill_match.
 */
static int
pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
{
	struct pf_kstate	*s;
	struct pf_state_key	*sk;
	struct pf_addr		*srcaddr, *dstaddr;
	struct pf_state_key_cmp	 match_key;
	int			 idx, killed = 0;
	unsigned int		 dir;
	u_int16_t		 srcport, dstport;
	struct pfi_kkif		*kif;

relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		/* For floating states look at the original kif. */
		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

		/*
		 * Map the wire key's address/port pairs to src/dst as
		 * the user sees them, which depends on the state's
		 * direction.
		 */
		sk = s->key[PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		/* Apply each requested filter; zero fields match all. */
		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (!  PF_MATCHA(psk->psk_rt_addr.neg,
		    &psk->psk_rt_addr.addr.v.a.addr,
		    &psk->psk_rt_addr.addr.v.a.mask,
		    &s->rt_addr, sk->af))
			continue;

		if (psk->psk_src.port_op != 0 &&
		    ! pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    ! pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] &&
		    ! pf_label_match(s->rule.ptr, psk->psk_label))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    kif->pfik_name))
			continue;

		if (psk->psk_kill_match) {
			/* Create the key to find matching states, with lock
			 * held. */

			bzero(&match_key, sizeof(match_key));

			/* The mirror state runs in the opposite direction. */
			if (s->direction == PF_OUT) {
				dir = PF_IN;
				idx = PF_SK_STACK;
			} else {
				dir = PF_OUT;
				idx = PF_SK_WIRE;
			}

			/* Build the reversed key (addresses/ports swapped). */
			match_key.af = s->key[idx]->af;
			match_key.proto = s->key[idx]->proto;
			PF_ACPY(&match_key.addr[0],
			    &s->key[idx]->addr[1], match_key.af);
			match_key.port[0] = s->key[idx]->port[1];
			PF_ACPY(&match_key.addr[1],
			    &s->key[idx]->addr[0], match_key.af);
			match_key.port[1] = s->key[idx]->port[0];
		}

		pf_unlink_state(s);
		killed++;

		/* The matching-state kill must happen after the unlock
		 * implied by pf_unlink_state(). */
		if (psk->psk_kill_match)
			killed += pf_kill_matching_state(&match_key, dir);

		goto relock_DIOCKILLSTATES;
	}
	PF_HASHROW_UNLOCK(ih);

	return (killed);
}
 2449 
 2450 static int
 2451 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
 2452 {
 2453         int                      error = 0;
 2454         PF_RULES_RLOCK_TRACKER;
 2455 
 2456 #define ERROUT_IOCTL(target, x)                                 \
 2457     do {                                                                \
 2458             error = (x);                                                \
 2459             SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);  \
 2460             goto target;                                                \
 2461     } while (0)
 2462 
 2463 
 2464         /* XXX keep in sync with switch() below */
 2465         if (securelevel_gt(td->td_ucred, 2))
 2466                 switch (cmd) {
 2467                 case DIOCGETRULES:
 2468                 case DIOCGETRULE:
 2469                 case DIOCGETRULENV:
 2470                 case DIOCGETADDRS:
 2471                 case DIOCGETADDR:
 2472                 case DIOCGETSTATE:
 2473                 case DIOCGETSTATENV:
 2474                 case DIOCSETSTATUSIF:
 2475                 case DIOCGETSTATUS:
 2476                 case DIOCGETSTATUSNV:
 2477                 case DIOCCLRSTATUS:
 2478                 case DIOCNATLOOK:
 2479                 case DIOCSETDEBUG:
 2480                 case DIOCGETSTATES:
 2481                 case DIOCGETSTATESV2:
 2482                 case DIOCGETTIMEOUT:
 2483                 case DIOCCLRRULECTRS:
 2484                 case DIOCGETLIMIT:
 2485                 case DIOCGETALTQSV0:
 2486                 case DIOCGETALTQSV1:
 2487                 case DIOCGETALTQV0:
 2488                 case DIOCGETALTQV1:
 2489                 case DIOCGETQSTATSV0:
 2490                 case DIOCGETQSTATSV1:
 2491                 case DIOCGETRULESETS:
 2492                 case DIOCGETRULESET:
 2493                 case DIOCRGETTABLES:
 2494                 case DIOCRGETTSTATS:
 2495                 case DIOCRCLRTSTATS:
 2496                 case DIOCRCLRADDRS:
 2497                 case DIOCRADDADDRS:
 2498                 case DIOCRDELADDRS:
 2499                 case DIOCRSETADDRS:
 2500                 case DIOCRGETADDRS:
 2501                 case DIOCRGETASTATS:
 2502                 case DIOCRCLRASTATS:
 2503                 case DIOCRTSTADDRS:
 2504                 case DIOCOSFPGET:
 2505                 case DIOCGETSRCNODES:
 2506                 case DIOCCLRSRCNODES:
 2507                 case DIOCGETSYNCOOKIES:
 2508                 case DIOCIGETIFACES:
 2509                 case DIOCGIFSPEEDV0:
 2510                 case DIOCGIFSPEEDV1:
 2511                 case DIOCSETIFFLAG:
 2512                 case DIOCCLRIFFLAG:
 2513                 case DIOCGETETHRULES:
 2514                 case DIOCGETETHRULE:
 2515                 case DIOCGETETHRULESETS:
 2516                 case DIOCGETETHRULESET:
 2517                         break;
 2518                 case DIOCRCLRTABLES:
 2519                 case DIOCRADDTABLES:
 2520                 case DIOCRDELTABLES:
 2521                 case DIOCRSETTFLAGS:
 2522                         if (((struct pfioc_table *)addr)->pfrio_flags &
 2523                             PFR_FLAG_DUMMY)
 2524                                 break; /* dummy operation ok */
 2525                         return (EPERM);
 2526                 default:
 2527                         return (EPERM);
 2528                 }
 2529 
 2530         if (!(flags & FWRITE))
 2531                 switch (cmd) {
 2532                 case DIOCGETRULES:
 2533                 case DIOCGETADDRS:
 2534                 case DIOCGETADDR:
 2535                 case DIOCGETSTATE:
 2536                 case DIOCGETSTATENV:
 2537                 case DIOCGETSTATUS:
 2538                 case DIOCGETSTATUSNV:
 2539                 case DIOCGETSTATES:
 2540                 case DIOCGETSTATESV2:
 2541                 case DIOCGETTIMEOUT:
 2542                 case DIOCGETLIMIT:
 2543                 case DIOCGETALTQSV0:
 2544                 case DIOCGETALTQSV1:
 2545                 case DIOCGETALTQV0:
 2546                 case DIOCGETALTQV1:
 2547                 case DIOCGETQSTATSV0:
 2548                 case DIOCGETQSTATSV1:
 2549                 case DIOCGETRULESETS:
 2550                 case DIOCGETRULESET:
 2551                 case DIOCNATLOOK:
 2552                 case DIOCRGETTABLES:
 2553                 case DIOCRGETTSTATS:
 2554                 case DIOCRGETADDRS:
 2555                 case DIOCRGETASTATS:
 2556                 case DIOCRTSTADDRS:
 2557                 case DIOCOSFPGET:
 2558                 case DIOCGETSRCNODES:
 2559                 case DIOCGETSYNCOOKIES:
 2560                 case DIOCIGETIFACES:
 2561                 case DIOCGIFSPEEDV1:
 2562                 case DIOCGIFSPEEDV0:
 2563                 case DIOCGETRULENV:
 2564                 case DIOCGETETHRULES:
 2565                 case DIOCGETETHRULE:
 2566                 case DIOCGETETHRULESETS:
 2567                 case DIOCGETETHRULESET:
 2568                         break;
 2569                 case DIOCRCLRTABLES:
 2570                 case DIOCRADDTABLES:
 2571                 case DIOCRDELTABLES:
 2572                 case DIOCRCLRTSTATS:
 2573                 case DIOCRCLRADDRS:
 2574                 case DIOCRADDADDRS:
 2575                 case DIOCRDELADDRS:
 2576                 case DIOCRSETADDRS:
 2577                 case DIOCRSETTFLAGS:
 2578                         if (((struct pfioc_table *)addr)->pfrio_flags &
 2579                             PFR_FLAG_DUMMY) {
 2580                                 flags |= FWRITE; /* need write lock for dummy */
 2581                                 break; /* dummy operation ok */
 2582                         }
 2583                         return (EACCES);
 2584                 case DIOCGETRULE:
 2585                         if (((struct pfioc_rule *)addr)->action ==
 2586                             PF_GET_CLR_CNTR)
 2587                                 return (EACCES);
 2588                         break;
 2589                 default:
 2590                         return (EACCES);
 2591                 }
 2592 
 2593         CURVNET_SET(TD_TO_VNET(td));
 2594 
 2595         switch (cmd) {
 2596         case DIOCSTART:
 2597                 sx_xlock(&pf_ioctl_lock);
 2598                 if (V_pf_status.running)
 2599                         error = EEXIST;
 2600                 else {
 2601                         hook_pf();
 2602                         if (! TAILQ_EMPTY(V_pf_keth->active.rules))
 2603                                 hook_pf_eth();
 2604                         V_pf_status.running = 1;
 2605                         V_pf_status.since = time_second;
 2606                         new_unrhdr64(&V_pf_stateid, time_second);
 2607 
 2608                         DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
 2609                 }
 2610                 break;
 2611 
 2612         case DIOCSTOP:
 2613                 sx_xlock(&pf_ioctl_lock);
 2614                 if (!V_pf_status.running)
 2615                         error = ENOENT;
 2616                 else {
 2617                         V_pf_status.running = 0;
 2618                         dehook_pf();
 2619                         dehook_pf_eth();
 2620                         V_pf_status.since = time_second;
 2621                         DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
 2622                 }
 2623                 break;
 2624 
 2625         case DIOCGETETHRULES: {
 2626                 struct pfioc_nv         *nv = (struct pfioc_nv *)addr;
 2627                 nvlist_t                *nvl;
 2628                 void                    *packed;
 2629                 struct pf_keth_rule     *tail;
 2630                 struct pf_keth_ruleset  *rs;
 2631                 u_int32_t                ticket, nr;
 2632                 const char              *anchor = "";
 2633 
 2634                 nvl = NULL;
 2635                 packed = NULL;
 2636 
 2637 #define ERROUT(x)       ERROUT_IOCTL(DIOCGETETHRULES_error, x)
 2638 
 2639                 if (nv->len > pf_ioctl_maxcount)
 2640                         ERROUT(ENOMEM);
 2641 
 2642                 /* Copy the request in */
 2643                 packed = malloc(nv->len, M_NVLIST, M_WAITOK);
 2644                 if (packed == NULL)
 2645                         ERROUT(ENOMEM);
 2646 
 2647                 error = copyin(nv->data, packed, nv->len);
 2648                 if (error)
 2649                         ERROUT(error);
 2650 
 2651                 nvl = nvlist_unpack(packed, nv->len, 0);
 2652                 if (nvl == NULL)
 2653                         ERROUT(EBADMSG);
 2654 
 2655                 if (! nvlist_exists_string(nvl, "anchor"))
 2656                         ERROUT(EBADMSG);
 2657 
 2658                 anchor = nvlist_get_string(nvl, "anchor");
 2659 
 2660                 rs = pf_find_keth_ruleset(anchor);
 2661 
 2662                 nvlist_destroy(nvl);
 2663                 nvl = NULL;
 2664                 free(packed, M_NVLIST);
 2665                 packed = NULL;
 2666 
 2667                 if (rs == NULL)
 2668                         ERROUT(ENOENT);
 2669 
 2670                 /* Reply */
 2671                 nvl = nvlist_create(0);
 2672                 if (nvl == NULL)
 2673                         ERROUT(ENOMEM);
 2674 
 2675                 PF_RULES_RLOCK();
 2676 
 2677                 ticket = rs->active.ticket;
 2678                 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
 2679                 if (tail)
 2680                         nr = tail->nr + 1;
 2681                 else
 2682                         nr = 0;
 2683 
 2684                 PF_RULES_RUNLOCK();
 2685 
 2686                 nvlist_add_number(nvl, "ticket", ticket);
 2687                 nvlist_add_number(nvl, "nr", nr);
 2688 
 2689                 packed = nvlist_pack(nvl, &nv->len);
 2690                 if (packed == NULL)
 2691                         ERROUT(ENOMEM);
 2692 
 2693                 if (nv->size == 0)
 2694                         ERROUT(0);
 2695                 else if (nv->size < nv->len)
 2696                         ERROUT(ENOSPC);
 2697 
 2698                 error = copyout(packed, nv->data, nv->len);
 2699 
 2700 #undef ERROUT
 2701 DIOCGETETHRULES_error:
 2702                 free(packed, M_NVLIST);
 2703                 nvlist_destroy(nvl);
 2704                 break;
 2705         }
 2706 
 2707         case DIOCGETETHRULE: {
 2708                 struct epoch_tracker     et;
 2709                 struct pfioc_nv         *nv = (struct pfioc_nv *)addr;
 2710                 nvlist_t                *nvl = NULL;
 2711                 void                    *nvlpacked = NULL;
 2712                 struct pf_keth_rule     *rule = NULL;
 2713                 struct pf_keth_ruleset  *rs;
 2714                 u_int32_t                ticket, nr;
 2715                 bool                     clear = false;
 2716                 const char              *anchor;
 2717 
 2718 #define ERROUT(x)       ERROUT_IOCTL(DIOCGETETHRULE_error, x)
 2719 
 2720                 if (nv->len > pf_ioctl_maxcount)
 2721                         ERROUT(ENOMEM);
 2722 
 2723                 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 2724                 if (nvlpacked == NULL)
 2725                         ERROUT(ENOMEM);
 2726 
 2727                 error = copyin(nv->data, nvlpacked, nv->len);
 2728                 if (error)
 2729                         ERROUT(error);
 2730 
 2731                 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 2732                 if (nvl == NULL)
 2733                         ERROUT(EBADMSG);
 2734                 if (! nvlist_exists_number(nvl, "ticket"))
 2735                         ERROUT(EBADMSG);
 2736                 ticket = nvlist_get_number(nvl, "ticket");
 2737                 if (! nvlist_exists_string(nvl, "anchor"))
 2738                         ERROUT(EBADMSG);
 2739                 anchor = nvlist_get_string(nvl, "anchor");
 2740 
 2741                 if (nvlist_exists_bool(nvl, "clear"))
 2742                         clear = nvlist_get_bool(nvl, "clear");
 2743 
 2744                 if (clear && !(flags & FWRITE))
 2745                         ERROUT(EACCES);
 2746 
 2747                 if (! nvlist_exists_number(nvl, "nr"))
 2748                         ERROUT(EBADMSG);
 2749                 nr = nvlist_get_number(nvl, "nr");
 2750 
 2751                 PF_RULES_RLOCK();
 2752                 rs = pf_find_keth_ruleset(anchor);
 2753                 if (rs == NULL) {
 2754                         PF_RULES_RUNLOCK();
 2755                         ERROUT(ENOENT);
 2756                 }
 2757                 if (ticket != rs->active.ticket) {
 2758                         PF_RULES_RUNLOCK();
 2759                         ERROUT(EBUSY);
 2760                 }
 2761 
 2762                 nvlist_destroy(nvl);
 2763                 nvl = NULL;
 2764                 free(nvlpacked, M_NVLIST);
 2765                 nvlpacked = NULL;
 2766 
 2767                 rule = TAILQ_FIRST(rs->active.rules);
 2768                 while ((rule != NULL) && (rule->nr != nr))
 2769                         rule = TAILQ_NEXT(rule, entries);
 2770                 if (rule == NULL) {
 2771                         PF_RULES_RUNLOCK();
 2772                         ERROUT(ENOENT);
 2773                 }
 2774                 /* Make sure rule can't go away. */
 2775                 NET_EPOCH_ENTER(et);
 2776                 PF_RULES_RUNLOCK();
 2777                 nvl = pf_keth_rule_to_nveth_rule(rule);
 2778                 if (pf_keth_anchor_nvcopyout(rs, rule, nvl))
 2779                         ERROUT(EBUSY);
 2780                 NET_EPOCH_EXIT(et);
 2781                 if (nvl == NULL)
 2782                         ERROUT(ENOMEM);
 2783 
 2784                 nvlpacked = nvlist_pack(nvl, &nv->len);
 2785                 if (nvlpacked == NULL)
 2786                         ERROUT(ENOMEM);
 2787 
 2788                 if (nv->size == 0)
 2789                         ERROUT(0);
 2790                 else if (nv->size < nv->len)
 2791                         ERROUT(ENOSPC);
 2792 
 2793                 error = copyout(nvlpacked, nv->data, nv->len);
 2794                 if (error == 0 && clear) {
 2795                         counter_u64_zero(rule->evaluations);
 2796                         for (int i = 0; i < 2; i++) {
 2797                                 counter_u64_zero(rule->packets[i]);
 2798                                 counter_u64_zero(rule->bytes[i]);
 2799                         }
 2800                 }
 2801 
 2802 #undef ERROUT
 2803 DIOCGETETHRULE_error:
 2804                 free(nvlpacked, M_NVLIST);
 2805                 nvlist_destroy(nvl);
 2806                 break;
 2807         }
 2808 
 2809         case DIOCADDETHRULE: {
 2810                 struct pfioc_nv         *nv = (struct pfioc_nv *)addr;
 2811                 nvlist_t                *nvl = NULL;
 2812                 void                    *nvlpacked = NULL;
 2813                 struct pf_keth_rule     *rule = NULL, *tail = NULL;
 2814                 struct pf_keth_ruleset  *ruleset = NULL;
 2815                 struct pfi_kkif         *kif = NULL, *bridge_to_kif = NULL;
 2816                 const char              *anchor = "", *anchor_call = "";
 2817 
 2818 #define ERROUT(x)       ERROUT_IOCTL(DIOCADDETHRULE_error, x)
 2819 
 2820                 if (nv->len > pf_ioctl_maxcount)
 2821                         ERROUT(ENOMEM);
 2822 
 2823                 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 2824                 if (nvlpacked == NULL)
 2825                         ERROUT(ENOMEM);
 2826 
 2827                 error = copyin(nv->data, nvlpacked, nv->len);
 2828                 if (error)
 2829                         ERROUT(error);
 2830 
 2831                 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 2832                 if (nvl == NULL)
 2833                         ERROUT(EBADMSG);
 2834 
 2835                 if (! nvlist_exists_number(nvl, "ticket"))
 2836                         ERROUT(EBADMSG);
 2837 
 2838                 if (nvlist_exists_string(nvl, "anchor"))
 2839                         anchor = nvlist_get_string(nvl, "anchor");
 2840                 if (nvlist_exists_string(nvl, "anchor_call"))
 2841                         anchor_call = nvlist_get_string(nvl, "anchor_call");
 2842 
 2843                 ruleset = pf_find_keth_ruleset(anchor);
 2844                 if (ruleset == NULL)
 2845                         ERROUT(EINVAL);
 2846 
 2847                 if (nvlist_get_number(nvl, "ticket") !=
 2848                     ruleset->inactive.ticket) {
 2849                         DPFPRINTF(PF_DEBUG_MISC,
 2850                             ("ticket: %d != %d\n",
 2851                             (u_int32_t)nvlist_get_number(nvl, "ticket"),
 2852                             ruleset->inactive.ticket));
 2853                         ERROUT(EBUSY);
 2854                 }
 2855 
 2856                 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
 2857                 if (rule == NULL)
 2858                         ERROUT(ENOMEM);
 2859                 rule->timestamp = NULL;
 2860 
 2861                 error = pf_nveth_rule_to_keth_rule(nvl, rule);
 2862                 if (error != 0)
 2863                         ERROUT(error);
 2864 
 2865                 if (rule->ifname[0])
 2866                         kif = pf_kkif_create(M_WAITOK);
 2867                 if (rule->bridge_to_name[0])
 2868                         bridge_to_kif = pf_kkif_create(M_WAITOK);
 2869                 rule->evaluations = counter_u64_alloc(M_WAITOK);
 2870                 for (int i = 0; i < 2; i++) {
 2871                         rule->packets[i] = counter_u64_alloc(M_WAITOK);
 2872                         rule->bytes[i] = counter_u64_alloc(M_WAITOK);
 2873                 }
 2874                 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
 2875                     M_WAITOK | M_ZERO);
 2876 
 2877                 PF_RULES_WLOCK();
 2878 
 2879                 if (rule->ifname[0]) {
 2880                         rule->kif = pfi_kkif_attach(kif, rule->ifname);
 2881                         pfi_kkif_ref(rule->kif);
 2882                 } else
 2883                         rule->kif = NULL;
 2884                 if (rule->bridge_to_name[0]) {
 2885                         rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
 2886                             rule->bridge_to_name);
 2887                         pfi_kkif_ref(rule->bridge_to);
 2888                 } else
 2889                         rule->bridge_to = NULL;
 2890 
 2891 #ifdef ALTQ
 2892                 /* set queue IDs */
 2893                 if (rule->qname[0] != 0) {
 2894                         if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
 2895                                 error = EBUSY;
 2896                         else
 2897                                 rule->qid = rule->qid;
 2898                 }
 2899 #endif
 2900                 if (rule->tagname[0])
 2901                         if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
 2902                                 error = EBUSY;
 2903                 if (rule->match_tagname[0])
 2904                         if ((rule->match_tag = pf_tagname2tag(
 2905                             rule->match_tagname)) == 0)
 2906                                 error = EBUSY;
 2907 
 2908                 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
 2909                         error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
 2910                 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
 2911                         error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
 2912 
 2913                 if (error) {
 2914                         pf_free_eth_rule(rule);
 2915                         PF_RULES_WUNLOCK();
 2916                         ERROUT(error);
 2917                 }
 2918 
 2919                 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
 2920                         pf_free_eth_rule(rule);
 2921                         PF_RULES_WUNLOCK();
 2922                         ERROUT(EINVAL);
 2923                 }
 2924 
 2925                 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
 2926                 if (tail)
 2927                         rule->nr = tail->nr + 1;
 2928                 else
 2929                         rule->nr = 0;
 2930 
 2931                 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
 2932 
 2933                 PF_RULES_WUNLOCK();
 2934 
 2935 #undef ERROUT
 2936 DIOCADDETHRULE_error:
 2937                 nvlist_destroy(nvl);
 2938                 free(nvlpacked, M_NVLIST);
 2939                 break;
 2940         }
 2941 
 2942         case DIOCGETETHRULESETS: {
 2943                 struct epoch_tracker     et;
 2944                 struct pfioc_nv         *nv = (struct pfioc_nv *)addr;
 2945                 nvlist_t                *nvl = NULL;
 2946                 void                    *nvlpacked = NULL;
 2947                 struct pf_keth_ruleset  *ruleset;
 2948                 struct pf_keth_anchor   *anchor;
 2949                 int                      nr = 0;
 2950 
 2951 #define ERROUT(x)       ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
 2952 
 2953                 if (nv->len > pf_ioctl_maxcount)
 2954                         ERROUT(ENOMEM);
 2955 
 2956                 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 2957                 if (nvlpacked == NULL)
 2958                         ERROUT(ENOMEM);
 2959 
 2960                 error = copyin(nv->data, nvlpacked, nv->len);
 2961                 if (error)
 2962                         ERROUT(error);
 2963 
 2964                 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 2965                 if (nvl == NULL)
 2966                         ERROUT(EBADMSG);
 2967                 if (! nvlist_exists_string(nvl, "path"))
 2968                         ERROUT(EBADMSG);
 2969 
 2970                 NET_EPOCH_ENTER(et);
 2971 
 2972                 if ((ruleset = pf_find_keth_ruleset(
 2973                     nvlist_get_string(nvl, "path"))) == NULL) {
 2974                         NET_EPOCH_EXIT(et);
 2975                         ERROUT(ENOENT);
 2976                 }
 2977 
 2978                 if (ruleset->anchor == NULL) {
 2979                         RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
 2980                                 if (anchor->parent == NULL)
 2981                                         nr++;
 2982                 } else {
 2983                         RB_FOREACH(anchor, pf_keth_anchor_node,
 2984                             &ruleset->anchor->children)
 2985                                 nr++;
 2986                 }
 2987 
 2988                 NET_EPOCH_EXIT(et);
 2989 
 2990                 nvlist_destroy(nvl);
 2991                 nvl = NULL;
 2992                 free(nvlpacked, M_NVLIST);
 2993                 nvlpacked = NULL;
 2994 
 2995                 nvl = nvlist_create(0);
 2996                 if (nvl == NULL)
 2997                         ERROUT(ENOMEM);
 2998 
 2999                 nvlist_add_number(nvl, "nr", nr);
 3000 
 3001                 nvlpacked = nvlist_pack(nvl, &nv->len);
 3002                 if (nvlpacked == NULL)
 3003                         ERROUT(ENOMEM);
 3004 
 3005                 if (nv->size == 0)
 3006                         ERROUT(0);
 3007                 else if (nv->size < nv->len)
 3008                         ERROUT(ENOSPC);
 3009 
 3010                 error = copyout(nvlpacked, nv->data, nv->len);
 3011 
 3012 #undef ERROUT
 3013 DIOCGETETHRULESETS_error:
 3014                 free(nvlpacked, M_NVLIST);
 3015                 nvlist_destroy(nvl);
 3016                 break;
 3017         }
 3018 
 3019         case DIOCGETETHRULESET: {
 3020                 struct epoch_tracker     et;
 3021                 struct pfioc_nv         *nv = (struct pfioc_nv *)addr;
 3022                 nvlist_t                *nvl = NULL;
 3023                 void                    *nvlpacked = NULL;
 3024                 struct pf_keth_ruleset  *ruleset;
 3025                 struct pf_keth_anchor   *anchor;
 3026                 int                      nr = 0, req_nr = 0;
 3027                 bool                     found = false;
 3028 
 3029 #define ERROUT(x)       ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
 3030 
 3031                 if (nv->len > pf_ioctl_maxcount)
 3032                         ERROUT(ENOMEM);
 3033 
 3034                 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 3035                 if (nvlpacked == NULL)
 3036                         ERROUT(ENOMEM);
 3037 
 3038                 error = copyin(nv->data, nvlpacked, nv->len);
 3039                 if (error)
 3040                         ERROUT(error);
 3041 
 3042                 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 3043                 if (nvl == NULL)
 3044                         ERROUT(EBADMSG);
 3045                 if (! nvlist_exists_string(nvl, "path"))
 3046                         ERROUT(EBADMSG);
 3047                 if (! nvlist_exists_number(nvl, "nr"))
 3048                         ERROUT(EBADMSG);
 3049 
 3050                 req_nr = nvlist_get_number(nvl, "nr");
 3051 
 3052                 NET_EPOCH_ENTER(et);
 3053 
 3054                 if ((ruleset = pf_find_keth_ruleset(
 3055                     nvlist_get_string(nvl, "path"))) == NULL) {
 3056                         NET_EPOCH_EXIT(et);
 3057                         ERROUT(ENOENT);
 3058                 }
 3059 
 3060                 nvlist_destroy(nvl);
 3061                 nvl = NULL;
 3062                 free(nvlpacked, M_NVLIST);
 3063                 nvlpacked = NULL;
 3064 
 3065                 nvl = nvlist_create(0);
 3066                 if (nvl == NULL) {
 3067                         NET_EPOCH_EXIT(et);
 3068                         ERROUT(ENOMEM);
 3069                 }
 3070 
 3071                 if (ruleset->anchor == NULL) {
 3072                         RB_FOREACH(anchor, pf_keth_anchor_global,
 3073                             &V_pf_keth_anchors) {
 3074                                 if (anchor->parent == NULL && nr++ == req_nr) {
 3075                                         found = true;
 3076                                         break;
 3077                                 }
 3078                         }
 3079                 } else {
 3080                         RB_FOREACH(anchor, pf_keth_anchor_node,
 3081                              &ruleset->anchor->children) {
 3082                                 if (nr++ == req_nr) {
 3083                                         found = true;
 3084                                         break;
 3085                                 }
 3086                         }
 3087                 }
 3088 
 3089                 NET_EPOCH_EXIT(et);
 3090                 if (found) {
 3091                         nvlist_add_number(nvl, "nr", nr);
 3092                         nvlist_add_string(nvl, "name", anchor->name);
 3093                         if (ruleset->anchor)
 3094                                 nvlist_add_string(nvl, "path",
 3095                                     ruleset->anchor->path);
 3096                         else
 3097                                 nvlist_add_string(nvl, "path", "");
 3098                 } else {
 3099                         ERROUT(EBUSY);
 3100                 }
 3101 
 3102                 nvlpacked = nvlist_pack(nvl, &nv->len);
 3103                 if (nvlpacked == NULL)
 3104                         ERROUT(ENOMEM);
 3105 
 3106                 if (nv->size == 0)
 3107                         ERROUT(0);
 3108                 else if (nv->size < nv->len)
 3109                         ERROUT(ENOSPC);
 3110 
 3111                 error = copyout(nvlpacked, nv->data, nv->len);
 3112 
 3113 #undef ERROUT
 3114 DIOCGETETHRULESET_error:
 3115                 free(nvlpacked, M_NVLIST);
 3116                 nvlist_destroy(nvl);
 3117                 break;
 3118         }
 3119 
 3120         case DIOCADDRULENV: {
 3121                 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
 3122                 nvlist_t        *nvl = NULL;
 3123                 void            *nvlpacked = NULL;
 3124                 struct pf_krule *rule = NULL;
 3125                 const char      *anchor = "", *anchor_call = "";
 3126                 uint32_t         ticket = 0, pool_ticket = 0;
 3127 
 3128 #define ERROUT(x)       ERROUT_IOCTL(DIOCADDRULENV_error, x)
 3129 
 3130                 if (nv->len > pf_ioctl_maxcount)
 3131                         ERROUT(ENOMEM);
 3132 
 3133                 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 3134                 error = copyin(nv->data, nvlpacked, nv->len);
 3135                 if (error)
 3136                         ERROUT(error);
 3137 
 3138                 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 3139                 if (nvl == NULL)
 3140                         ERROUT(EBADMSG);
 3141 
 3142                 if (! nvlist_exists_number(nvl, "ticket"))
 3143                         ERROUT(EINVAL);
 3144                 ticket = nvlist_get_number(nvl, "ticket");
 3145 
 3146                 if (! nvlist_exists_number(nvl, "pool_ticket"))
 3147                         ERROUT(EINVAL);
 3148                 pool_ticket = nvlist_get_number(nvl, "pool_ticket");
 3149 
 3150                 if (! nvlist_exists_nvlist(nvl, "rule"))
 3151                         ERROUT(EINVAL);
 3152 
 3153                 rule = pf_krule_alloc();
 3154                 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
 3155                     rule);
 3156                 if (error)
 3157                         ERROUT(error);
 3158 
 3159                 if (nvlist_exists_string(nvl, "anchor"))
 3160                         anchor = nvlist_get_string(nvl, "anchor");
 3161                 if (nvlist_exists_string(nvl, "anchor_call"))
 3162                         anchor_call = nvlist_get_string(nvl, "anchor_call");
 3163 
 3164                 if ((error = nvlist_error(nvl)))
 3165                         ERROUT(error);
 3166 
 3167                 /* Frees rule on error */
 3168                 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
 3169                     anchor_call, td);
 3170 
 3171                 nvlist_destroy(nvl);
 3172                 free(nvlpacked, M_NVLIST);
 3173                 break;
 3174 #undef ERROUT
 3175 DIOCADDRULENV_error:
 3176                 pf_krule_free(rule);
 3177                 nvlist_destroy(nvl);
 3178                 free(nvlpacked, M_NVLIST);
 3179 
 3180                 break;
 3181         }
        case DIOCADDRULE: {
                struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
                struct pf_krule         *rule;

                /*
                 * Convert the user-visible pf_rule into the kernel's
                 * pf_krule representation; a conversion failure means we
                 * must free the half-built rule here ourselves.
                 */
                rule = pf_krule_alloc();
                error = pf_rule_to_krule(&pr->rule, rule);
                if (error != 0) {
                        pf_krule_free(rule);
                        break;
                }

                /* Defensively NUL-terminate the user-supplied anchor path. */
                pr->anchor[sizeof(pr->anchor) - 1] = 0;

                /* Frees rule on error */
                error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
                    pr->anchor, pr->anchor_call, td);
                break;
        }
 3200 
        case DIOCGETRULES: {
                struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
                struct pf_kruleset      *ruleset;
                struct pf_krule         *tail;
                int                      rs_num;

                /* Defensively NUL-terminate the user-supplied anchor path. */
                pr->anchor[sizeof(pr->anchor) - 1] = 0;

                PF_RULES_WLOCK();
                ruleset = pf_find_kruleset(pr->anchor);
                if (ruleset == NULL) {
                        PF_RULES_WUNLOCK();
                        error = EINVAL;
                        break;
                }
                /* Map the rule action to one of the per-type rule queues. */
                rs_num = pf_get_ruleset_number(pr->rule.action);
                if (rs_num >= PF_RULESET_MAX) {
                        PF_RULES_WUNLOCK();
                        error = EINVAL;
                        break;
                }
                /*
                 * Report the rule count (last rule's number + 1, rules are
                 * numbered densely from 0) and the current ticket; the
                 * caller presents the ticket to subsequent DIOCGETRULE
                 * calls to detect concurrent ruleset changes.
                 */
                tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
                    pf_krulequeue);
                if (tail)
                        pr->nr = tail->nr + 1;
                else
                        pr->nr = 0;
                pr->ticket = ruleset->rules[rs_num].active.ticket;
                PF_RULES_WUNLOCK();
                break;
        }
 3232 
        case DIOCGETRULE: {
                struct pfioc_rule       *pr = (struct pfioc_rule *)addr;
                struct pf_kruleset      *ruleset;
                struct pf_krule         *rule;
                int                      rs_num;

                /* Defensively NUL-terminate the user-supplied anchor path. */
                pr->anchor[sizeof(pr->anchor) - 1] = 0;

                /*
                 * Write lock: the PF_GET_CLR_CNTR sub-action below mutates
                 * the rule's counters.
                 */
                PF_RULES_WLOCK();
                ruleset = pf_find_kruleset(pr->anchor);
                if (ruleset == NULL) {
                        PF_RULES_WUNLOCK();
                        error = EINVAL;
                        break;
                }
                rs_num = pf_get_ruleset_number(pr->rule.action);
                if (rs_num >= PF_RULESET_MAX) {
                        PF_RULES_WUNLOCK();
                        error = EINVAL;
                        break;
                }
                /*
                 * The caller's ticket (obtained via DIOCGETRULES) must
                 * still match, i.e. the ruleset must not have changed
                 * since then.
                 */
                if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
                        PF_RULES_WUNLOCK();
                        error = EBUSY;
                        break;
                }
                /* Linear scan for the rule numbered pr->nr. */
                rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
                while ((rule != NULL) && (rule->nr != pr->nr))
                        rule = TAILQ_NEXT(rule, entries);
                if (rule == NULL) {
                        PF_RULES_WUNLOCK();
                        error = EBUSY;
                        break;
                }

                /* Export the kernel rule into the user-visible struct. */
                pf_krule_to_rule(rule, &pr->rule);

                if (pf_kanchor_copyout(ruleset, rule, pr)) {
                        PF_RULES_WUNLOCK();
                        error = EBUSY;
                        break;
                }
                pf_addr_copyout(&pr->rule.src.addr);
                pf_addr_copyout(&pr->rule.dst.addr);

                /* Optionally zero the rule's statistics after reading. */
                if (pr->action == PF_GET_CLR_CNTR) {
                        pf_counter_u64_zero(&rule->evaluations);
                        for (int i = 0; i < 2; i++) {
                                pf_counter_u64_zero(&rule->packets[i]);
                                pf_counter_u64_zero(&rule->bytes[i]);
                        }
                        counter_u64_zero(rule->states_tot);
                }
                PF_RULES_WUNLOCK();
                break;
        }
 3289 
        case DIOCGETRULENV: {
                struct pfioc_nv         *nv = (struct pfioc_nv *)addr;
                nvlist_t                *nvrule = NULL;
                nvlist_t                *nvl = NULL;
                struct pf_kruleset      *ruleset;
                struct pf_krule         *rule;
                void                    *nvlpacked = NULL;
                int                      rs_num, nr;
                bool                     clear_counter = false;

#define ERROUT(x)       ERROUT_IOCTL(DIOCGETRULENV_error, x)

                /* Bound the request size before allocating for it. */
                if (nv->len > pf_ioctl_maxcount)
                        ERROUT(ENOMEM);

                /* Copy the request in */
                nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
                /* NOTE(review): M_WAITOK malloc(9) cannot return NULL;
                 * this check looks vestigial — confirm before removing. */
                if (nvlpacked == NULL)
                        ERROUT(ENOMEM);

                error = copyin(nv->data, nvlpacked, nv->len);
                if (error)
                        ERROUT(error);

                nvl = nvlist_unpack(nvlpacked, nv->len, 0);
                if (nvl == NULL)
                        ERROUT(EBADMSG);

                /* All four request fields are mandatory. */
                if (! nvlist_exists_string(nvl, "anchor"))
                        ERROUT(EBADMSG);
                if (! nvlist_exists_number(nvl, "ruleset"))
                        ERROUT(EBADMSG);
                if (! nvlist_exists_number(nvl, "ticket"))
                        ERROUT(EBADMSG);
                if (! nvlist_exists_number(nvl, "nr"))
                        ERROUT(EBADMSG);

                if (nvlist_exists_bool(nvl, "clear_counter"))
                        clear_counter = nvlist_get_bool(nvl, "clear_counter");

                /* Clearing counters mutates state: require write access. */
                if (clear_counter && !(flags & FWRITE))
                        ERROUT(EACCES);

                nr = nvlist_get_number(nvl, "nr");

                PF_RULES_WLOCK();
                ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
                if (ruleset == NULL) {
                        PF_RULES_WUNLOCK();
                        ERROUT(ENOENT);
                }

                rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
                if (rs_num >= PF_RULESET_MAX) {
                        PF_RULES_WUNLOCK();
                        ERROUT(EINVAL);
                }

                /* Caller's ticket must match or the ruleset has changed. */
                if (nvlist_get_number(nvl, "ticket") !=
                    ruleset->rules[rs_num].active.ticket) {
                        PF_RULES_WUNLOCK();
                        ERROUT(EBUSY);
                }

                if ((error = nvlist_error(nvl))) {
                        PF_RULES_WUNLOCK();
                        ERROUT(error);
                }

                /* Linear scan for rule number 'nr'. */
                rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
                while ((rule != NULL) && (rule->nr != nr))
                        rule = TAILQ_NEXT(rule, entries);
                if (rule == NULL) {
                        PF_RULES_WUNLOCK();
                        ERROUT(EBUSY);
                }

                nvrule = pf_krule_to_nvrule(rule);

                /* The request nvlist is done with; reuse 'nvl' as the reply. */
                nvlist_destroy(nvl);
                nvl = nvlist_create(0);
                if (nvl == NULL) {
                        PF_RULES_WUNLOCK();
                        ERROUT(ENOMEM);
                }
                nvlist_add_number(nvl, "nr", nr);
                nvlist_add_nvlist(nvl, "rule", nvrule);
                nvlist_destroy(nvrule);
                nvrule = NULL;
                if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
                        PF_RULES_WUNLOCK();
                        ERROUT(EBUSY);
                }

                /* Pack the reply, replacing the request buffer. */
                free(nvlpacked, M_NVLIST);
                nvlpacked = nvlist_pack(nvl, &nv->len);
                if (nvlpacked == NULL) {
                        PF_RULES_WUNLOCK();
                        ERROUT(ENOMEM);
                }

                /*
                 * size == 0 is a size probe: nv->len (set by nvlist_pack
                 * above) tells the caller how much space is needed.
                 */
                if (nv->size == 0) {
                        PF_RULES_WUNLOCK();
                        ERROUT(0);
                }
                else if (nv->size < nv->len) {
                        PF_RULES_WUNLOCK();
                        ERROUT(ENOSPC);
                }

                /* Only clear counters once success is guaranteed. */
                if (clear_counter) {
                        pf_counter_u64_zero(&rule->evaluations);
                        for (int i = 0; i < 2; i++) {
                                pf_counter_u64_zero(&rule->packets[i]);
                                pf_counter_u64_zero(&rule->bytes[i]);
                        }
                        counter_u64_zero(rule->states_tot);
                }
                PF_RULES_WUNLOCK();

                error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
DIOCGETRULENV_error:
                /*
                 * Shared with the success path (falls through the label):
                 * release request/reply buffers and nvlists.
                 */
                free(nvlpacked, M_NVLIST);
                nvlist_destroy(nvrule);
                nvlist_destroy(nvl);

                break;
        }
 3420 
        case DIOCCHANGERULE: {
                struct pfioc_rule       *pcr = (struct pfioc_rule *)addr;
                struct pf_kruleset      *ruleset;
                struct pf_krule         *oldrule = NULL, *newrule = NULL;
                struct pfi_kkif         *kif = NULL;
                struct pf_kpooladdr     *pa;
                u_int32_t                nr = 0;
                int                      rs_num;

                /* Defensively NUL-terminate the user-supplied anchor path. */
                pcr->anchor[sizeof(pcr->anchor) - 1] = 0;

                /* Validate the requested change action. */
                if (pcr->action < PF_CHANGE_ADD_HEAD ||
                    pcr->action > PF_CHANGE_GET_TICKET) {
                        error = EINVAL;
                        break;
                }
                /* return_icmp carries the ICMP type in its high byte. */
                if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
                        error = EINVAL;
                        break;
                }

                /*
                 * For anything but a removal, build the replacement rule
                 * and preallocate its counters and kif now, while sleeping
                 * (M_WAITOK) is still permitted — the rules lock is taken
                 * below.
                 */
                if (pcr->action != PF_CHANGE_REMOVE) {
                        newrule = pf_krule_alloc();
                        error = pf_rule_to_krule(&pcr->rule, newrule);
                        if (error != 0) {
                                pf_krule_free(newrule);
                                break;
                        }

                        if (newrule->ifname[0])
                                kif = pf_kkif_create(M_WAITOK);
                        pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
                        for (int i = 0; i < 2; i++) {
                                pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
                                pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
                        }
                        newrule->states_cur = counter_u64_alloc(M_WAITOK);
                        newrule->states_tot = counter_u64_alloc(M_WAITOK);
                        newrule->src_nodes = counter_u64_alloc(M_WAITOK);
                        /* Record the creating credential/process. */
                        newrule->cuid = td->td_ucred->cr_ruid;
                        newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
                        TAILQ_INIT(&newrule->rpool.list);
                }
#define ERROUT(x)       ERROUT_IOCTL(DIOCCHANGERULE_error, x)

                PF_CONFIG_LOCK();
                PF_RULES_WLOCK();
#ifdef PF_WANT_32_TO_64_COUNTER
                if (newrule != NULL) {
                        LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
                        newrule->allrulelinked = true;
                        V_pf_allrulecount++;
                }
#endif

                /*
                 * The pool-address ticket is only relevant when a new rule
                 * is actually being installed.
                 */
                if (!(pcr->action == PF_CHANGE_REMOVE ||
                    pcr->action == PF_CHANGE_GET_TICKET) &&
                    pcr->pool_ticket != V_ticket_pabuf)
                        ERROUT(EBUSY);

                ruleset = pf_find_kruleset(pcr->anchor);
                if (ruleset == NULL)
                        ERROUT(EINVAL);

                rs_num = pf_get_ruleset_number(pcr->rule.action);
                if (rs_num >= PF_RULESET_MAX)
                        ERROUT(EINVAL);

                /*
                 * XXXMJG: there is no guarantee that the ruleset was
                 * created by the usual route of calling DIOCXBEGIN.
                 * As a result it is possible the rule tree will not
                 * be allocated yet. Hack around it by doing it here.
                 * Note it is fine to let the tree persist in case of
                 * error as it will be freed down the road on future
                 * updates (if need be).
                 */
                if (ruleset->rules[rs_num].active.tree == NULL) {
                        ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
                        if (ruleset->rules[rs_num].active.tree == NULL) {
                                ERROUT(ENOMEM);
                        }
                }

                /*
                 * GET_TICKET bumps and returns the ticket (ERROUT(0) exits
                 * through the cleanup label with no error); any other
                 * action must present the current ticket.
                 */
                if (pcr->action == PF_CHANGE_GET_TICKET) {
                        pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
                        ERROUT(0);
                } else if (pcr->ticket !=
                            ruleset->rules[rs_num].active.ticket)
                                ERROUT(EINVAL);

                if (pcr->action != PF_CHANGE_REMOVE) {
                        /* Resolve the interface name, if any; kif ownership
                         * passes to pfi_kkif_attach(). */
                        if (newrule->ifname[0]) {
                                newrule->kif = pfi_kkif_attach(kif,
                                    newrule->ifname);
                                kif = NULL;
                                pfi_kkif_ref(newrule->kif);
                        } else
                                newrule->kif = NULL;

                        if (newrule->rtableid > 0 &&
                            newrule->rtableid >= rt_numfibs)
                                error = EBUSY;

#ifdef ALTQ
                        /* set queue IDs */
                        if (newrule->qname[0] != 0) {
                                if ((newrule->qid =
                                    pf_qname2qid(newrule->qname)) == 0)
                                        error = EBUSY;
                                else if (newrule->pqname[0] != 0) {
                                        if ((newrule->pqid =
                                            pf_qname2qid(newrule->pqname)) == 0)
                                                error = EBUSY;
                                } else
                                        newrule->pqid = newrule->qid;
                        }
#endif /* ALTQ */
                        /* Resolve tag names to numeric tag IDs. */
                        if (newrule->tagname[0])
                                if ((newrule->tag =
                                    pf_tagname2tag(newrule->tagname)) == 0)
                                        error = EBUSY;
                        if (newrule->match_tagname[0])
                                if ((newrule->match_tag = pf_tagname2tag(
                                    newrule->match_tagname)) == 0)
                                        error = EBUSY;
                        /* route-to requires an explicit direction. */
                        if (newrule->rt && !newrule->direction)
                                error = EINVAL;
                        if (!newrule->log)
                                newrule->logif = 0;
                        if (newrule->logif >= PFLOGIFS_MAX)
                                error = EINVAL;
                        if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
                                error = ENOMEM;
                        if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
                                error = ENOMEM;
                        if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
                                error = EINVAL;
                        /* Attach tables referenced by the staged pool. */
                        TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
                                if (pa->addr.type == PF_ADDR_TABLE) {
                                        pa->addr.p.tbl =
                                            pfr_attach_table(ruleset,
                                            pa->addr.v.tblname);
                                        if (pa->addr.p.tbl == NULL)
                                                error = ENOMEM;
                                }

                        newrule->overload_tbl = NULL;
                        if (newrule->overload_tblname[0]) {
                                if ((newrule->overload_tbl = pfr_attach_table(
                                    ruleset, newrule->overload_tblname)) ==
                                    NULL)
                                        error = EINVAL;
                                else
                                        newrule->overload_tbl->pfrkt_flags |=
                                            PFR_TFLAG_ACTIVE;
                        }

                        /* Take ownership of the staged pool address list. */
                        pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
                        /*
                         * NAT/RDR/BINAT and route-to rules need a non-empty
                         * pool unless they call into an anchor.
                         */
                        if (((((newrule->action == PF_NAT) ||
                            (newrule->action == PF_RDR) ||
                            (newrule->action == PF_BINAT) ||
                            (newrule->rt > PF_NOPFROUTE)) &&
                            !newrule->anchor)) &&
                            (TAILQ_FIRST(&newrule->rpool.list) == NULL))
                                error = EINVAL;

                        /*
                         * 'error' was accumulated across all the checks
                         * above; bail out once, here.
                         */
                        if (error) {
                                pf_free_rule(newrule);
                                PF_RULES_WUNLOCK();
                                PF_CONFIG_UNLOCK();
                                break;
                        }

                        newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
                }
                pf_empty_kpool(&V_pf_pabuf);

                /* Locate the reference rule for the insertion/removal. */
                if (pcr->action == PF_CHANGE_ADD_HEAD)
                        oldrule = TAILQ_FIRST(
                            ruleset->rules[rs_num].active.ptr);
                else if (pcr->action == PF_CHANGE_ADD_TAIL)
                        oldrule = TAILQ_LAST(
                            ruleset->rules[rs_num].active.ptr, pf_krulequeue);
                else {
                        oldrule = TAILQ_FIRST(
                            ruleset->rules[rs_num].active.ptr);
                        while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
                                oldrule = TAILQ_NEXT(oldrule, entries);
                        if (oldrule == NULL) {
                                if (newrule != NULL)
                                        pf_free_rule(newrule);
                                PF_RULES_WUNLOCK();
                                PF_CONFIG_UNLOCK();
                                error = EINVAL;
                                break;
                        }
                }

                if (pcr->action == PF_CHANGE_REMOVE) {
                        pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
                            oldrule);
                        RB_REMOVE(pf_krule_global,
                            ruleset->rules[rs_num].active.tree, oldrule);
                        ruleset->rules[rs_num].active.rcount--;
                } else {
                        /*
                         * RB_INSERT returning non-NULL means a rule with
                         * the same hash already exists: reject duplicates.
                         */
                        pf_hash_rule(newrule);
                        if (RB_INSERT(pf_krule_global,
                            ruleset->rules[rs_num].active.tree, newrule) != NULL) {
                                pf_free_rule(newrule);
                                PF_RULES_WUNLOCK();
                                PF_CONFIG_UNLOCK();
                                error = EEXIST;
                                break;
                        }

                        if (oldrule == NULL)
                                TAILQ_INSERT_TAIL(
                                    ruleset->rules[rs_num].active.ptr,
                                    newrule, entries);
                        else if (pcr->action == PF_CHANGE_ADD_HEAD ||
                            pcr->action == PF_CHANGE_ADD_BEFORE)
                                TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
                        else
                                TAILQ_INSERT_AFTER(
                                    ruleset->rules[rs_num].active.ptr,
                                    oldrule, newrule, entries);
                        ruleset->rules[rs_num].active.rcount++;
                }

                /* Renumber the whole queue densely after the change. */
                nr = 0;
                TAILQ_FOREACH(oldrule,
                    ruleset->rules[rs_num].active.ptr, entries)
                        oldrule->nr = nr++;

                /* Invalidate any outstanding tickets. */
                ruleset->rules[rs_num].active.ticket++;

                pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
                pf_remove_if_empty_kruleset(ruleset);

                PF_RULES_WUNLOCK();
                PF_CONFIG_UNLOCK();
                break;

#undef ERROUT
DIOCCHANGERULE_error:
                /* Error exit: drop locks, free half-built resources. */
                PF_RULES_WUNLOCK();
                PF_CONFIG_UNLOCK();
                pf_krule_free(newrule);
                pf_kkif_free(kif);
                break;
        }
 3673 
        case DIOCCLRSTATES: {
                struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
                struct pf_kstate_kill    kill;

                /* Convert the ioctl struct to the kernel kill descriptor. */
                error = pf_state_kill_to_kstate_kill(psk, &kill);
                if (error)
                        break;

                /* Report how many states were cleared back to the caller. */
                psk->psk_killed = pf_clear_states(&kill);
                break;
        }
 3685 
        case DIOCCLRSTATESNV: {
                /* nvlist(9)-based variant; all work is delegated. */
                error = pf_clearstates_nv((struct pfioc_nv *)addr);
                break;
        }
 3690 
        case DIOCKILLSTATES: {
                struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
                struct pf_kstate_kill    kill;

                /* Convert the ioctl struct to the kernel kill descriptor. */
                error = pf_state_kill_to_kstate_kill(psk, &kill);
                if (error)
                        break;

                /* pf_killstates() accumulates the kill count in place. */
                psk->psk_killed = 0;
                pf_killstates(&kill, &psk->psk_killed);
                break;
        }
 3703 
        case DIOCKILLSTATESNV: {
                /* nvlist(9)-based variant; all work is delegated. */
                error = pf_killstates_nv((struct pfioc_nv *)addr);
                break;
        }
 3708 
        case DIOCADDSTATE: {
                struct pfioc_state      *ps = (struct pfioc_state *)addr;
                struct pfsync_state     *sp = &ps->state;

                /* Reject out-of-range timeout indices up front. */
                if (sp->timeout >= PFTM_MAX) {
                        error = EINVAL;
                        break;
                }
                /*
                 * State import is provided by pfsync; if the hook is not
                 * registered (module not loaded), the operation is
                 * unsupported.
                 */
                if (V_pfsync_state_import_ptr != NULL) {
                        PF_RULES_RLOCK();
                        error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
                        PF_RULES_RUNLOCK();
                } else
                        error = EOPNOTSUPP;
                break;
        }
 3725 
        case DIOCGETSTATE: {
                struct pfioc_state      *ps = (struct pfioc_state *)addr;
                struct pf_kstate        *s;

                /*
                 * Look up a single state by (id, creatorid); the state is
                 * returned locked (hence the PF_STATE_UNLOCK below).
                 */
                s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
                if (s == NULL) {
                        error = ENOENT;
                        break;
                }

                /* Export into the caller's buffer while still locked. */
                pfsync_state_export(&ps->state, s);
                PF_STATE_UNLOCK(s);
                break;
        }
 3740 
        case DIOCGETSTATENV: {
                /* nvlist(9)-based variant; all work is delegated. */
                error = pf_getstate((struct pfioc_nv *)addr);
                break;
        }
 3745 
        case DIOCGETSTATES: {
                struct pfioc_states     *ps = (struct pfioc_states *)addr;
                struct pf_kstate        *s;
                struct pfsync_state     *pstore, *p;
                int                      i, nr;
                size_t                   slice_count = 16, count;
                void                    *out;

                /*
                 * Non-positive ps_len is a size probe: report the space
                 * needed for the current number of states and return.
                 */
                if (ps->ps_len <= 0) {
                        nr = uma_zone_get_cur(V_pf_state_z);
                        ps->ps_len = sizeof(struct pfsync_state) * nr;
                        break;
                }

                out = ps->ps_states;
                /* Scratch buffer holding one hash row's worth of exports. */
                pstore = mallocarray(slice_count,
                    sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO);
                nr = 0;

                /* Walk every row of the state id hash. */
                for (i = 0; i <= pf_hashmask; i++) {
                        struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATES_retry:
                        p = pstore;

                        if (LIST_EMPTY(&ih->states))
                                continue;

                        /* Count live states in this row under the row lock. */
                        PF_HASHROW_LOCK(ih);
                        count = 0;
                        LIST_FOREACH(s, &ih->states, entry) {
                                if (s->timeout == PFTM_UNLINKED)
                                        continue;
                                count++;
                        }

                        /*
                         * Scratch buffer too small: drop the lock (cannot
                         * M_WAITOK-allocate while holding it), grow the
                         * buffer, and retry this row from scratch.
                         */
                        if (count > slice_count) {
                                PF_HASHROW_UNLOCK(ih);
                                free(pstore, M_TEMP);
                                slice_count = count * 2;
                                pstore = mallocarray(slice_count,
                                    sizeof(struct pfsync_state), M_TEMP,
                                    M_WAITOK | M_ZERO);
                                goto DIOCGETSTATES_retry;
                        }

                        /* Stop when the user-supplied buffer is exhausted. */
                        if ((nr+count) * sizeof(*p) > ps->ps_len) {
                                PF_HASHROW_UNLOCK(ih);
                                goto DIOCGETSTATES_full;
                        }

                        /* Export the row's live states into the scratch buffer. */
                        LIST_FOREACH(s, &ih->states, entry) {
                                if (s->timeout == PFTM_UNLINKED)
                                        continue;

                                pfsync_state_export(p, s);
                                p++;
                                nr++;
                        }
                        PF_HASHROW_UNLOCK(ih);
                        /* Copy out with the row lock dropped. */
                        error = copyout(pstore, out,
                            sizeof(struct pfsync_state) * count);
                        if (error)
                                break;
                        out = ps->ps_states + nr;
                }
DIOCGETSTATES_full:
                /* Report the number of bytes actually produced. */
                ps->ps_len = sizeof(struct pfsync_state) * nr;
                free(pstore, M_TEMP);

                break;
        }
 3818 
        case DIOCGETSTATESV2: {
                struct pfioc_states_v2  *ps = (struct pfioc_states_v2 *)addr;
                struct pf_kstate        *s;
                struct pf_state_export  *pstore, *p;
                int i, nr;
                size_t slice_count = 16, count;
                void *out;

                /* Refuse requests for a newer export format than we speak. */
                if (ps->ps_req_version > PF_STATE_VERSION) {
                        error = ENOTSUP;
                        break;
                }

                /*
                 * Non-positive ps_len is a size probe: report the space
                 * needed for the current number of states and return.
                 */
                if (ps->ps_len <= 0) {
                        nr = uma_zone_get_cur(V_pf_state_z);
                        ps->ps_len = sizeof(struct pf_state_export) * nr;
                        break;
                }

                out = ps->ps_states;
                /* Scratch buffer holding one hash row's worth of exports. */
                pstore = mallocarray(slice_count,
                    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
                nr = 0;

                /* Walk every row of the state id hash. */
                for (i = 0; i <= pf_hashmask; i++) {
                        struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATESV2_retry:
                        p = pstore;

                        if (LIST_EMPTY(&ih->states))
                                continue;

                        /* Count live states in this row under the row lock. */
                        PF_HASHROW_LOCK(ih);
                        count = 0;
                        LIST_FOREACH(s, &ih->states, entry) {
                                if (s->timeout == PFTM_UNLINKED)
                                        continue;
                                count++;
                        }

                        /*
                         * Scratch buffer too small: drop the lock (cannot
                         * M_WAITOK-allocate while holding it), grow the
                         * buffer, and retry this row from scratch.
                         */
                        if (count > slice_count) {
                                PF_HASHROW_UNLOCK(ih);
                                free(pstore, M_TEMP);
                                slice_count = count * 2;
                                pstore = mallocarray(slice_count,
                                    sizeof(struct pf_state_export), M_TEMP,
                                    M_WAITOK | M_ZERO);
                                goto DIOCGETSTATESV2_retry;
                        }

                        /* Stop when the user-supplied buffer is exhausted. */
                        if ((nr+count) * sizeof(*p) > ps->ps_len) {
                                PF_HASHROW_UNLOCK(ih);
                                goto DIOCGETSTATESV2_full;
                        }

                        /* Export the row's live states into the scratch buffer. */
                        LIST_FOREACH(s, &ih->states, entry) {
                                if (s->timeout == PFTM_UNLINKED)
                                        continue;

                                pf_state_export(p, s);
                                p++;
                                nr++;
                        }
                        PF_HASHROW_UNLOCK(ih);
                        /* Copy out with the row lock dropped. */
                        error = copyout(pstore, out,
                            sizeof(struct pf_state_export) * count);
                        if (error)
                                break;
                        out = ps->ps_states + nr;
                }
DIOCGETSTATESV2_full:
                /* Report the number of bytes actually produced. */
                ps->ps_len = nr * sizeof(struct pf_state_export);
                free(pstore, M_TEMP);

                break;
        }
 3896 
	case DIOCGETSTATUS: {
		/*
		 * Export the legacy struct-based status snapshot: running
		 * flag, counters, status interface stats and the ruleset
		 * checksum, all captured under a single read lock.
		 */
		struct pf_status *s = (struct pf_status *)addr;

		PF_RULES_RLOCK();
		s->running = V_pf_status.running;
		s->since   = V_pf_status.since;
		s->debug   = V_pf_status.debug;
		s->hostid  = V_pf_status.hostid;
		s->states  = V_pf_status.states;
		s->src_nodes = V_pf_status.src_nodes;

		/* Flatten the per-CPU counters into plain u64 slots. */
		for (int i = 0; i < PFRES_MAX; i++)
			s->counters[i] =
			    counter_u64_fetch(V_pf_status.counters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			s->lcounters[i] =
			    counter_u64_fetch(V_pf_status.lcounters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			s->fcounters[i] =
			    pf_counter_u64_fetch(&V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			s->scounters[i] =
			    counter_u64_fetch(V_pf_status.scounters[i]);

		bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
		bcopy(V_pf_status.pf_chksum, s->pf_chksum,
		    PF_MD5_DIGEST_LENGTH);

		/* Fill in the per-interface packet/byte statistics. */
		pfi_update_status(s->ifname, s);
		PF_RULES_RUNLOCK();
		break;
	}
 3929 
 3930         case DIOCGETSTATUSNV: {
 3931                 error = pf_getstatus((struct pfioc_nv *)addr);
 3932                 break;
 3933         }
 3934 
 3935         case DIOCSETSTATUSIF: {
 3936                 struct pfioc_if *pi = (struct pfioc_if *)addr;
 3937 
 3938                 if (pi->ifname[0] == 0) {
 3939                         bzero(V_pf_status.ifname, IFNAMSIZ);
 3940                         break;
 3941                 }
 3942                 PF_RULES_WLOCK();
 3943                 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
 3944                 PF_RULES_WUNLOCK();
 3945                 break;
 3946         }
 3947 
	case DIOCCLRSTATUS: {
		/*
		 * Zero all global pf counters and restart the uptime clock.
		 * NOTE(review): lcounters are zeroed up to KLCNT_MAX while
		 * DIOCGETSTATUS only fetches LCNT_MAX of them — presumably
		 * the kernel tracks more limit counters than the legacy
		 * struct exports; confirm against the pf headers.
		 */
		PF_RULES_WLOCK();
		for (int i = 0; i < PFRES_MAX; i++)
			counter_u64_zero(V_pf_status.counters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			pf_counter_u64_zero(&V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			counter_u64_zero(V_pf_status.scounters[i]);
		for (int i = 0; i < KLCNT_MAX; i++)
			counter_u64_zero(V_pf_status.lcounters[i]);
		V_pf_status.since = time_second;
		/* Also reset the status interface's statistics, if set. */
		if (*V_pf_status.ifname)
			pfi_update_status(V_pf_status.ifname, NULL);
		PF_RULES_WUNLOCK();
		break;
	}
 3964 
	case DIOCNATLOOK: {
		/*
		 * Look up the NAT translation for one connection: given
		 * af/proto/saddr/daddr(+ports), find the matching state
		 * and return the translated addresses/ports.
		 */
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_kstate	*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		/* TCP/UDP lookups additionally require both ports. */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			bzero(&key, sizeof(key));
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			/* m reports how many states matched the key. */
			state = pf_find_state_all(&key, direction, &m);
			if (state == NULL) {
				error = ENOENT;
			} else {
				/* An ambiguous match is an error. */
				if (m > 1) {
					PF_STATE_UNLOCK(state);
					error = E2BIG;	/* more than one state */
				} else {
					sk = state->key[sidx];
					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
					pnl->rsport = sk->port[sidx];
					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
					pnl->rdport = sk->port[didx];
					PF_STATE_UNLOCK(state);
				}
			}
		}
		break;
	}
 4012 
	case DIOCSETTIMEOUT: {
		/*
		 * Set one default-rule timeout; the previous value is
		 * returned to the caller in pt->seconds.
		 */
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		/* The purge interval must never be zero; clamp to 1s. */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		/*
		 * A shortened purge interval takes effect immediately by
		 * waking the purge thread out of its current sleep.
		 */
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}
 4033 
 4034         case DIOCGETTIMEOUT: {
 4035                 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
 4036 
 4037                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
 4038                         error = EINVAL;
 4039                         break;
 4040                 }
 4041                 PF_RULES_RLOCK();
 4042                 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
 4043                 PF_RULES_RUNLOCK();
 4044                 break;
 4045         }
 4046 
 4047         case DIOCGETLIMIT: {
 4048                 struct pfioc_limit      *pl = (struct pfioc_limit *)addr;
 4049 
 4050                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
 4051                         error = EINVAL;
 4052                         break;
 4053                 }
 4054                 PF_RULES_RLOCK();
 4055                 pl->limit = V_pf_limits[pl->index].limit;
 4056                 PF_RULES_RUNLOCK();
 4057                 break;
 4058         }
 4059 
	case DIOCSETLIMIT: {
		/*
		 * Set the hard limit (uma zone cap) for one pf memory pool;
		 * the previous limit is returned in pl->limit.
		 */
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		/* Validation is done under the lock: index and zone. */
		PF_RULES_WLOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    V_pf_limits[pl->index].zone == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
		old_limit = V_pf_limits[pl->index].limit;
		V_pf_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		PF_RULES_WUNLOCK();
		break;
	}
 4078 
 4079         case DIOCSETDEBUG: {
 4080                 u_int32_t       *level = (u_int32_t *)addr;
 4081 
 4082                 PF_RULES_WLOCK();
 4083                 V_pf_status.debug = *level;
 4084                 PF_RULES_WUNLOCK();
 4085                 break;
 4086         }
 4087 
 4088         case DIOCCLRRULECTRS: {
 4089                 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
 4090                 struct pf_kruleset      *ruleset = &pf_main_ruleset;
 4091                 struct pf_krule         *rule;
 4092 
 4093                 PF_RULES_WLOCK();
 4094                 TAILQ_FOREACH(rule,
 4095                     ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
 4096                         pf_counter_u64_zero(&rule->evaluations);
 4097                         for (int i = 0; i < 2; i++) {
 4098                                 pf_counter_u64_zero(&rule->packets[i]);
 4099                                 pf_counter_u64_zero(&rule->bytes[i]);
 4100                         }
 4101                 }
 4102                 PF_RULES_WUNLOCK();
 4103                 break;
 4104         }
 4105 
 4106         case DIOCGIFSPEEDV0:
 4107         case DIOCGIFSPEEDV1: {
 4108                 struct pf_ifspeed_v1    *psp = (struct pf_ifspeed_v1 *)addr;
 4109                 struct pf_ifspeed_v1    ps;
 4110                 struct ifnet            *ifp;
 4111 
 4112                 if (psp->ifname[0] == '\0') {
 4113                         error = EINVAL;
 4114                         break;
 4115                 }
 4116 
 4117                 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
 4118                 if (error != 0)
 4119                         break;
 4120                 ifp = ifunit(ps.ifname);
 4121                 if (ifp != NULL) {
 4122                         psp->baudrate32 =
 4123                             (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
 4124                         if (cmd == DIOCGIFSPEEDV1)
 4125                                 psp->baudrate = ifp->if_baudrate;
 4126                 } else {
 4127                         error = EINVAL;
 4128                 }
 4129                 break;
 4130         }
 4131 
 4132 #ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		/* Mark altq running only if every interface enabled cleanly. */
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}
 4151 
	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		/* Clear the running flag only on a fully clean shutdown. */
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}
 4170 
 4171         case DIOCADDALTQV0:
 4172         case DIOCADDALTQV1: {
 4173                 struct pfioc_altq_v1    *pa = (struct pfioc_altq_v1 *)addr;
 4174                 struct pf_altq          *altq, *a;
 4175                 struct ifnet            *ifp;
 4176 
 4177                 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
 4178                 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
 4179                 if (error)
 4180                         break;
 4181                 altq->local_flags = 0;
 4182 
 4183                 PF_RULES_WLOCK();
 4184                 if (pa->ticket != V_ticket_altqs_inactive) {
 4185                         PF_RULES_WUNLOCK();
 4186                         free(altq, M_PFALTQ);
 4187                         error = EBUSY;
 4188                         break;
 4189                 }
 4190 
 4191                 /*
 4192                  * if this is for a queue, find the discipline and
 4193                  * copy the necessary fields
 4194                  */
 4195                 if (altq->qname[0] != 0) {
 4196                         if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
 4197                                 PF_RULES_WUNLOCK();
 4198                                 error = EBUSY;
 4199                                 free(altq, M_PFALTQ);
 4200                                 break;
 4201                         }
 4202                         altq->altq_disc = NULL;
 4203                         TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
 4204                                 if (strncmp(a->ifname, altq->ifname,
 4205                                     IFNAMSIZ) == 0) {
 4206                                         altq->altq_disc = a->altq_disc;
 4207                                         break;
 4208                                 }
 4209                         }
 4210                 }
 4211 
 4212                 if ((ifp = ifunit(altq->ifname)) == NULL)
 4213                         altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
 4214                 else
 4215                         error = altq_add(ifp, altq);
 4216 
 4217                 if (error) {
 4218                         PF_RULES_WUNLOCK();
 4219                         free(altq, M_PFALTQ);
 4220                         break;
 4221                 }
 4222 
 4223                 if (altq->qname[0] != 0)
 4224                         TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
 4225                 else
 4226                         TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
 4227                 /* version error check done on import above */
 4228                 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
 4229                 PF_RULES_WUNLOCK();
 4230                 break;
 4231         }
 4232 
 4233         case DIOCGETALTQSV0:
 4234         case DIOCGETALTQSV1: {
 4235                 struct pfioc_altq_v1    *pa = (struct pfioc_altq_v1 *)addr;
 4236                 struct pf_altq          *altq;
 4237 
 4238                 PF_RULES_RLOCK();
 4239                 pa->nr = 0;
 4240                 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
 4241                         pa->nr++;
 4242                 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
 4243                         pa->nr++;
 4244                 pa->ticket = V_ticket_altqs_active;
 4245                 PF_RULES_RUNLOCK();
 4246                 break;
 4247         }
 4248 
	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		/*
		 * Fetch the pa->nr'th active altq entry, exported in the
		 * format (v0/v1) implied by the ioctl parameter length.
		 */
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		/* The ticket ties this lookup to a DIOCGETALTQS pass. */
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}
 4270 
	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet!  Reported as ENODEV. */
		error = ENODEV;
		break;
 4276 
	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		/*
		 * Copy out scheduler statistics for one active queue.
		 * DIOCGETQSTATSV0 always uses the v0 stats layout; v1
		 * callers select the layout via pq->version.
		 */
		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq		*altq;
		int			 nbytes;
		u_int32_t		 version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		/*
		 * NOTE(review): altq is dereferenced below after the rules
		 * lock is dropped (altq_getqstats may copyout and sleep);
		 * this relies on the entry not being freed concurrently —
		 * confirm against the ruleset commit paths.
		 */
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
 4315 #endif /* ALTQ */
 4316 
	case DIOCBEGINADDRS: {
		/*
		 * Start a new pool-address transaction: discard anything
		 * already staged and hand the caller a fresh ticket that
		 * subsequent DIOCADDADDR calls must present.
		 */
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_kpool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}
 4326 
 4327         case DIOCADDADDR: {
 4328                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
 4329                 struct pf_kpooladdr     *pa;
 4330                 struct pfi_kkif         *kif = NULL;
 4331 
 4332 #ifndef INET
 4333                 if (pp->af == AF_INET) {
 4334                         error = EAFNOSUPPORT;
 4335                         break;
 4336                 }
 4337 #endif /* INET */
 4338 #ifndef INET6
 4339                 if (pp->af == AF_INET6) {
 4340                         error = EAFNOSUPPORT;
 4341                         break;
 4342                 }
 4343 #endif /* INET6 */
 4344                 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
 4345                     pp->addr.addr.type != PF_ADDR_DYNIFTL &&
 4346                     pp->addr.addr.type != PF_ADDR_TABLE) {
 4347                         error = EINVAL;
 4348                         break;
 4349                 }
 4350                 if (pp->addr.addr.p.dyn != NULL) {
 4351                         error = EINVAL;
 4352                         break;
 4353                 }
 4354                 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
 4355                 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
 4356                 if (error != 0)
 4357                         break;
 4358                 if (pa->ifname[0])
 4359                         kif = pf_kkif_create(M_WAITOK);
 4360                 PF_RULES_WLOCK();
 4361                 if (pp->ticket != V_ticket_pabuf) {
 4362                         PF_RULES_WUNLOCK();
 4363                         if (pa->ifname[0])
 4364                                 pf_kkif_free(kif);
 4365                         free(pa, M_PFRULE);
 4366                         error = EBUSY;
 4367                         break;
 4368                 }
 4369                 if (pa->ifname[0]) {
 4370                         pa->kif = pfi_kkif_attach(kif, pa->ifname);
 4371                         kif = NULL;
 4372                         pfi_kkif_ref(pa->kif);
 4373                 } else
 4374                         pa->kif = NULL;
 4375                 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
 4376                     pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
 4377                         if (pa->ifname[0])
 4378                                 pfi_kkif_unref(pa->kif);
 4379                         PF_RULES_WUNLOCK();
 4380                         free(pa, M_PFRULE);
 4381                         break;
 4382                 }
 4383                 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
 4384                 PF_RULES_WUNLOCK();
 4385                 break;
 4386         }
 4387 
	case DIOCGETADDRS: {
		/*
		 * Count the addresses in the pool identified by
		 * anchor/ticket/rule number; the count goes in pp->nr.
		 */
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;

		/* Defensively NUL-terminate the user-supplied anchor. */
		pp->anchor[sizeof(pp->anchor) - 1] = 0;
		pp->nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}
 4409 
 4410         case DIOCGETADDR: {
 4411                 struct pfioc_pooladdr   *pp = (struct pfioc_pooladdr *)addr;
 4412                 struct pf_kpool         *pool;
 4413                 struct pf_kpooladdr     *pa;
 4414                 u_int32_t                nr = 0;
 4415 
 4416                 pp->anchor[sizeof(pp->anchor) - 1] = 0;
 4417 
 4418                 PF_RULES_RLOCK();
 4419                 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
 4420                     pp->r_num, 0, 1, 1);
 4421                 if (pool == NULL) {
 4422                         PF_RULES_RUNLOCK();
 4423                         error = EBUSY;
 4424                         break;
 4425                 }
 4426                 pa = TAILQ_FIRST(&pool->list);
 4427                 while ((pa != NULL) && (nr < pp->nr)) {
 4428                         pa = TAILQ_NEXT(pa, entries);
 4429                         nr++;
 4430                 }
 4431                 if (pa == NULL) {
 4432                         PF_RULES_RUNLOCK();
 4433                         error = EBUSY;
 4434                         break;
 4435                 }
 4436                 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
 4437                 pf_addr_copyout(&pp->addr.addr);
 4438                 PF_RULES_RUNLOCK();
 4439                 break;
 4440         }
 4441 
	case DIOCCHANGEADDR: {
		/*
		 * Add, insert or remove a single address in an existing
		 * rule's pool, addressed by anchor/ticket/rule number.
		 */
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_kruleset	*ruleset;
		struct pfi_kkif		*kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = 0;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		/* Kernel pointers must never come in from userspace. */
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			/* Allocate the new entry before taking the lock. */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			/* Resolve the address reference for the new entry. */
			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		/* Locate the existing entry the action is relative to. */
		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			/* BEFORE/AFTER/REMOVE address by index pca->nr. */
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		/*
		 * Restart round-robin at the head of the modified pool.
		 * NOTE(review): if a REMOVE empties the list, pool->cur is
		 * NULL and the PF_ACPY below would dereference it — confirm
		 * callers cannot remove the last pool address.
		 */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		/* Common error exit: undo the tentative allocations. */
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}
 4576 
        case DIOCGETRULESETS: {
                struct pfioc_ruleset    *pr = (struct pfioc_ruleset *)addr;
                struct pf_kruleset      *ruleset;
                struct pf_kanchor       *anchor;

                /*
                 * Count the immediate child anchors of the ruleset named by
                 * pr->path and return the count in pr->nr.
                 */

                /* Defensively NUL-terminate the user-supplied path. */
                pr->path[sizeof(pr->path) - 1] = 0;

                PF_RULES_RLOCK();
                if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
                        PF_RULES_RUNLOCK();
                        error = ENOENT;
                        break;
                }
                pr->nr = 0;
                if (ruleset->anchor == NULL) {
                        /* XXX kludge for pf_main_ruleset: the main ruleset has
                         * no anchor of its own, so count all top-level
                         * (parentless) anchors in the global tree instead. */
                        RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
                                if (anchor->parent == NULL)
                                        pr->nr++;
                } else {
                        RB_FOREACH(anchor, pf_kanchor_node,
                            &ruleset->anchor->children)
                                pr->nr++;
                }
                PF_RULES_RUNLOCK();
                break;
        }
 4604 
        case DIOCGETRULESET: {
                struct pfioc_ruleset    *pr = (struct pfioc_ruleset *)addr;
                struct pf_kruleset      *ruleset;
                struct pf_kanchor       *anchor;
                u_int32_t                nr = 0;

                /*
                 * Return in pr->name the name of the pr->nr'th child anchor
                 * of the ruleset named by pr->path (companion to
                 * DIOCGETRULESETS, which returns the count).
                 */

                /* Defensively NUL-terminate the user-supplied path. */
                pr->path[sizeof(pr->path) - 1] = 0;

                PF_RULES_RLOCK();
                if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
                        PF_RULES_RUNLOCK();
                        error = ENOENT;
                        break;
                }
                pr->name[0] = 0;
                if (ruleset->anchor == NULL) {
                        /* XXX kludge for pf_main_ruleset: walk the global
                         * anchor tree, considering only parentless anchors. */
                        RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
                                if (anchor->parent == NULL && nr++ == pr->nr) {
                                        strlcpy(pr->name, anchor->name,
                                            sizeof(pr->name));
                                        break;
                                }
                } else {
                        RB_FOREACH(anchor, pf_kanchor_node,
                            &ruleset->anchor->children)
                                if (nr++ == pr->nr) {
                                        strlcpy(pr->name, anchor->name,
                                            sizeof(pr->name));
                                        break;
                                }
                }
                /* pr->nr out of range: no anchor copied.  EBUSY is the
                 * historical error code here — NOTE(review): looks odd but
                 * matches the original pf heritage; confirm before changing. */
                if (!pr->name[0])
                        error = EBUSY;
                PF_RULES_RUNLOCK();
                break;
        }
 4642 
 4643         case DIOCRCLRTABLES: {
 4644                 struct pfioc_table *io = (struct pfioc_table *)addr;
 4645 
 4646                 if (io->pfrio_esize != 0) {
 4647                         error = ENODEV;
 4648                         break;
 4649                 }
 4650                 PF_RULES_WLOCK();
 4651                 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
 4652                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
 4653                 PF_RULES_WUNLOCK();
 4654                 break;
 4655         }
 4656 
        case DIOCRADDTABLES: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_table *pfrts;
                size_t totlen;

                /* Create the tables described by the user-supplied array. */

                /* The caller must pass an array of struct pfr_table. */
                if (io->pfrio_esize != sizeof(struct pfr_table)) {
                        error = ENODEV;
                        break;
                }

                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
                        error = ENOMEM;
                        break;
                }

                totlen = io->pfrio_size * sizeof(struct pfr_table);
                pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfrts, totlen);
                if (error) {
                        free(pfrts, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_add_tables(pfrts, io->pfrio_size,
                    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_WUNLOCK();
                free(pfrts, M_TEMP);
                break;
        }
 4688 
        case DIOCRDELTABLES: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_table *pfrts;
                size_t totlen;

                /* Delete the tables named in the user-supplied array;
                 * mirrors DIOCRADDTABLES. */

                /* The caller must pass an array of struct pfr_table. */
                if (io->pfrio_esize != sizeof(struct pfr_table)) {
                        error = ENODEV;
                        break;
                }

                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
                        error = ENOMEM;
                        break;
                }

                totlen = io->pfrio_size * sizeof(struct pfr_table);
                pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfrts, totlen);
                if (error) {
                        free(pfrts, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_del_tables(pfrts, io->pfrio_size,
                    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_WUNLOCK();
                free(pfrts, M_TEMP);
                break;
        }
 4720 
        case DIOCRGETTABLES: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_table *pfrts;
                size_t totlen;
                int n;

                /* Copy out the table list matching the filter. */

                if (io->pfrio_esize != sizeof(struct pfr_table)) {
                        error = ENODEV;
                        break;
                }
                PF_RULES_RLOCK();
                /* Size the buffer from the current table count, clamped to
                 * what the caller's buffer can hold. */
                n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
                if (n < 0) {
                        PF_RULES_RUNLOCK();
                        error = EINVAL;
                        break;
                }
                io->pfrio_size = min(io->pfrio_size, n);

                totlen = io->pfrio_size * sizeof(struct pfr_table);

                /* M_NOWAIT: the rules read lock is held across this
                 * allocation, so we must not sleep here. */
                pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
                    M_TEMP, M_NOWAIT | M_ZERO);
                if (pfrts == NULL) {
                        error = ENOMEM;
                        PF_RULES_RUNLOCK();
                        break;
                }
                error = pfr_get_tables(&io->pfrio_table, pfrts,
                    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_RUNLOCK();
                /* copyout only after dropping the lock; the buffer was
                 * zeroed, so any unfilled tail is benign. */
                if (error == 0)
                        error = copyout(pfrts, io->pfrio_buffer, totlen);
                free(pfrts, M_TEMP);
                break;
        }
 4757 
        case DIOCRGETTSTATS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_tstats *pfrtstats;
                size_t totlen;
                int n;

                /* Copy out per-table statistics for matching tables. */

                if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
                        error = ENODEV;
                        break;
                }
                /* Lock order: table-stats lock first, then the rules read
                 * lock; released in the reverse order below. */
                PF_TABLE_STATS_LOCK();
                PF_RULES_RLOCK();
                n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
                if (n < 0) {
                        PF_RULES_RUNLOCK();
                        PF_TABLE_STATS_UNLOCK();
                        error = EINVAL;
                        break;
                }
                io->pfrio_size = min(io->pfrio_size, n);

                totlen = io->pfrio_size * sizeof(struct pfr_tstats);
                /* M_NOWAIT: both locks are held; sleeping is not allowed. */
                pfrtstats = mallocarray(io->pfrio_size,
                    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
                if (pfrtstats == NULL) {
                        error = ENOMEM;
                        PF_RULES_RUNLOCK();
                        PF_TABLE_STATS_UNLOCK();
                        break;
                }
                error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
                    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_RUNLOCK();
                PF_TABLE_STATS_UNLOCK();
                /* copyout only after dropping both locks. */
                if (error == 0)
                        error = copyout(pfrtstats, io->pfrio_buffer, totlen);
                free(pfrtstats, M_TEMP);
                break;
        }
 4797 
        case DIOCRCLRTSTATS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_table *pfrts;
                size_t totlen;

                /* Zero the statistics of the tables named by the caller. */

                if (io->pfrio_esize != sizeof(struct pfr_table)) {
                        error = ENODEV;
                        break;
                }

                if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
                        /* We used to count tables and use the minimum required
                         * size, so we didn't fail on overly large requests.
                         * Keep doing so: clamp the size and return success
                         * (error stays 0) without touching any stats. */
                        io->pfrio_size = pf_ioctl_maxcount;
                        break;
                }

                totlen = io->pfrio_size * sizeof(struct pfr_table);
                pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfrts, totlen);
                if (error) {
                        free(pfrts, M_TEMP);
                        break;
                }

                /* Lock order: table-stats lock before the rules read lock. */
                PF_TABLE_STATS_LOCK();
                PF_RULES_RLOCK();
                error = pfr_clr_tstats(pfrts, io->pfrio_size,
                    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_RUNLOCK();
                PF_TABLE_STATS_UNLOCK();
                free(pfrts, M_TEMP);
                break;
        }
 4835 
        case DIOCRSETTFLAGS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_table *pfrts;
                size_t totlen;
                int n;

                /* Set/clear flags on the tables named by the caller. */

                if (io->pfrio_esize != sizeof(struct pfr_table)) {
                        error = ENODEV;
                        break;
                }

                PF_RULES_RLOCK();
                /* Clamp the request to the current table count. */
                n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
                if (n < 0) {
                        PF_RULES_RUNLOCK();
                        error = EINVAL;
                        break;
                }

                io->pfrio_size = min(io->pfrio_size, n);
                /* NOTE(review): the read lock is dropped here and the write
                 * lock taken below, so the table count may change in between;
                 * presumably pfr_set_tflags validates each entry itself —
                 * confirm before relying on the clamp. */
                PF_RULES_RUNLOCK();

                totlen = io->pfrio_size * sizeof(struct pfr_table);
                pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking the write lock. */
                error = copyin(io->pfrio_buffer, pfrts, totlen);
                if (error) {
                        free(pfrts, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_set_tflags(pfrts, io->pfrio_size,
                    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
                    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_WUNLOCK();
                free(pfrts, M_TEMP);
                break;
        }
 4874 
 4875         case DIOCRCLRADDRS: {
 4876                 struct pfioc_table *io = (struct pfioc_table *)addr;
 4877 
 4878                 if (io->pfrio_esize != 0) {
 4879                         error = ENODEV;
 4880                         break;
 4881                 }
 4882                 PF_RULES_WLOCK();
 4883                 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
 4884                     io->pfrio_flags | PFR_FLAG_USERIOCTL);
 4885                 PF_RULES_WUNLOCK();
 4886                 break;
 4887         }
 4888 
        case DIOCRADDADDRS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_addr *pfras;
                size_t totlen;

                /* Add the supplied addresses to a table. */

                /* The caller must pass an array of struct pfr_addr. */
                if (io->pfrio_esize != sizeof(struct pfr_addr)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 ||
                    io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
                        error = EINVAL;
                        break;
                }
                totlen = io->pfrio_size * sizeof(struct pfr_addr);
                pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfras, totlen);
                if (error) {
                        free(pfras, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_add_addrs(&io->pfrio_table, pfras,
                    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
                    PFR_FLAG_USERIOCTL);
                PF_RULES_WUNLOCK();
                /* With PFR_FLAG_FEEDBACK, per-entry results are written back
                 * into the caller's array. */
                if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
                        error = copyout(pfras, io->pfrio_buffer, totlen);
                free(pfras, M_TEMP);
                break;
        }
 4922 
        case DIOCRDELADDRS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_addr *pfras;
                size_t totlen;

                /* Delete the supplied addresses from a table; mirrors
                 * DIOCRADDADDRS. */

                if (io->pfrio_esize != sizeof(struct pfr_addr)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 ||
                    io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
                        error = EINVAL;
                        break;
                }
                totlen = io->pfrio_size * sizeof(struct pfr_addr);
                pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfras, totlen);
                if (error) {
                        free(pfras, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_del_addrs(&io->pfrio_table, pfras,
                    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
                    PFR_FLAG_USERIOCTL);
                PF_RULES_WUNLOCK();
                /* With PFR_FLAG_FEEDBACK, per-entry results are written back
                 * into the caller's array. */
                if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
                        error = copyout(pfras, io->pfrio_buffer, totlen);
                free(pfras, M_TEMP);
                break;
        }
 4956 
        case DIOCRSETADDRS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_addr *pfras;
                size_t totlen, count;

                /* Replace a table's contents with the supplied address set. */

                if (io->pfrio_esize != sizeof(struct pfr_addr)) {
                        error = ENODEV;
                        break;
                }
                if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
                        error = EINVAL;
                        break;
                }
                /* The buffer must hold both the input set (pfrio_size) and
                 * the feedback the kernel may write back (pfrio_size2), so
                 * size it for the larger of the two. */
                count = max(io->pfrio_size, io->pfrio_size2);
                if (count > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
                        error = EINVAL;
                        break;
                }
                totlen = count * sizeof(struct pfr_addr);
                pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
                    M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfras, totlen);
                if (error) {
                        free(pfras, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_set_addrs(&io->pfrio_table, pfras,
                    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
                    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
                    PFR_FLAG_USERIOCTL, 0);
                PF_RULES_WUNLOCK();
                /* With PFR_FLAG_FEEDBACK, per-entry results are written back
                 * into the caller's array. */
                if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
                        error = copyout(pfras, io->pfrio_buffer, totlen);
                free(pfras, M_TEMP);
                break;
        }
 4995 
        case DIOCRGETADDRS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_addr *pfras;
                size_t totlen;

                /* Copy out the addresses of a table. */

                if (io->pfrio_esize != sizeof(struct pfr_addr)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 ||
                    io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
                        error = EINVAL;
                        break;
                }
                totlen = io->pfrio_size * sizeof(struct pfr_addr);
                /* Allocate (and sleep if needed) before taking the lock;
                 * M_ZERO so any unfilled tail copied out below is benign. */
                pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
                    M_TEMP, M_WAITOK | M_ZERO);
                PF_RULES_RLOCK();
                error = pfr_get_addrs(&io->pfrio_table, pfras,
                    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_RUNLOCK();
                if (error == 0)
                        error = copyout(pfras, io->pfrio_buffer, totlen);
                free(pfras, M_TEMP);
                break;
        }
 5023 
        case DIOCRGETASTATS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_astats *pfrastats;
                size_t totlen;

                /* Copy out per-address statistics for a table. */

                if (io->pfrio_esize != sizeof(struct pfr_astats)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 ||
                    io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
                        error = EINVAL;
                        break;
                }
                totlen = io->pfrio_size * sizeof(struct pfr_astats);
                /* Allocate (and sleep if needed) before taking the lock;
                 * M_ZERO so any unfilled tail copied out below is benign. */
                pfrastats = mallocarray(io->pfrio_size,
                    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
                PF_RULES_RLOCK();
                error = pfr_get_astats(&io->pfrio_table, pfrastats,
                    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_RUNLOCK();
                if (error == 0)
                        error = copyout(pfrastats, io->pfrio_buffer, totlen);
                free(pfrastats, M_TEMP);
                break;
        }
 5051 
        case DIOCRCLRASTATS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_addr *pfras;
                size_t totlen;

                /* Zero the statistics of the supplied table addresses. */

                if (io->pfrio_esize != sizeof(struct pfr_addr)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 ||
                    io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
                        error = EINVAL;
                        break;
                }
                totlen = io->pfrio_size * sizeof(struct pfr_addr);
                pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfras, totlen);
                if (error) {
                        free(pfras, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_clr_astats(&io->pfrio_table, pfras,
                    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
                    PFR_FLAG_USERIOCTL);
                PF_RULES_WUNLOCK();
                /* With PFR_FLAG_FEEDBACK, per-entry results are written back
                 * into the caller's array. */
                if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
                        error = copyout(pfras, io->pfrio_buffer, totlen);
                free(pfras, M_TEMP);
                break;
        }
 5085 
        case DIOCRTSTADDRS: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_addr *pfras;
                size_t totlen;

                /* Test which of the supplied addresses match a table;
                 * read-only, so only the rules read lock is needed. */

                if (io->pfrio_esize != sizeof(struct pfr_addr)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 ||
                    io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
                        error = EINVAL;
                        break;
                }
                totlen = io->pfrio_size * sizeof(struct pfr_addr);
                pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfras, totlen);
                if (error) {
                        free(pfras, M_TEMP);
                        break;
                }
                PF_RULES_RLOCK();
                error = pfr_tst_addrs(&io->pfrio_table, pfras,
                    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
                    PFR_FLAG_USERIOCTL);
                PF_RULES_RUNLOCK();
                /* Per-entry match results are written back unconditionally
                 * (no FEEDBACK flag check, unlike the mutating requests). */
                if (error == 0)
                        error = copyout(pfras, io->pfrio_buffer, totlen);
                free(pfras, M_TEMP);
                break;
        }
 5119 
        case DIOCRINADEFINE: {
                struct pfioc_table *io = (struct pfioc_table *)addr;
                struct pfr_addr *pfras;
                size_t totlen;

                /* Define a table's contents inside an open transaction,
                 * identified by pfrio_ticket (see DIOCXBEGIN). */

                if (io->pfrio_esize != sizeof(struct pfr_addr)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->pfrio_size < 0 ||
                    io->pfrio_size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
                        error = EINVAL;
                        break;
                }
                totlen = io->pfrio_size * sizeof(struct pfr_addr);
                pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
                    M_TEMP, M_WAITOK);
                /* Copy from userland before taking any locks. */
                error = copyin(io->pfrio_buffer, pfras, totlen);
                if (error) {
                        free(pfras, M_TEMP);
                        break;
                }
                PF_RULES_WLOCK();
                error = pfr_ina_define(&io->pfrio_table, pfras,
                    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
                    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
                PF_RULES_WUNLOCK();
                free(pfras, M_TEMP);
                break;
        }
 5151 
 5152         case DIOCOSFPADD: {
 5153                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
 5154                 PF_RULES_WLOCK();
 5155                 error = pf_osfp_add(io);
 5156                 PF_RULES_WUNLOCK();
 5157                 break;
 5158         }
 5159 
 5160         case DIOCOSFPGET: {
 5161                 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
 5162                 PF_RULES_RLOCK();
 5163                 error = pf_osfp_get(io);
 5164                 PF_RULES_RUNLOCK();
 5165                 break;
 5166         }
 5167 
        case DIOCXBEGIN: {
                struct pfioc_trans      *io = (struct pfioc_trans *)addr;
                struct pfioc_trans_e    *ioes, *ioe;
                size_t                   totlen;
                int                      i;

                /*
                 * Begin a transaction: for every ruleset element supplied by
                 * the caller, open an inactive copy and hand back a ticket.
                 * The tickets are later used by DIOCXCOMMIT/DIOCXROLLBACK.
                 */

                if (io->esize != sizeof(*ioe)) {
                        error = ENODEV;
                        break;
                }
                /* Bound the request before committing to the allocation. */
                if (io->size < 0 ||
                    io->size > pf_ioctl_maxcount ||
                    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
                        error = EINVAL;
                        break;
                }
                totlen = sizeof(struct pfioc_trans_e) * io->size;
                ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
                    M_TEMP, M_WAITOK);
                error = copyin(io->array, ioes, totlen);
                if (error) {
                        free(ioes, M_TEMP);
                        break;
                }
                /* Ensure there's no more ethernet rules to clean up. */
                NET_EPOCH_DRAIN_CALLBACKS();
                PF_RULES_WLOCK();
                for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
                        /* Defensively NUL-terminate the user-supplied
                         * anchor name. */
                        ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
                        /* On any failure: drop the lock, free the array and
                         * bail out; pf_begin_* stored the error already. */
                        switch (ioe->rs_num) {
                        case PF_RULESET_ETH:
                                if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
                                        PF_RULES_WUNLOCK();
                                        free(ioes, M_TEMP);
                                        goto fail;
                                }
                                break;
#ifdef ALTQ
                        case PF_RULESET_ALTQ:
                                /* ALTQ rulesets cannot live in an anchor. */
                                if (ioe->anchor[0]) {
                                        PF_RULES_WUNLOCK();
                                        free(ioes, M_TEMP);
                                        error = EINVAL;
                                        goto fail;
                                }
                                if ((error = pf_begin_altq(&ioe->ticket))) {
                                        PF_RULES_WUNLOCK();
                                        free(ioes, M_TEMP);
                                        goto fail;
                                }
                                break;
#endif /* ALTQ */
                        case PF_RULESET_TABLE:
                            {
                                struct pfr_table table;

                                bzero(&table, sizeof(table));
                                strlcpy(table.pfrt_anchor, ioe->anchor,
                                    sizeof(table.pfrt_anchor));
                                if ((error = pfr_ina_begin(&table,
                                    &ioe->ticket, NULL, 0))) {
                                        PF_RULES_WUNLOCK();
                                        free(ioes, M_TEMP);
                                        goto fail;
                                }
                                break;
                            }
                        default:
                                /* All remaining ruleset types (filter, NAT,
                                 * etc.) share the generic begin path. */
                                if ((error = pf_begin_rules(&ioe->ticket,
                                    ioe->rs_num, ioe->anchor))) {
                                        PF_RULES_WUNLOCK();
                                        free(ioes, M_TEMP);
                                        goto fail;
                                }
                                break;
                        }
                }
                PF_RULES_WUNLOCK();
                /* Return the tickets filled in above to the caller. */
                error = copyout(ioes, io->array, totlen);
                free(ioes, M_TEMP);
                break;
        }
 5250 
 5251         case DIOCXROLLBACK: {
 5252                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
 5253                 struct pfioc_trans_e    *ioe, *ioes;
 5254                 size_t                   totlen;
 5255                 int                      i;
 5256 
 5257                 if (io->esize != sizeof(*ioe)) {
 5258                         error = ENODEV;
 5259                         break;
 5260                 }
 5261                 if (io->size < 0 ||
 5262                     io->size > pf_ioctl_maxcount ||
 5263                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
 5264                         error = EINVAL;
 5265                         break;
 5266                 }
 5267                 totlen = sizeof(struct pfioc_trans_e) * io->size;
 5268                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
 5269                     M_TEMP, M_WAITOK);
 5270                 error = copyin(io->array, ioes, totlen);
 5271                 if (error) {
 5272                         free(ioes, M_TEMP);
 5273                         break;
 5274                 }
 5275                 PF_RULES_WLOCK();
 5276                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
 5277                         ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
 5278                         switch (ioe->rs_num) {
 5279                         case PF_RULESET_ETH:
 5280                                 if ((error = pf_rollback_eth(ioe->ticket,
 5281                                     ioe->anchor))) {
 5282                                         PF_RULES_WUNLOCK();
 5283                                         free(ioes, M_TEMP);
 5284                                         goto fail; /* really bad */
 5285                                 }
 5286                                 break;
 5287 #ifdef ALTQ
 5288                         case PF_RULESET_ALTQ:
 5289                                 if (ioe->anchor[0]) {
 5290                                         PF_RULES_WUNLOCK();
 5291                                         free(ioes, M_TEMP);
 5292                                         error = EINVAL;
 5293                                         goto fail;
 5294                                 }
 5295                                 if ((error = pf_rollback_altq(ioe->ticket))) {
 5296                                         PF_RULES_WUNLOCK();
 5297                                         free(ioes, M_TEMP);
 5298                                         goto fail; /* really bad */
 5299                                 }
 5300                                 break;
 5301 #endif /* ALTQ */
 5302                         case PF_RULESET_TABLE:
 5303                             {
 5304                                 struct pfr_table table;
 5305 
 5306                                 bzero(&table, sizeof(table));
 5307                                 strlcpy(table.pfrt_anchor, ioe->anchor,
 5308                                     sizeof(table.pfrt_anchor));
 5309                                 if ((error = pfr_ina_rollback(&table,
 5310                                     ioe->ticket, NULL, 0))) {
 5311                                         PF_RULES_WUNLOCK();
 5312                                         free(ioes, M_TEMP);
 5313                                         goto fail; /* really bad */
 5314                                 }
 5315                                 break;
 5316                             }
 5317                         default:
 5318                                 if ((error = pf_rollback_rules(ioe->ticket,
 5319                                     ioe->rs_num, ioe->anchor))) {
 5320                                         PF_RULES_WUNLOCK();
 5321                                         free(ioes, M_TEMP);
 5322                                         goto fail; /* really bad */
 5323                                 }
 5324                                 break;
 5325                         }
 5326                 }
 5327                 PF_RULES_WUNLOCK();
 5328                 free(ioes, M_TEMP);
 5329                 break;
 5330         }
 5331 
 5332         case DIOCXCOMMIT: {
 5333                 struct pfioc_trans      *io = (struct pfioc_trans *)addr;
 5334                 struct pfioc_trans_e    *ioe, *ioes;
 5335                 struct pf_kruleset      *rs;
 5336                 struct pf_keth_ruleset  *ers;
 5337                 size_t                   totlen;
 5338                 int                      i;
 5339 
 5340                 if (io->esize != sizeof(*ioe)) {
 5341                         error = ENODEV;
 5342                         break;
 5343                 }
 5344 
 5345                 if (io->size < 0 ||
 5346                     io->size > pf_ioctl_maxcount ||
 5347                     WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
 5348                         error = EINVAL;
 5349                         break;
 5350                 }
 5351 
 5352                 totlen = sizeof(struct pfioc_trans_e) * io->size;
 5353                 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
 5354                     M_TEMP, M_WAITOK);
 5355                 error = copyin(io->array, ioes, totlen);
 5356                 if (error) {
 5357                         free(ioes, M_TEMP);
 5358                         break;
 5359                 }
 5360                 PF_RULES_WLOCK();
 5361                 /* First makes sure everything will succeed. */
 5362                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
 5363                         ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
 5364                         switch (ioe->rs_num) {
 5365                         case PF_RULESET_ETH:
 5366                                 ers = pf_find_keth_ruleset(ioe->anchor);
 5367                                 if (ers == NULL || ioe->ticket == 0 ||
 5368                                     ioe->ticket != ers->inactive.ticket) {
 5369                                         PF_RULES_WUNLOCK();
 5370                                         free(ioes, M_TEMP);
 5371                                         error = EINVAL;
 5372                                         goto fail;
 5373                                 }
 5374                                 break;
 5375 #ifdef ALTQ
 5376                         case PF_RULESET_ALTQ:
 5377                                 if (ioe->anchor[0]) {
 5378                                         PF_RULES_WUNLOCK();
 5379                                         free(ioes, M_TEMP);
 5380                                         error = EINVAL;
 5381                                         goto fail;
 5382                                 }
 5383                                 if (!V_altqs_inactive_open || ioe->ticket !=
 5384                                     V_ticket_altqs_inactive) {
 5385                                         PF_RULES_WUNLOCK();
 5386                                         free(ioes, M_TEMP);
 5387                                         error = EBUSY;
 5388                                         goto fail;
 5389                                 }
 5390                                 break;
 5391 #endif /* ALTQ */
 5392                         case PF_RULESET_TABLE:
 5393                                 rs = pf_find_kruleset(ioe->anchor);
 5394                                 if (rs == NULL || !rs->topen || ioe->ticket !=
 5395                                     rs->tticket) {
 5396                                         PF_RULES_WUNLOCK();
 5397                                         free(ioes, M_TEMP);
 5398                                         error = EBUSY;
 5399                                         goto fail;
 5400                                 }
 5401                                 break;
 5402                         default:
 5403                                 if (ioe->rs_num < 0 || ioe->rs_num >=
 5404                                     PF_RULESET_MAX) {
 5405                                         PF_RULES_WUNLOCK();
 5406                                         free(ioes, M_TEMP);
 5407                                         error = EINVAL;
 5408                                         goto fail;
 5409                                 }
 5410                                 rs = pf_find_kruleset(ioe->anchor);
 5411                                 if (rs == NULL ||
 5412                                     !rs->rules[ioe->rs_num].inactive.open ||
 5413                                     rs->rules[ioe->rs_num].inactive.ticket !=
 5414                                     ioe->ticket) {
 5415                                         PF_RULES_WUNLOCK();
 5416                                         free(ioes, M_TEMP);
 5417                                         error = EBUSY;
 5418                                         goto fail;
 5419                                 }
 5420                                 break;
 5421                         }
 5422                 }
 5423                 /* Now do the commit - no errors should happen here. */
 5424                 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
 5425                         switch (ioe->rs_num) {
 5426                         case PF_RULESET_ETH:
 5427                                 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
 5428                                         PF_RULES_WUNLOCK();
 5429                                         free(ioes, M_TEMP);
 5430                                         goto fail; /* really bad */
 5431                                 }
 5432                                 break;
 5433 #ifdef ALTQ
 5434                         case PF_RULESET_ALTQ:
 5435                                 if ((error = pf_commit_altq(ioe->ticket))) {
 5436                                         PF_RULES_WUNLOCK();
 5437                                         free(ioes, M_TEMP);
 5438                                         goto fail; /* really bad */
 5439                                 }
 5440                                 break;
 5441 #endif /* ALTQ */
 5442                         case PF_RULESET_TABLE:
 5443                             {
 5444                                 struct pfr_table table;
 5445 
 5446                                 bzero(&table, sizeof(table));
 5447                                 (void)strlcpy(table.pfrt_anchor, ioe->anchor,
 5448                                     sizeof(table.pfrt_anchor));
 5449                                 if ((error = pfr_ina_commit(&table,
 5450                                     ioe->ticket, NULL, NULL, 0))) {
 5451                                         PF_RULES_WUNLOCK();
 5452                                         free(ioes, M_TEMP);
 5453                                         goto fail; /* really bad */
 5454                                 }
 5455                                 break;
 5456                             }
 5457                         default:
 5458                                 if ((error = pf_commit_rules(ioe->ticket,
 5459                                     ioe->rs_num, ioe->anchor))) {
 5460                                         PF_RULES_WUNLOCK();
 5461                                         free(ioes, M_TEMP);
 5462                                         goto fail; /* really bad */
 5463                                 }
 5464                                 break;
 5465                         }
 5466                 }
 5467                 PF_RULES_WUNLOCK();
 5468 
		/* Only hook into Ethernet traffic if we've got rules for it. */
 5470                 if (! TAILQ_EMPTY(V_pf_keth->active.rules))
 5471                         hook_pf_eth();
 5472                 else
 5473                         dehook_pf_eth();
 5474 
 5475                 free(ioes, M_TEMP);
 5476                 break;
 5477         }
 5478 
 5479         case DIOCGETSRCNODES: {
 5480                 struct pfioc_src_nodes  *psn = (struct pfioc_src_nodes *)addr;
 5481                 struct pf_srchash       *sh;
 5482                 struct pf_ksrc_node     *n;
 5483                 struct pf_src_node      *p, *pstore;
 5484                 uint32_t                 i, nr = 0;
 5485 
 5486                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
 5487                                 i++, sh++) {
 5488                         PF_HASHROW_LOCK(sh);
 5489                         LIST_FOREACH(n, &sh->nodes, entry)
 5490                                 nr++;
 5491                         PF_HASHROW_UNLOCK(sh);
 5492                 }
 5493 
 5494                 psn->psn_len = min(psn->psn_len,
 5495                     sizeof(struct pf_src_node) * nr);
 5496 
 5497                 if (psn->psn_len == 0) {
 5498                         psn->psn_len = sizeof(struct pf_src_node) * nr;
 5499                         break;
 5500                 }
 5501 
 5502                 nr = 0;
 5503 
 5504                 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
 5505                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
 5506                     i++, sh++) {
 5507                     PF_HASHROW_LOCK(sh);
 5508                     LIST_FOREACH(n, &sh->nodes, entry) {
 5509 
 5510                         if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
 5511                                 break;
 5512 
 5513                         pf_src_node_copy(n, p);
 5514 
 5515                         p++;
 5516                         nr++;
 5517                     }
 5518                     PF_HASHROW_UNLOCK(sh);
 5519                 }
 5520                 error = copyout(pstore, psn->psn_src_nodes,
 5521                     sizeof(struct pf_src_node) * nr);
 5522                 if (error) {
 5523                         free(pstore, M_TEMP);
 5524                         break;
 5525                 }
 5526                 psn->psn_len = sizeof(struct pf_src_node) * nr;
 5527                 free(pstore, M_TEMP);
 5528                 break;
 5529         }
 5530 
 5531         case DIOCCLRSRCNODES: {
 5532                 pf_clear_srcnodes(NULL);
 5533                 pf_purge_expired_src_nodes();
 5534                 break;
 5535         }
 5536 
 5537         case DIOCKILLSRCNODES:
 5538                 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
 5539                 break;
 5540 
 5541 #ifdef COMPAT_FREEBSD13
 5542         case DIOCKEEPCOUNTERS_FREEBSD13:
 5543 #endif
 5544         case DIOCKEEPCOUNTERS:
 5545                 error = pf_keepcounters((struct pfioc_nv *)addr);
 5546                 break;
 5547 
 5548         case DIOCGETSYNCOOKIES:
 5549                 error = pf_get_syncookies((struct pfioc_nv *)addr);
 5550                 break;
 5551 
 5552         case DIOCSETSYNCOOKIES:
 5553                 error = pf_set_syncookies((struct pfioc_nv *)addr);
 5554                 break;
 5555 
 5556         case DIOCSETHOSTID: {
 5557                 u_int32_t       *hostid = (u_int32_t *)addr;
 5558 
 5559                 PF_RULES_WLOCK();
 5560                 if (*hostid == 0)
 5561                         V_pf_status.hostid = arc4random();
 5562                 else
 5563                         V_pf_status.hostid = *hostid;
 5564                 PF_RULES_WUNLOCK();
 5565                 break;
 5566         }
 5567 
 5568         case DIOCOSFPFLUSH:
 5569                 PF_RULES_WLOCK();
 5570                 pf_osfp_flush();
 5571                 PF_RULES_WUNLOCK();
 5572                 break;
 5573 
 5574         case DIOCIGETIFACES: {
 5575                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
 5576                 struct pfi_kif *ifstore;
 5577                 size_t bufsiz;
 5578 
 5579                 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
 5580                         error = ENODEV;
 5581                         break;
 5582                 }
 5583 
 5584                 if (io->pfiio_size < 0 ||
 5585                     io->pfiio_size > pf_ioctl_maxcount ||
 5586                     WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
 5587                         error = EINVAL;
 5588                         break;
 5589                 }
 5590 
 5591                 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
 5592 
 5593                 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
 5594                 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
 5595                     M_TEMP, M_WAITOK | M_ZERO);
 5596 
 5597                 PF_RULES_RLOCK();
 5598                 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
 5599                 PF_RULES_RUNLOCK();
 5600                 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
 5601                 free(ifstore, M_TEMP);
 5602                 break;
 5603         }
 5604 
 5605         case DIOCSETIFFLAG: {
 5606                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
 5607 
 5608                 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
 5609 
 5610                 PF_RULES_WLOCK();
 5611                 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
 5612                 PF_RULES_WUNLOCK();
 5613                 break;
 5614         }
 5615 
 5616         case DIOCCLRIFFLAG: {
 5617                 struct pfioc_iface *io = (struct pfioc_iface *)addr;
 5618 
 5619                 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
 5620 
 5621                 PF_RULES_WLOCK();
 5622                 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
 5623                 PF_RULES_WUNLOCK();
 5624                 break;
 5625         }
 5626 
 5627         default:
 5628                 error = ENODEV;
 5629                 break;
 5630         }
 5631 fail:
 5632         if (sx_xlocked(&pf_ioctl_lock))
 5633                 sx_xunlock(&pf_ioctl_lock);
 5634         CURVNET_RESTORE();
 5635 
 5636 #undef ERROUT_IOCTL
 5637 
 5638         return (error);
 5639 }
 5640 
 5641 void
 5642 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st)
 5643 {
 5644         bzero(sp, sizeof(struct pfsync_state));
 5645 
 5646         /* copy from state key */
 5647         sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
 5648         sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
 5649         sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
 5650         sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
 5651         sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
 5652         sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
 5653         sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
 5654         sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
 5655         sp->proto = st->key[PF_SK_WIRE]->proto;
 5656         sp->af = st->key[PF_SK_WIRE]->af;
 5657 
 5658         /* copy from state */
 5659         strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
 5660         bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
 5661         sp->creation = htonl(time_uptime - st->creation);
 5662         sp->expire = pf_state_expires(st);
 5663         if (sp->expire <= time_uptime)
 5664                 sp->expire = htonl(0);
 5665         else
 5666                 sp->expire = htonl(sp->expire - time_uptime);
 5667 
 5668         sp->direction = st->direction;
 5669         sp->log = st->log;
 5670         sp->timeout = st->timeout;
 5671         sp->state_flags = st->state_flags;
 5672         if (st->src_node)
 5673                 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
 5674         if (st->nat_src_node)
 5675                 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
 5676 
 5677         sp->id = st->id;
 5678         sp->creatorid = st->creatorid;
 5679         pf_state_peer_hton(&st->src, &sp->src);
 5680         pf_state_peer_hton(&st->dst, &sp->dst);
 5681 
 5682         if (st->rule.ptr == NULL)
 5683                 sp->rule = htonl(-1);
 5684         else
 5685                 sp->rule = htonl(st->rule.ptr->nr);
 5686         if (st->anchor.ptr == NULL)
 5687                 sp->anchor = htonl(-1);
 5688         else
 5689                 sp->anchor = htonl(st->anchor.ptr->nr);
 5690         if (st->nat_rule.ptr == NULL)
 5691                 sp->nat_rule = htonl(-1);
 5692         else
 5693                 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
 5694 
 5695         pf_state_counter_hton(st->packets[0], sp->packets[0]);
 5696         pf_state_counter_hton(st->packets[1], sp->packets[1]);
 5697         pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
 5698         pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
 5699 }
 5700 
 5701 void
 5702 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
 5703 {
 5704         bzero(sp, sizeof(*sp));
 5705 
 5706         sp->version = PF_STATE_VERSION;
 5707 
 5708         /* copy from state key */
 5709         sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
 5710         sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
 5711         sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
 5712         sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
 5713         sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
 5714         sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
 5715         sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
 5716         sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
 5717         sp->proto = st->key[PF_SK_WIRE]->proto;
 5718         sp->af = st->key[PF_SK_WIRE]->af;
 5719 
 5720         /* copy from state */
 5721         strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
 5722         strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
 5723             sizeof(sp->orig_ifname));
 5724         bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
 5725         sp->creation = htonl(time_uptime - st->creation);
 5726         sp->expire = pf_state_expires(st);
 5727         if (sp->expire <= time_uptime)
 5728                 sp->expire = htonl(0);
 5729         else
 5730                 sp->expire = htonl(sp->expire - time_uptime);
 5731 
 5732         sp->direction = st->direction;
 5733         sp->log = st->log;
 5734         sp->timeout = st->timeout;
 5735         sp->state_flags = st->state_flags;
 5736         if (st->src_node)
 5737                 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
 5738         if (st->nat_src_node)
 5739                 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
 5740 
 5741         sp->id = st->id;
 5742         sp->creatorid = st->creatorid;
 5743         pf_state_peer_hton(&st->src, &sp->src);
 5744         pf_state_peer_hton(&st->dst, &sp->dst);
 5745 
 5746         if (st->rule.ptr == NULL)
 5747                 sp->rule = htonl(-1);
 5748         else
 5749                 sp->rule = htonl(st->rule.ptr->nr);
 5750         if (st->anchor.ptr == NULL)
 5751                 sp->anchor = htonl(-1);
 5752         else
 5753                 sp->anchor = htonl(st->anchor.ptr->nr);
 5754         if (st->nat_rule.ptr == NULL)
 5755                 sp->nat_rule = htonl(-1);
 5756         else
 5757                 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
 5758 
 5759         sp->packets[0] = st->packets[0];
 5760         sp->packets[1] = st->packets[1];
 5761         sp->bytes[0] = st->bytes[0];
 5762         sp->bytes[1] = st->bytes[1];
 5763 }
 5764 
 5765 static void
 5766 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
 5767 {
 5768         struct pfr_ktable *kt;
 5769 
 5770         KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
 5771 
 5772         kt = aw->p.tbl;
 5773         if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
 5774                 kt = kt->pfrkt_root;
 5775         aw->p.tbl = NULL;
 5776         aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
 5777                 kt->pfrkt_cnt : -1;
 5778 }
 5779 
 5780 static int
 5781 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
 5782     size_t number, char **names)
 5783 {
 5784         nvlist_t        *nvc;
 5785 
 5786         nvc = nvlist_create(0);
 5787         if (nvc == NULL)
 5788                 return (ENOMEM);
 5789 
 5790         for (int i = 0; i < number; i++) {
 5791                 nvlist_append_number_array(nvc, "counters",
 5792                     counter_u64_fetch(counters[i]));
 5793                 nvlist_append_string_array(nvc, "names",
 5794                     names[i]);
 5795                 nvlist_append_number_array(nvc, "ids",
 5796                     i);
 5797         }
 5798         nvlist_add_nvlist(nvl, name, nvc);
 5799         nvlist_destroy(nvc);
 5800 
 5801         return (0);
 5802 }
 5803 
/*
 * DIOCGETSTATUS (nvlist variant) handler: pack the global pf status
 * (V_pf_status) plus per-interface packet/byte counters into an nvlist
 * and copy it out through the pfioc_nv request 'nv'.
 *
 * Returns 0 on success (also when only reporting the required buffer
 * length), ENOMEM on allocation failure, ENOSPC when the caller's
 * buffer is too small, or the error from copyout().
 */
static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t	*nvl = NULL, *nvc = NULL;
	void		*nvlpacked = NULL;
	int		 error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	PF_RULES_RLOCK_TRACKER;

/* On error: set 'error' and jump to errout (drops the rules lock). */
#define ERROUT(x)      ERROUT_FUNCTION(errout, x)

	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	/* Scalar status fields. */
	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", V_pf_status.since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
	nvlist_add_bool(nvl, "syncookies_active",
	    V_pf_status.syncookies_active);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/*
	 * fcounters: built inline rather than via pf_add_status_counters()
	 * because they are pf_counter_u64, fetched with
	 * pf_counter_u64_fetch() instead of counter_u64_fetch().
	 */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names",
		    pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters (note: reuses the fcounter name table). */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters, flattened in nested loop order. */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	/* size == 0 is a length probe: report nv->len, succeed. */
	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	/* Drop the lock before the (possibly faulting) copyout. */
	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}
 5909 
 5910 /*
 5911  * XXX - Check for version mismatch!!!
 5912  */
/*
 * Expire and unlink every state in the system.  PFSTATE_NOSYNC is set
 * on each state first so that no individual pfsync delete messages are
 * generated while tearing everything down.
 */
static void
pf_clear_all_states(void)
{
	struct pf_kstate	*s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			/*
			 * NOTE(review): jumping back to relock re-acquires
			 * the row lock, which implies pf_unlink_state()
			 * releases it — confirm before restructuring this
			 * loop.  Restarting also avoids iterating a list
			 * that was just modified under us.
			 */
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}
 5933 
 5934 static int
 5935 pf_clear_tables(void)
 5936 {
 5937         struct pfioc_table io;
 5938         int error;
 5939 
 5940         bzero(&io, sizeof(io));
 5941 
 5942         error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
 5943             io.pfrio_flags);
 5944 
 5945         return (error);
 5946 }
 5947 
 5948 static void
 5949 pf_clear_srcnodes(struct pf_ksrc_node *n)
 5950 {
 5951         struct pf_kstate *s;
 5952         int i;
 5953 
 5954         for (i = 0; i <= pf_hashmask; i++) {
 5955                 struct pf_idhash *ih = &V_pf_idhash[i];
 5956 
 5957                 PF_HASHROW_LOCK(ih);
 5958                 LIST_FOREACH(s, &ih->states, entry) {
 5959                         if (n == NULL || n == s->src_node)
 5960                                 s->src_node = NULL;
 5961                         if (n == NULL || n == s->nat_src_node)
 5962                                 s->nat_src_node = NULL;
 5963                 }
 5964                 PF_HASHROW_UNLOCK(ih);
 5965         }
 5966 
 5967         if (n == NULL) {
 5968                 struct pf_srchash *sh;
 5969 
 5970                 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
 5971                     i++, sh++) {
 5972                         PF_HASHROW_LOCK(sh);
 5973                         LIST_FOREACH(n, &sh->nodes, entry) {
 5974                                 n->expire = 1;
 5975                                 n->states = 0;
 5976                         }
 5977                         PF_HASHROW_UNLOCK(sh);
 5978                 }
 5979         } else {
 5980                 /* XXX: hash slot should already be locked here. */
 5981                 n->expire = 1;
 5982                 n->states = 0;
 5983         }
 5984 }
 5985 
/*
 * DIOCKILLSRCNODES handler: unlink and free every source node whose
 * source/destination addresses match the (possibly negated) address and
 * mask pairs in 'psnk', after clearing any state references to them.
 * The number of nodes killed is reported back in psnk->psnk_killed.
 */
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list	 kill;

	LIST_INIT(&kill);
	/* Pass 1: collect matching nodes onto the local kill list. */
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				/* expire == 1 marks the node for pass 2. */
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	/* Pass 2: clear state references to the nodes marked above. */
	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}
 6029 
 6030 static int
 6031 pf_keepcounters(struct pfioc_nv *nv)
 6032 {
 6033         nvlist_t        *nvl = NULL;
 6034         void            *nvlpacked = NULL;
 6035         int              error = 0;
 6036 
 6037 #define ERROUT(x)       ERROUT_FUNCTION(on_error, x)
 6038 
 6039         if (nv->len > pf_ioctl_maxcount)
 6040                 ERROUT(ENOMEM);
 6041 
 6042         nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 6043         if (nvlpacked == NULL)
 6044                 ERROUT(ENOMEM);
 6045 
 6046         error = copyin(nv->data, nvlpacked, nv->len);
 6047         if (error)
 6048                 ERROUT(error);
 6049 
 6050         nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 6051         if (nvl == NULL)
 6052                 ERROUT(EBADMSG);
 6053 
 6054         if (! nvlist_exists_bool(nvl, "keep_counters"))
 6055                 ERROUT(EBADMSG);
 6056 
 6057         V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
 6058 
 6059 on_error:
 6060         nvlist_destroy(nvl);
 6061         free(nvlpacked, M_NVLIST);
 6062         return (error);
 6063 }
 6064 
/*
 * DIOCCLRSTATES: remove all states, optionally restricted to one
 * interface (psk_ifname) and optionally killing the reversed "match"
 * state as well (psk_kill_match).  Returns the number of states
 * removed.
 */
static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
        struct pf_state_key_cmp  match_key;
        struct pf_kstate        *s;
        struct pfi_kkif *kif;
        int              idx;
        unsigned int     killed = 0, dir;

        for (unsigned int i = 0; i <= pf_hashmask; i++) {
                struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
                PF_HASHROW_LOCK(ih);
                LIST_FOREACH(s, &ih->states, entry) {
                        /* For floating states look at the original kif. */
                        kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

                        /* Honor the optional interface filter. */
                        if (kill->psk_ifname[0] &&
                            strcmp(kill->psk_ifname,
                            kif->pfik_name))
                                continue;

                        if (kill->psk_kill_match) {
                                /*
                                 * Build the reversed key of this state
                                 * so its opposite-direction twin can be
                                 * killed too (addresses/ports swapped).
                                 */
                                bzero(&match_key, sizeof(match_key));

                                if (s->direction == PF_OUT) {
                                        dir = PF_IN;
                                        idx = PF_SK_STACK;
                                } else {
                                        dir = PF_OUT;
                                        idx = PF_SK_WIRE;
                                }

                                match_key.af = s->key[idx]->af;
                                match_key.proto = s->key[idx]->proto;
                                PF_ACPY(&match_key.addr[0],
                                    &s->key[idx]->addr[1], match_key.af);
                                match_key.port[0] = s->key[idx]->port[1];
                                PF_ACPY(&match_key.addr[1],
                                    &s->key[idx]->addr[0], match_key.af);
                                match_key.port[1] = s->key[idx]->port[0];
                        }

                        /*
                         * Don't send out individual
                         * delete messages.
                         */
                        s->state_flags |= PFSTATE_NOSYNC;
                        pf_unlink_state(s);
                        killed++;

                        if (kill->psk_kill_match)
                                killed += pf_kill_matching_state(&match_key,
                                    dir);

                        /*
                         * The row lock is presumably dropped inside
                         * pf_unlink_state() (hence the relock label);
                         * restart the scan of this row from the top.
                         */
                        goto relock_DIOCCLRSTATES;
                }
                PF_HASHROW_UNLOCK(ih);
        }

        /* Tell pfsync peers to flush their view in one message. */
        if (V_pfsync_clear_states_ptr != NULL)
                V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

        return (killed);
}
 6131 
 6132 static void
 6133 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
 6134 {
 6135         struct pf_kstate        *s;
 6136 
 6137         if (kill->psk_pfcmp.id) {
 6138                 if (kill->psk_pfcmp.creatorid == 0)
 6139                         kill->psk_pfcmp.creatorid = V_pf_status.hostid;
 6140                 if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
 6141                     kill->psk_pfcmp.creatorid))) {
 6142                         pf_unlink_state(s);
 6143                         *killed = 1;
 6144                 }
 6145                 return;
 6146         }
 6147 
 6148         for (unsigned int i = 0; i <= pf_hashmask; i++)
 6149                 *killed += pf_killstates_row(kill, &V_pf_idhash[i]);
 6150 
 6151         return;
 6152 }
 6153 
 6154 static int
 6155 pf_killstates_nv(struct pfioc_nv *nv)
 6156 {
 6157         struct pf_kstate_kill    kill;
 6158         nvlist_t                *nvl = NULL;
 6159         void                    *nvlpacked = NULL;
 6160         int                      error = 0;
 6161         unsigned int             killed = 0;
 6162 
 6163 #define ERROUT(x)       ERROUT_FUNCTION(on_error, x)
 6164 
 6165         if (nv->len > pf_ioctl_maxcount)
 6166                 ERROUT(ENOMEM);
 6167 
 6168         nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
 6169         if (nvlpacked == NULL)
 6170                 ERROUT(ENOMEM);
 6171 
 6172         error = copyin(nv->data, nvlpacked, nv->len);
 6173         if (error)
 6174                 ERROUT(error);
 6175 
 6176         nvl = nvlist_unpack(nvlpacked, nv->len, 0);
 6177         if (nvl == NULL)
 6178                 ERROUT(EBADMSG);
 6179 
 6180         error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
 6181         if (error)
 6182                 ERROUT(error);
 6183 
 6184         pf_killstates(&kill, &killed);
 6185 
 6186         free(nvlpacked, M_NVLIST);
 6187         nvlpacked = NULL;
 6188         nvlist_destroy(nvl);
 6189         nvl = nvlist_create(0);
 6190         if (nvl == NULL)
 6191                 ERROUT(ENOMEM);
 6192 
 6193         nvlist_add_number(nvl, "killed", killed);
 6194 
 6195         nvlpacked = nvlist_pack(nvl, &nv->len);
 6196         if (nvlpacked == NULL)
 6197                 ERROUT(ENOMEM);
 6198 
 6199         if (nv->size == 0)
 6200                 ERROUT(0);
 6201         else if (nv->size < nv->len)
 6202                 ERROUT(ENOSPC);
 6203 
 6204         error = copyout(nvlpacked, nv->data, nv->len);
 6205 
 6206 on_error:
 6207         nvlist_destroy(nvl);
 6208         free(nvlpacked, M_NVLIST);
 6209         return (error);
 6210 }
 6211 
/*
 * DIOCCLRSTATESNV: clear states matching the filter carried in the
 * request nvlist (via pf_clear_states()) and report the count back to
 * userspace under the "killed" key.  Returns 0 or an errno value; a
 * zero nv->size acts as a size probe and only reports the required
 * reply length.
 */
static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
        struct pf_kstate_kill    kill;
        nvlist_t                *nvl = NULL;
        void                    *nvlpacked = NULL;
        int                      error = 0;
        unsigned int             killed;

#define ERROUT(x)       ERROUT_FUNCTION(on_error, x)

        if (nv->len > pf_ioctl_maxcount)
                ERROUT(ENOMEM);

        nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        error = copyin(nv->data, nvlpacked, nv->len);
        if (error)
                ERROUT(error);

        nvl = nvlist_unpack(nvlpacked, nv->len, 0);
        if (nvl == NULL)
                ERROUT(EBADMSG);

        error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
        if (error)
                ERROUT(error);

        killed = pf_clear_states(&kill);

        /* Release the request buffers and build the reply nvlist. */
        free(nvlpacked, M_NVLIST);
        nvlpacked = NULL;
        nvlist_destroy(nvl);
        nvl = nvlist_create(0);
        if (nvl == NULL)
                ERROUT(ENOMEM);

        nvlist_add_number(nvl, "killed", killed);

        nvlpacked = nvlist_pack(nvl, &nv->len);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        if (nv->size == 0)
                ERROUT(0);
        else if (nv->size < nv->len)
                ERROUT(ENOSPC);

        error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
        nvlist_destroy(nvl);
        free(nvlpacked, M_NVLIST);
        return (error);
}
 6270 
/*
 * DIOCGETSTATENV: look up a single state by the "id"/"creatorid" pair
 * from the request nvlist and return its nvlist representation to
 * userspace under the "state" key.  Returns ENOENT if no such state
 * exists; a zero nv->size acts as a size probe.
 */
static int
pf_getstate(struct pfioc_nv *nv)
{
        nvlist_t                *nvl = NULL, *nvls;
        void                    *nvlpacked = NULL;
        struct pf_kstate        *s = NULL;
        int                      error = 0;
        uint64_t                 id, creatorid;

#define ERROUT(x)       ERROUT_FUNCTION(errout, x)

        if (nv->len > pf_ioctl_maxcount)
                ERROUT(ENOMEM);

        nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        error = copyin(nv->data, nvlpacked, nv->len);
        if (error)
                ERROUT(error);

        nvl = nvlist_unpack(nvlpacked, nv->len, 0);
        if (nvl == NULL)
                ERROUT(EBADMSG);

        PFNV_CHK(pf_nvuint64(nvl, "id", &id));
        PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

        /*
         * On success the state is held locked until the PF_STATE_UNLOCK
         * in the errout path below.
         */
        s = pf_find_state_byid(id, creatorid);
        if (s == NULL)
                ERROUT(ENOENT);

        /* Release the request buffers and build the reply nvlist. */
        free(nvlpacked, M_NVLIST);
        nvlpacked = NULL;
        nvlist_destroy(nvl);
        nvl = nvlist_create(0);
        if (nvl == NULL)
                ERROUT(ENOMEM);

        nvls = pf_state_to_nvstate(s);
        if (nvls == NULL)
                ERROUT(ENOMEM);

        nvlist_add_nvlist(nvl, "state", nvls);
        nvlist_destroy(nvls);

        nvlpacked = nvlist_pack(nvl, &nv->len);
        if (nvlpacked == NULL)
                ERROUT(ENOMEM);

        if (nv->size == 0)
                ERROUT(0);
        else if (nv->size < nv->len)
                ERROUT(ENOSPC);

        error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
        if (s != NULL)
                PF_STATE_UNLOCK(s);
        free(nvlpacked, M_NVLIST);
        nvlist_destroy(nvl);
        return (error);
}
 6337 
 6338 /*
 6339  * XXX - Check for version mismatch!!!
 6340  */
 6341 
 6342 /*
 6343  * Duplicate pfctl -Fa operation to get rid of as much as we can.
 6344  */
static int
shutdown_pf(void)
{
        int error = 0;
        u_int32_t t[5];         /* ruleset transaction tickets */
        char nn = '\0';         /* anchor name: the main (empty) anchor */

        do {
                /*
                 * Open an empty transaction for every ruleset type;
                 * committing them below replaces the active rules with
                 * nothing, i.e. flushes them.
                 */
                if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
                        break;
                }
                if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
                        break;          /* XXX: rollback? */
                }
                if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
                        break;          /* XXX: rollback? */
                }
                if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
                        break;          /* XXX: rollback? */
                }
                if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
                    != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
                        break;          /* XXX: rollback? */
                }

                /* XXX: these should always succeed here */
                pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
                pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
                pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
                pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
                pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

                if ((error = pf_clear_tables()) != 0)
                        break;

                /* Flush the Ethernet ruleset the same way. */
                if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
                        break;
                }
                pf_commit_eth(t[0], &nn);

#ifdef ALTQ
                if ((error = pf_begin_altq(&t[0])) != 0) {
                        DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
                        break;
                }
                pf_commit_altq(t[0]);
#endif

                pf_clear_all_states();

                pf_clear_srcnodes(NULL);

                /* status does not use malloced mem so no need to cleanup */
                /* fingerprints and interfaces have their own cleanup code */
        } while(0);

        return (error);
}
 6413 
 6414 static pfil_return_t
 6415 pf_check_return(int chk, struct mbuf **m)
 6416 {
 6417 
 6418         switch (chk) {
 6419         case PF_PASS:
 6420                 if (*m == NULL)
 6421                         return (PFIL_CONSUMED);
 6422                 else
 6423                         return (PFIL_PASS);
 6424                 break;
 6425         default:
 6426                 if (*m != NULL) {
 6427                         m_freem(*m);
 6428                         *m = NULL;
 6429                 }
 6430                 return (PFIL_DROPPED);
 6431         }
 6432 }
 6433 
 6434 static pfil_return_t
 6435 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
 6436     void *ruleset __unused, struct inpcb *inp)
 6437 {
 6438         int chk;
 6439 
 6440         chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
 6441 
 6442         return (pf_check_return(chk, m));
 6443 }
 6444 
 6445 static pfil_return_t
 6446 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
 6447     void *ruleset __unused, struct inpcb *inp)
 6448 {
 6449         int chk;
 6450 
 6451         chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
 6452 
 6453         return (pf_check_return(chk, m));
 6454 }
 6455 
 6456 #ifdef INET
 6457 static pfil_return_t
 6458 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
 6459     void *ruleset __unused, struct inpcb *inp)
 6460 {
 6461         int chk;
 6462 
 6463         chk = pf_test(PF_IN, flags, ifp, m, inp);
 6464 
 6465         return (pf_check_return(chk, m));
 6466 }
 6467 
 6468 static pfil_return_t
 6469 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
 6470     void *ruleset __unused,  struct inpcb *inp)
 6471 {
 6472         int chk;
 6473 
 6474         chk = pf_test(PF_OUT, flags, ifp, m, inp);
 6475 
 6476         return (pf_check_return(chk, m));
 6477 }
 6478 #endif
 6479 
#ifdef INET6
/*
 * pfil(9) input hook for IPv6.
 */
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused,  struct inpcb *inp)
{
        int chk;

        /*
         * In case of loopback traffic IPv6 uses the real interface in
         * order to support scoped addresses. In order to support stateful
         * filtering we have to change this to lo0 as is the case in IPv4.
         */
        CURVNET_SET(ifp->if_vnet);
        chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
        CURVNET_RESTORE();

        return (pf_check_return(chk, m));
}

/*
 * pfil(9) output hook for IPv6.
 */
static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused,  struct inpcb *inp)
{
        int chk;

        CURVNET_SET(ifp->if_vnet);
        chk = pf_test6(PF_OUT, flags, ifp, m, inp);
        CURVNET_RESTORE();

        return (pf_check_return(chk, m));
}
#endif /* INET6 */
 6512 
/*
 * Per-vnet pfil(9) hook cookies: the Ethernet-layer pair plus the
 * IPv4/IPv6 pairs when the respective protocol is compiled in.  These
 * are registered in hook_pf_eth()/hook_pf() and removed in the
 * corresponding dehook functions.
 */
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define V_pf_eth_in_hook        VNET(pf_eth_in_hook)
#define V_pf_eth_out_hook       VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define V_pf_ip4_in_hook        VNET(pf_ip4_in_hook)
#define V_pf_ip4_out_hook       VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define V_pf_ip6_in_hook        VNET(pf_ip6_in_hook)
#define V_pf_ip6_out_hook       VNET(pf_ip6_out_hook)
#endif
 6530 
/*
 * Register pf's Ethernet-layer pfil(9) hooks (in and out) and link
 * them onto the link-layer pfil head.  Idempotent: returns early if
 * the hooks are already installed.
 */
static void
hook_pf_eth(void)
{
        struct pfil_hook_args pha;
        struct pfil_link_args pla;
        int ret __diagused;

        if (atomic_load_bool(&V_pf_pfil_eth_hooked))
                return;

        pha.pa_version = PFIL_VERSION;
        pha.pa_modname = "pf";
        pha.pa_ruleset = NULL;

        pla.pa_version = PFIL_VERSION;

        /* Inbound Ethernet hook. */
        pha.pa_type = PFIL_TYPE_ETHERNET;
        pha.pa_func = pf_eth_check_in;
        pha.pa_flags = PFIL_IN;
        pha.pa_rulname = "eth-in";
        V_pf_eth_in_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_link_pfil_head;
        pla.pa_hook = V_pf_eth_in_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
        /* Outbound Ethernet hook. */
        pha.pa_func = pf_eth_check_out;
        pha.pa_flags = PFIL_OUT;
        pha.pa_rulname = "eth-out";
        V_pf_eth_out_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_link_pfil_head;
        pla.pa_hook = V_pf_eth_out_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);

        atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}
 6569 
/*
 * Register pf's IPv4/IPv6 pfil(9) hooks and link them onto the inet
 * and inet6 pfil heads.  Idempotent: returns early if the hooks are
 * already installed.
 */
static void
hook_pf(void)
{
        struct pfil_hook_args pha;
        struct pfil_link_args pla;
        int ret __diagused;

        if (atomic_load_bool(&V_pf_pfil_hooked))
                return;

        pha.pa_version = PFIL_VERSION;
        pha.pa_modname = "pf";
        pha.pa_ruleset = NULL;

        pla.pa_version = PFIL_VERSION;

#ifdef INET
        /* IPv4 hooks. */
        pha.pa_type = PFIL_TYPE_IP4;
        pha.pa_func = pf_check_in;
        pha.pa_flags = PFIL_IN;
        pha.pa_rulname = "default-in";
        V_pf_ip4_in_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet_pfil_head;
        pla.pa_hook = V_pf_ip4_in_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
        pha.pa_func = pf_check_out;
        pha.pa_flags = PFIL_OUT;
        pha.pa_rulname = "default-out";
        V_pf_ip4_out_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet_pfil_head;
        pla.pa_hook = V_pf_ip4_out_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
#endif
#ifdef INET6
        /* IPv6 hooks. */
        pha.pa_type = PFIL_TYPE_IP6;
        pha.pa_func = pf_check6_in;
        pha.pa_flags = PFIL_IN;
        pha.pa_rulname = "default-in6";
        V_pf_ip6_in_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet6_pfil_head;
        pla.pa_hook = V_pf_ip6_in_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
        pha.pa_func = pf_check6_out;
        pha.pa_rulname = "default-out6";
        pha.pa_flags = PFIL_OUT;
        V_pf_ip6_out_hook = pfil_add_hook(&pha);
        pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
        pla.pa_head = V_inet6_pfil_head;
        pla.pa_hook = V_pf_ip6_out_hook;
        ret = pfil_link(&pla);
        MPASS(ret == 0);
#endif

        atomic_store_bool(&V_pf_pfil_hooked, true);
}
 6631 
 6632 static void
 6633 dehook_pf_eth(void)
 6634 {
 6635 
 6636         if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
 6637                 return;
 6638 
 6639         pfil_remove_hook(V_pf_eth_in_hook);
 6640         pfil_remove_hook(V_pf_eth_out_hook);
 6641 
 6642         atomic_store_bool(&V_pf_pfil_eth_hooked, false);
 6643 }
 6644 
 6645 static void
 6646 dehook_pf(void)
 6647 {
 6648 
 6649         if (!atomic_load_bool(&V_pf_pfil_hooked))
 6650                 return;
 6651 
 6652 #ifdef INET
 6653         pfil_remove_hook(V_pf_ip4_in_hook);
 6654         pfil_remove_hook(V_pf_ip4_out_hook);
 6655 #endif
 6656 #ifdef INET6
 6657         pfil_remove_hook(V_pf_ip6_in_hook);
 6658         pfil_remove_hook(V_pf_ip6_out_hook);
 6659 #endif
 6660 
 6661         atomic_store_bool(&V_pf_pfil_hooked, false);
 6662 }
 6663 
/*
 * Per-vnet initialization: create the tag zone and tag hash sets, set
 * the Ethernet anchor ruleset pointer, then run the main per-vnet
 * attach.  Marks the vnet active for pf when done.
 */
static void
pf_load_vnet(void)
{
        V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
            PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
        pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
            PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

        V_pf_keth = &V_pf_main_keth_anchor.ruleset;

        pfattach_vnet();
        V_pf_vnet_active = 1;
}
 6682 
 6683 static int
 6684 pf_load(void)
 6685 {
 6686         int error;
 6687 
 6688         rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
 6689         sx_init(&pf_ioctl_lock, "pf ioctl");
 6690         sx_init(&pf_end_lock, "pf end thread");
 6691 
 6692         pf_mtag_initialize();
 6693 
 6694         pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
 6695         if (pf_dev == NULL)
 6696                 return (ENOMEM);
 6697 
 6698         pf_end_threads = 0;
 6699         error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
 6700         if (error != 0)
 6701                 return (error);
 6702 
 6703         pfi_initialize();
 6704 
 6705         return (0);
 6706 }
 6707 
/*
 * Per-vnet teardown, the mirror of pf_load_vnet(): stop pf, detach
 * from the packet path, flush rules/states/tables, stop the software
 * interrupt, and release all per-vnet memory and counters.  Ordering
 * matters throughout; counters are freed last (see comment below).
 */
static void
pf_unload_vnet(void)
{
        int ret __diagused;

        /* Stop processing and detach from the packet path first. */
        V_pf_vnet_active = 0;
        V_pf_status.running = 0;
        dehook_pf();
        dehook_pf_eth();

        /* Flush rulesets, tables, states and source nodes. */
        PF_RULES_WLOCK();
        pf_syncookies_cleanup();
        shutdown_pf();
        PF_RULES_WUNLOCK();

        /* Make sure we've cleaned up ethernet rules before we continue. */
        NET_EPOCH_DRAIN_CALLBACKS();

        ret = swi_remove(V_pf_swi_cookie);
        MPASS(ret == 0);
        ret = intr_event_destroy(V_pf_swi_ie);
        MPASS(ret == 0);

        pf_unload_vnet_purge();

        /* Release subsystem state. */
        pf_normalize_cleanup();
        PF_RULES_WLOCK();
        pfi_cleanup_vnet();
        PF_RULES_WUNLOCK();
        pfr_cleanup();
        pf_osfp_flush();
        pf_cleanup();
        if (IS_DEFAULT_VNET(curvnet))
                pf_mtag_cleanup();

        pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
        pf_cleanup_tagset(&V_pf_qids);
#endif
        uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
        /* Unlink the marker entries used by the counter-rollover walker. */
        PF_RULES_WLOCK();
        LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

        MPASS(LIST_EMPTY(&V_pf_allkiflist));
        MPASS(V_pf_allkifcount == 0);

        LIST_REMOVE(&V_pf_default_rule, allrulelist);
        V_pf_allrulecount--;
        LIST_REMOVE(V_pf_rulemarker, allrulelist);

        /*
         * There are known pf rule leaks when running the test suite.
         */
#ifdef notyet
        MPASS(LIST_EMPTY(&V_pf_allrulelist));
        MPASS(V_pf_allrulecount == 0);
#endif

        PF_RULES_WUNLOCK();

        free(V_pf_kifmarker, PFI_MTYPE);
        free(V_pf_rulemarker, M_PFRULE);
#endif

        /* Free counters last as we updated them during shutdown. */
        pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
        for (int i = 0; i < 2; i++) {
                pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
                pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
        }
        counter_u64_free(V_pf_default_rule.states_cur);
        counter_u64_free(V_pf_default_rule.states_tot);
        counter_u64_free(V_pf_default_rule.src_nodes);
        uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

        for (int i = 0; i < PFRES_MAX; i++)
                counter_u64_free(V_pf_status.counters[i]);
        for (int i = 0; i < KLCNT_MAX; i++)
                counter_u64_free(V_pf_status.lcounters[i]);
        for (int i = 0; i < FCNT_MAX; i++)
                pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
        for (int i = 0; i < SCNT_MAX; i++)
                counter_u64_free(V_pf_status.scounters[i]);
}
 6794 
 6795 static void
 6796 pf_unload(void)