FreeBSD/Linux Kernel Cross Reference
sys/net/bpf.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1990, 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  * Copyright (c) 2019 Andrey V. Elsukov <ae@FreeBSD.org>
    7  *
    8  * This code is derived from the Stanford/CMU enet packet filter,
    9  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
   10  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
   11  * Berkeley Laboratory.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  * 3. Neither the name of the University nor the names of its contributors
   22  *    may be used to endorse or promote products derived from this software
   23  *    without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   35  * SUCH DAMAGE.
   36  *
   37  *      @(#)bpf.c       8.4 (Berkeley) 1/9/95
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD$");
   42 
   43 #include "opt_bpf.h"
   44 #include "opt_ddb.h"
   45 #include "opt_netgraph.h"
   46 
   47 #include <sys/param.h>
   48 #include <sys/conf.h>
   49 #include <sys/eventhandler.h>
   50 #include <sys/fcntl.h>
   51 #include <sys/jail.h>
   52 #include <sys/ktr.h>
   53 #include <sys/lock.h>
   54 #include <sys/malloc.h>
   55 #include <sys/mbuf.h>
   56 #include <sys/mutex.h>
   57 #include <sys/time.h>
   58 #include <sys/priv.h>
   59 #include <sys/proc.h>
   60 #include <sys/signalvar.h>
   61 #include <sys/filio.h>
   62 #include <sys/sockio.h>
   63 #include <sys/ttycom.h>
   64 #include <sys/uio.h>
   65 #include <sys/sysent.h>
   66 #include <sys/systm.h>
   67 
   68 #include <sys/event.h>
   69 #include <sys/file.h>
   70 #include <sys/poll.h>
   71 #include <sys/proc.h>
   72 
   73 #include <sys/socket.h>
   74 
   75 #ifdef DDB
   76 #include <ddb/ddb.h>
   77 #endif
   78 
   79 #include <net/if.h>
   80 #include <net/if_var.h>
   81 #include <net/if_dl.h>
   82 #include <net/bpf.h>
   83 #include <net/bpf_buffer.h>
   84 #ifdef BPF_JITTER
   85 #include <net/bpf_jitter.h>
   86 #endif
   87 #include <net/bpf_zerocopy.h>
   88 #include <net/bpfdesc.h>
   89 #include <net/route.h>
   90 #include <net/vnet.h>
   91 
   92 #include <netinet/in.h>
   93 #include <netinet/if_ether.h>
   94 #include <sys/kernel.h>
   95 #include <sys/sysctl.h>
   96 
   97 #include <net80211/ieee80211_freebsd.h>
   98 
   99 #include <security/mac/mac_framework.h>
  100 
  101 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
  102 
  103 static struct bpf_if_ext dead_bpf_if = {
  104         .bif_dlist = CK_LIST_HEAD_INITIALIZER()
  105 };
  106 
  107 struct bpf_if {
  108 #define bif_next        bif_ext.bif_next
  109 #define bif_dlist       bif_ext.bif_dlist
  110         struct bpf_if_ext bif_ext;      /* public members */
  111         u_int           bif_dlt;        /* link layer type */
  112         u_int           bif_hdrlen;     /* length of link header */
  113         struct bpfd_list bif_wlist;     /* writer-only list */
  114         struct ifnet    *bif_ifp;       /* corresponding interface */
  115         struct bpf_if   **bif_bpf;      /* Pointer to pointer to us */
  116         volatile u_int  bif_refcnt;
  117         struct epoch_context epoch_ctx;
  118 };
  119 
  120 CTASSERT(offsetof(struct bpf_if, bif_ext) == 0);
  121 
  122 struct bpf_program_buffer {
  123         struct epoch_context    epoch_ctx;
  124 #ifdef BPF_JITTER
  125         bpf_jit_filter          *func;
  126 #endif
  127         void                    *buffer[0];
  128 };
  129 
  130 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
  131 
  132 #define PRINET  26                      /* interruptible */
  133 
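/*
 * The size of a BPF header up to and including its bh_hdrlen member:
 * offsetof() of the member plus the member's own size, which deliberately
 * excludes any trailing padding the compiler may append to the struct.
 */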
  134 #define SIZEOF_BPF_HDR(type)    \
  135     (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
  136 
  137 #ifdef COMPAT_FREEBSD32
  138 #include <sys/mount.h>
  139 #include <compat/freebsd32/freebsd32.h>
  140 #define BPF_ALIGNMENT32 sizeof(int32_t)
  141 #define BPF_WORDALIGN32(x) roundup2(x, BPF_ALIGNMENT32)
  142 
  143 #ifndef BURN_BRIDGES
  144 /*
  145  * 32-bit version of structure prepended to each packet.  We use this header
   146  * instead of the standard one for 32-bit streams.  We mark a stream as
  147  * 32-bit the first time we see a 32-bit compat ioctl request.
  148  */
  149 struct bpf_hdr32 {
  150         struct timeval32 bh_tstamp;     /* time stamp */
  151         uint32_t        bh_caplen;      /* length of captured portion */
  152         uint32_t        bh_datalen;     /* original length of packet */
  153         uint16_t        bh_hdrlen;      /* length of bpf header (this struct
  154                                            plus alignment padding) */
  155 };
  156 #endif
  157 
  158 struct bpf_program32 {
  159         u_int bf_len;
  160         uint32_t bf_insns;
  161 };
  162 
  163 struct bpf_dltlist32 {
  164         u_int   bfl_len;
  165         u_int   bfl_list;
  166 };
  167 
  168 #define BIOCSETF32      _IOW('B', 103, struct bpf_program32)
  169 #define BIOCSRTIMEOUT32 _IOW('B', 109, struct timeval32)
  170 #define BIOCGRTIMEOUT32 _IOR('B', 110, struct timeval32)
  171 #define BIOCGDLTLIST32  _IOWR('B', 121, struct bpf_dltlist32)
  172 #define BIOCSETWF32     _IOW('B', 123, struct bpf_program32)
  173 #define BIOCSETFNR32    _IOW('B', 130, struct bpf_program32)
  174 #endif
  175 
  176 #define BPF_LOCK()         sx_xlock(&bpf_sx)
  177 #define BPF_UNLOCK()            sx_xunlock(&bpf_sx)
  178 #define BPF_LOCK_ASSERT()       sx_assert(&bpf_sx, SA_XLOCKED)
  179 /*
  180  * bpf_iflist is a list of BPF interface structures, each corresponding to a
  181  * specific DLT. The same network interface might have several BPF interface
   182  * structures registered by different layers in the stack (e.g., 802.11
   183  * frames, Ethernet frames).
  184  */
  185 CK_LIST_HEAD(bpf_iflist, bpf_if);
  186 static struct bpf_iflist bpf_iflist;
  187 static struct sx        bpf_sx;         /* bpf global lock */
  188 static int              bpf_bpfd_cnt;
  189 
  190 static void     bpfif_ref(struct bpf_if *);
  191 static void     bpfif_rele(struct bpf_if *);
  192 
  193 static void     bpfd_ref(struct bpf_d *);
  194 static void     bpfd_rele(struct bpf_d *);
  195 static void     bpf_attachd(struct bpf_d *, struct bpf_if *);
  196 static void     bpf_detachd(struct bpf_d *);
  197 static void     bpf_detachd_locked(struct bpf_d *, bool);
  198 static void     bpfd_free(epoch_context_t);
  199 static int      bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
  200                     struct sockaddr *, int *, struct bpf_d *);
  201 static int      bpf_setif(struct bpf_d *, struct ifreq *);
  202 static void     bpf_timed_out(void *);
  203 static __inline void
  204                 bpf_wakeup(struct bpf_d *);
  205 static void     catchpacket(struct bpf_d *, u_char *, u_int, u_int,
  206                     void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
  207                     struct bintime *);
  208 static void     reset_d(struct bpf_d *);
  209 static int      bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
  210 static int      bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
  211 static int      bpf_setdlt(struct bpf_d *, u_int);
  212 static void     filt_bpfdetach(struct knote *);
  213 static int      filt_bpfread(struct knote *, long);
  214 static void     bpf_drvinit(void *);
  215 static int      bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
  216 
  217 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
  218     "bpf sysctl");
  219 int bpf_maxinsns = BPF_MAXINSNS;
  220 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
  221     &bpf_maxinsns, 0, "Maximum bpf program instructions");
  222 static int bpf_zerocopy_enable = 0;
  223 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
  224     &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
  225 static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
  226     bpf_stats_sysctl, "bpf statistics portal");
  227 
  228 VNET_DEFINE_STATIC(int, bpf_optimize_writers) = 0;
  229 #define V_bpf_optimize_writers VNET(bpf_optimize_writers)
  230 SYSCTL_INT(_net_bpf, OID_AUTO, optimize_writers, CTLFLAG_VNET | CTLFLAG_RWTUN,
  231     &VNET_NAME(bpf_optimize_writers), 0,
  232     "Do not send packets until BPF program is set");
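/*
 * The knobs above surface under the net.bpf sysctl tree, e.g.
 * net.bpf.maxinsns, net.bpf.zerocopy_enable and net.bpf.optimize_writers.
 */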
  233 
  234 static  d_open_t        bpfopen;
  235 static  d_read_t        bpfread;
  236 static  d_write_t       bpfwrite;
  237 static  d_ioctl_t       bpfioctl;
  238 static  d_poll_t        bpfpoll;
  239 static  d_kqfilter_t    bpfkqfilter;
  240 
  241 static struct cdevsw bpf_cdevsw = {
  242         .d_version =    D_VERSION,
  243         .d_open =       bpfopen,
  244         .d_read =       bpfread,
  245         .d_write =      bpfwrite,
  246         .d_ioctl =      bpfioctl,
  247         .d_poll =       bpfpoll,
  248         .d_name =       "bpf",
  249         .d_kqfilter =   bpfkqfilter,
  250 };
  251 
  252 static struct filterops bpfread_filtops = {
  253         .f_isfd = 1,
  254         .f_detach = filt_bpfdetach,
  255         .f_event = filt_bpfread,
  256 };
  257 
  258 /*
  259  * LOCKING MODEL USED BY BPF
  260  *
  261  * Locks:
   262  * 1) Global lock (BPF_LOCK). An sx lock protecting some global counters
   263  * and all bpf_iflist changes; it also serializes ioctl access to bpf descriptors.
   264  * 2) Descriptor lock. A mutex protecting the BPF buffers and various
   265  * structure fields used by the bpf_*tap* code.
  266  *
  267  * Lock order: global lock, then descriptor lock.
  268  *
  269  * There are several possible consumers:
  270  *
   271  * 1. The kernel registers an interface pointer with bpfattach().
   272  * Each call allocates a new bpf_if structure, references the ifnet
   273  * pointer and links the bpf_if into the bpf_iflist chain. This is
   274  * protected by the global lock.
   275  *
   276  * 2. A userland application issues ioctl() calls on a bpf_d descriptor.
   277  * All such calls are serialized by the global lock. BPF filters can be
   278  * changed, but the pointer to the old filter is freed via NET_EPOCH_CALL().
   279  * Thus it is safe for the bpf_tap/bpf_mtap* code to access the
   280  * filter pointers, even if a change happens during bpf_tap execution.
   281  * Destruction of a bpf_d descriptor is likewise deferred via NET_EPOCH_CALL().
   282  *
   283  * 3. A userland application can write packets to a bpf_d descriptor.
   284  * Here we must be sure that the ifnet won't disappear during bpfwrite().
   285  *
   286  * 4. The kernel invokes the bpf_tap/bpf_mtap* functions. Access to
   287  * bif_dlist is protected by a net_epoch_preempt section, so it is
   288  * safe to access a bpf_d descriptor inside the section.
   289  *
   290  * 5. The kernel invokes bpfdetach() when an interface is destroyed. All
   291  * lists are modified with the global lock held, and the actual free() is
   292  * deferred via NET_EPOCH_CALL().
  293  */
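/*
 * For illustration, the reader-side pattern described in rule 4 has
 * roughly this shape (a sketch, not code taken from this file):
 *
 *	struct epoch_tracker et;
 *	struct bpf_d *d;
 *
 *	NET_EPOCH_ENTER(et);
 *	CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
 *		(d and its filter remain valid for the whole section,
 *		since concurrent frees are deferred via NET_EPOCH_CALL())
 *	}
 *	NET_EPOCH_EXIT(et);
 */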
  294 
  295 static void
  296 bpfif_free(epoch_context_t ctx)
  297 {
  298         struct bpf_if *bp;
  299 
  300         bp = __containerof(ctx, struct bpf_if, epoch_ctx);
  301         if_rele(bp->bif_ifp);
  302         free(bp, M_BPF);
  303 }
  304 
  305 static void
  306 bpfif_ref(struct bpf_if *bp)
  307 {
  308 
  309         refcount_acquire(&bp->bif_refcnt);
  310 }
  311 
  312 static void
  313 bpfif_rele(struct bpf_if *bp)
  314 {
  315 
  316         if (!refcount_release(&bp->bif_refcnt))
  317                 return;
  318         NET_EPOCH_CALL(bpfif_free, &bp->epoch_ctx);
  319 }
  320 
  321 static void
  322 bpfd_ref(struct bpf_d *d)
  323 {
  324 
  325         refcount_acquire(&d->bd_refcnt);
  326 }
  327 
  328 static void
  329 bpfd_rele(struct bpf_d *d)
  330 {
  331 
  332         if (!refcount_release(&d->bd_refcnt))
  333                 return;
  334         NET_EPOCH_CALL(bpfd_free, &d->epoch_ctx);
  335 }
  336 
  337 static struct bpf_program_buffer*
  338 bpf_program_buffer_alloc(size_t size, int flags)
  339 {
  340 
  341         return (malloc(sizeof(struct bpf_program_buffer) + size,
  342             M_BPF, flags));
  343 }
  344 
  345 static void
  346 bpf_program_buffer_free(epoch_context_t ctx)
  347 {
  348         struct bpf_program_buffer *ptr;
  349 
  350         ptr = __containerof(ctx, struct bpf_program_buffer, epoch_ctx);
  351 #ifdef BPF_JITTER
  352         if (ptr->func != NULL)
  353                 bpf_destroy_jit_filter(ptr->func);
  354 #endif
  355         free(ptr, M_BPF);
  356 }
  357 
  358 /*
  359  * Wrapper functions for various buffering methods.  If the set of buffer
  360  * modes expands, we will probably want to introduce a switch data structure
   361  * similar to protosw, etc.
  362  */
  363 static void
  364 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
  365     u_int len)
  366 {
  367 
  368         BPFD_LOCK_ASSERT(d);
  369 
  370         switch (d->bd_bufmode) {
  371         case BPF_BUFMODE_BUFFER:
  372                 return (bpf_buffer_append_bytes(d, buf, offset, src, len));
  373 
  374         case BPF_BUFMODE_ZBUF:
  375                 counter_u64_add(d->bd_zcopy, 1);
  376                 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
  377 
  378         default:
  379                 panic("bpf_buf_append_bytes");
  380         }
  381 }
  382 
  383 static void
  384 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
  385     u_int len)
  386 {
  387 
  388         BPFD_LOCK_ASSERT(d);
  389 
  390         switch (d->bd_bufmode) {
  391         case BPF_BUFMODE_BUFFER:
  392                 return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
  393 
  394         case BPF_BUFMODE_ZBUF:
  395                 counter_u64_add(d->bd_zcopy, 1);
  396                 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
  397 
  398         default:
  399                 panic("bpf_buf_append_mbuf");
  400         }
  401 }
  402 
  403 /*
  404  * This function gets called when the free buffer is re-assigned.
  405  */
  406 static void
  407 bpf_buf_reclaimed(struct bpf_d *d)
  408 {
  409 
  410         BPFD_LOCK_ASSERT(d);
  411 
  412         switch (d->bd_bufmode) {
  413         case BPF_BUFMODE_BUFFER:
  414                 return;
  415 
  416         case BPF_BUFMODE_ZBUF:
  417                 bpf_zerocopy_buf_reclaimed(d);
  418                 return;
  419 
  420         default:
  421                 panic("bpf_buf_reclaimed");
  422         }
  423 }
  424 
  425 /*
  426  * If the buffer mechanism has a way to decide that a held buffer can be made
  427  * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
  428  * returned if the buffer can be discarded, (0) is returned if it cannot.
  429  */
  430 static int
  431 bpf_canfreebuf(struct bpf_d *d)
  432 {
  433 
  434         BPFD_LOCK_ASSERT(d);
  435 
  436         switch (d->bd_bufmode) {
  437         case BPF_BUFMODE_ZBUF:
  438                 return (bpf_zerocopy_canfreebuf(d));
  439         }
  440         return (0);
  441 }
  442 
  443 /*
  444  * Allow the buffer model to indicate that the current store buffer is
  445  * immutable, regardless of the appearance of space.  Return (1) if the
  446  * buffer is writable, and (0) if not.
  447  */
  448 static int
  449 bpf_canwritebuf(struct bpf_d *d)
  450 {
  451         BPFD_LOCK_ASSERT(d);
  452 
  453         switch (d->bd_bufmode) {
  454         case BPF_BUFMODE_ZBUF:
  455                 return (bpf_zerocopy_canwritebuf(d));
  456         }
  457         return (1);
  458 }
  459 
  460 /*
  461  * Notify buffer model that an attempt to write to the store buffer has
  462  * resulted in a dropped packet, in which case the buffer may be considered
  463  * full.
  464  */
  465 static void
  466 bpf_buffull(struct bpf_d *d)
  467 {
  468 
  469         BPFD_LOCK_ASSERT(d);
  470 
  471         switch (d->bd_bufmode) {
  472         case BPF_BUFMODE_ZBUF:
  473                 bpf_zerocopy_buffull(d);
  474                 break;
  475         }
  476 }
  477 
  478 /*
  479  * Notify the buffer model that a buffer has moved into the hold position.
  480  */
  481 void
  482 bpf_bufheld(struct bpf_d *d)
  483 {
  484 
  485         BPFD_LOCK_ASSERT(d);
  486 
  487         switch (d->bd_bufmode) {
  488         case BPF_BUFMODE_ZBUF:
  489                 bpf_zerocopy_bufheld(d);
  490                 break;
  491         }
  492 }
  493 
  494 static void
  495 bpf_free(struct bpf_d *d)
  496 {
  497 
  498         switch (d->bd_bufmode) {
  499         case BPF_BUFMODE_BUFFER:
  500                 return (bpf_buffer_free(d));
  501 
  502         case BPF_BUFMODE_ZBUF:
  503                 return (bpf_zerocopy_free(d));
  504 
  505         default:
  506                 panic("bpf_buf_free");
  507         }
  508 }
  509 
  510 static int
  511 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
  512 {
  513 
  514         if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
  515                 return (EOPNOTSUPP);
  516         return (bpf_buffer_uiomove(d, buf, len, uio));
  517 }
  518 
  519 static int
  520 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
  521 {
  522 
  523         if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
  524                 return (EOPNOTSUPP);
  525         return (bpf_buffer_ioctl_sblen(d, i));
  526 }
  527 
  528 static int
  529 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
  530 {
  531 
  532         if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
  533                 return (EOPNOTSUPP);
  534         return (bpf_zerocopy_ioctl_getzmax(td, d, i));
  535 }
  536 
  537 static int
  538 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
  539 {
  540 
  541         if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
  542                 return (EOPNOTSUPP);
  543         return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
  544 }
  545 
  546 static int
  547 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
  548 {
  549 
  550         if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
  551                 return (EOPNOTSUPP);
  552         return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
  553 }
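/*
 * The four wrappers above are reached from bpfioctl().  A hedged sketch of
 * the zero-copy setup sequence as seen from userland (error handling
 * omitted; the mmap flags and buffer sizing are illustrative, not a
 * definitive recipe):
 *
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);
 *	bz.bz_bufa = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_SHARED, -1, 0);
 *	bz.bz_bufb = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_SHARED, -1, 0);
 *	bz.bz_buflen = zmax;
 *	ioctl(fd, BIOCSETZBUF, &bz);
 */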
  554 
  555 /*
  556  * General BPF functions.
  557  */
  558 static int
  559 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
  560     struct sockaddr *sockp, int *hdrlen, struct bpf_d *d)
  561 {
  562         const struct ieee80211_bpf_params *p;
  563         struct ether_header *eh;
  564         struct mbuf *m;
  565         int error;
  566         int len;
  567         int hlen;
  568         int slen;
  569 
  570         /*
  571          * Build a sockaddr based on the data link layer type.
  572          * We do this at this level because the ethernet header
  573          * is copied directly into the data field of the sockaddr.
  574          * In the case of SLIP, there is no header and the packet
  575          * is forwarded as is.
  576          * Also, we are careful to leave room at the front of the mbuf
  577          * for the link level header.
  578          */
  579         switch (linktype) {
  580         case DLT_SLIP:
  581                 sockp->sa_family = AF_INET;
  582                 hlen = 0;
  583                 break;
  584 
  585         case DLT_EN10MB:
  586                 sockp->sa_family = AF_UNSPEC;
  587                 /* XXX Would MAXLINKHDR be better? */
  588                 hlen = ETHER_HDR_LEN;
  589                 break;
  590 
  591         case DLT_FDDI:
  592                 sockp->sa_family = AF_IMPLINK;
  593                 hlen = 0;
  594                 break;
  595 
  596         case DLT_RAW:
  597                 sockp->sa_family = AF_UNSPEC;
  598                 hlen = 0;
  599                 break;
  600 
  601         case DLT_NULL:
  602                 /*
   603                  * Null interface types require a 4-byte pseudo header which
   604                  * corresponds to the address family of the packet.
  605                  */
  606                 sockp->sa_family = AF_UNSPEC;
  607                 hlen = 4;
  608                 break;
  609 
  610         case DLT_ATM_RFC1483:
  611                 /*
   612                  * The en ATM driver requires a 4-byte ATM pseudo header.
   613                  * Though it isn't standard, the vpi:vci needs to be
   614                  * specified anyway.
  615                  */
  616                 sockp->sa_family = AF_UNSPEC;
  617                 hlen = 12;      /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
  618                 break;
  619 
  620         case DLT_PPP:
  621                 sockp->sa_family = AF_UNSPEC;
  622                 hlen = 4;       /* This should match PPP_HDRLEN */
  623                 break;
  624 
  625         case DLT_IEEE802_11:            /* IEEE 802.11 wireless */
  626                 sockp->sa_family = AF_IEEE80211;
  627                 hlen = 0;
  628                 break;
  629 
  630         case DLT_IEEE802_11_RADIO:      /* IEEE 802.11 wireless w/ phy params */
  631                 sockp->sa_family = AF_IEEE80211;
  632                 sockp->sa_len = 12;     /* XXX != 0 */
  633                 hlen = sizeof(struct ieee80211_bpf_params);
  634                 break;
  635 
  636         default:
  637                 return (EIO);
  638         }
  639 
  640         len = uio->uio_resid;
  641         if (len < hlen || len - hlen > ifp->if_mtu)
  642                 return (EMSGSIZE);
  643 
  644         m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
  645         if (m == NULL)
  646                 return (EIO);
  647         m->m_pkthdr.len = m->m_len = len;
  648         *mp = m;
  649 
  650         error = uiomove(mtod(m, u_char *), len, uio);
  651         if (error)
  652                 goto bad;
  653 
  654         slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len);
  655         if (slen == 0) {
  656                 error = EPERM;
  657                 goto bad;
  658         }
  659 
  660         /* Check for multicast destination */
  661         switch (linktype) {
  662         case DLT_EN10MB:
  663                 eh = mtod(m, struct ether_header *);
  664                 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
  665                         if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
  666                             ETHER_ADDR_LEN) == 0)
  667                                 m->m_flags |= M_BCAST;
  668                         else
  669                                 m->m_flags |= M_MCAST;
  670                 }
  671                 if (d->bd_hdrcmplt == 0) {
  672                         memcpy(eh->ether_shost, IF_LLADDR(ifp),
  673                             sizeof(eh->ether_shost));
  674                 }
  675                 break;
  676         }
  677 
  678         /*
  679          * Make room for link header, and copy it to sockaddr
  680          */
  681         if (hlen != 0) {
  682                 if (sockp->sa_family == AF_IEEE80211) {
  683                         /*
  684                          * Collect true length from the parameter header
  685                          * NB: sockp is known to be zero'd so if we do a
  686                          *     short copy unspecified parameters will be
  687                          *     zero.
  688                          * NB: packet may not be aligned after stripping
  689                          *     bpf params
  690                          * XXX check ibp_vers
  691                          */
  692                         p = mtod(m, const struct ieee80211_bpf_params *);
  693                         hlen = p->ibp_len;
  694                         if (hlen > sizeof(sockp->sa_data)) {
  695                                 error = EINVAL;
  696                                 goto bad;
  697                         }
  698                 }
  699                 bcopy(mtod(m, const void *), sockp->sa_data, hlen);
  700         }
  701         *hdrlen = hlen;
  702 
  703         return (0);
  704 bad:
  705         m_freem(m);
  706         return (error);
  707 }
  708 
  709 /*
  710  * Attach descriptor to the bpf interface, i.e. make d listen on bp,
  711  * then reset its buffers and counters with reset_d().
  712  */
  713 static void
  714 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
  715 {
  716         int op_w;
  717 
  718         BPF_LOCK_ASSERT();
  719 
  720         /*
   721          * Save the sysctl value to guard against it changing
   722          * between reads.
  723          */
  724         op_w = V_bpf_optimize_writers || d->bd_writer;
  725 
  726         if (d->bd_bif != NULL)
  727                 bpf_detachd_locked(d, false);
  728         /*
  729          * Point d at bp, and add d to the interface's list.
  730          * Since there are many applications using BPF for
   731          * sending raw packets only (dhcpd, cdpd are good examples),
  732          * we can delay adding d to the list of active listeners until
  733          * some filter is configured.
  734          */
  735 
  736         BPFD_LOCK(d);
  737         /*
   738          * Hold a reference to the bpf_if while this descriptor uses the interface.
  739          */
  740         bpfif_ref(bp);
  741         d->bd_bif = bp;
  742         if (op_w != 0) {
  743                 /* Add to writers-only list */
  744                 CK_LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
  745                 /*
  746                  * We decrement bd_writer on every filter set operation.
   747                  * The first BIOCSETF is done by pcap_open_live() to set up
   748                  * the snap length. After that the application usually sets
   749                  * its own filter.
  750                  */
  751                 d->bd_writer = 2;
  752         } else
  753                 CK_LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
  754 
  755         reset_d(d);
  756         BPFD_UNLOCK(d);
  757         bpf_bpfd_cnt++;
  758 
  759         CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
  760             __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
  761 
  762         if (op_w == 0)
  763                 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
  764 }
  765 
  766 /*
  767  * Check if we need to upgrade our descriptor @d from write-only mode.
  768  */
  769 static int
  770 bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode,
  771     int flen)
  772 {
  773         int is_snap, need_upgrade;
  774 
  775         /*
   776          * Check if we've already upgraded or the new filter is empty.
  777          */
  778         if (d->bd_writer == 0 || fcode == NULL)
  779                 return (0);
  780 
  781         need_upgrade = 0;
  782 
  783         /*
  784          * Check if cmd looks like snaplen setting from
  785          * pcap_bpf.c:pcap_open_live().
   786          * Note we're not checking the .k value here:
   787          * while pcap_open_live() definitely sets it to a non-zero value,
   788          * we'd prefer to treat the k=0 (deny ALL) case the same way,
   789          * i.e. not consider upgrading immediately.
  790          */
  791         if (cmd == BIOCSETF && flen == 1 &&
  792             fcode[0].code == (BPF_RET | BPF_K))
  793                 is_snap = 1;
  794         else
  795                 is_snap = 0;
  796 
  797         if (is_snap == 0) {
  798                 /*
   799                  * We're setting the first filter and it doesn't look
   800                  * like a snaplen setting.  We're probably using bpf directly.
  801                  * Upgrade immediately.
  802                  */
  803                 need_upgrade = 1;
  804         } else {
  805                 /*
   806                  * Do not upgrade on the first BIOCSETF, which
   807                  * pcap_open_live() uses to set the snaplen.
  808                  */
  809 
  810                 if (--d->bd_writer == 0) {
  811                         /*
   812                          * The first snaplen filter has already
   813                          * been set. This is probably a catch-all
   814                          * filter.
  815                          */
  816                         need_upgrade = 1;
  817                 }
  818         }
  819 
  820         CTR5(KTR_NET,
  821             "%s: filter function set by pid %d, "
  822             "bd_writer counter %d, snap %d upgrade %d",
  823             __func__, d->bd_pid, d->bd_writer,
  824             is_snap, need_upgrade);
  825 
  826         return (need_upgrade);
  827 }
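/*
 * For reference, the single-instruction program that is_snap matches is
 * the one pcap_open_live() installs first.  A sketch, with an illustrative
 * snaplen of 65535:
 *
 *	struct bpf_insn snap_prog[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 65535),
 *	};
 */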
  828 
  829 /*
  830  * Detach a file from its interface.
  831  */
  832 static void
  833 bpf_detachd(struct bpf_d *d)
  834 {
  835         BPF_LOCK();
  836         bpf_detachd_locked(d, false);
  837         BPF_UNLOCK();
  838 }
  839 
  840 static void
  841 bpf_detachd_locked(struct bpf_d *d, bool detached_ifp)
  842 {
  843         struct bpf_if *bp;
  844         struct ifnet *ifp;
  845         int error;
  846 
  847         BPF_LOCK_ASSERT();
  848         CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
  849 
  850         /* Check if descriptor is attached */
  851         if ((bp = d->bd_bif) == NULL)
  852                 return;
  853 
  854         BPFD_LOCK(d);
  855         /* Remove d from the interface's descriptor list. */
  856         CK_LIST_REMOVE(d, bd_next);
  857         /* Save bd_writer value */
  858         error = d->bd_writer;
  859         ifp = bp->bif_ifp;
  860         d->bd_bif = NULL;
  861         if (detached_ifp) {
  862                 /*
  863                  * Notify descriptor as it's detached, so that any
  864                  * sleepers wake up and get ENXIO.
  865                  */
  866                 bpf_wakeup(d);
  867         }
  868         BPFD_UNLOCK(d);
  869         bpf_bpfd_cnt--;
  870 
  871         /* Call event handler iff d is attached */
  872         if (error == 0)
  873                 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
  874 
  875         /*
  876          * Check if this descriptor had requested promiscuous mode.
  877          * If so and ifnet is not detached, turn it off.
  878          */
  879         if (d->bd_promisc && !detached_ifp) {
  880                 d->bd_promisc = 0;
  881                 CURVNET_SET(ifp->if_vnet);
  882                 error = ifpromisc(ifp, 0);
  883                 CURVNET_RESTORE();
  884                 if (error != 0 && error != ENXIO) {
  885                         /*
   886                          * ENXIO can happen if a pccard is unplugged.
  887                          * Something is really wrong if we were able to put
  888                          * the driver into promiscuous mode, but can't
  889                          * take it out.
  890                          */
  891                         if_printf(bp->bif_ifp,
  892                                 "bpf_detach: ifpromisc failed (%d)\n", error);
  893                 }
  894         }
  895         bpfif_rele(bp);
  896 }
  897 
  898 /*
  899  * Close the descriptor by detaching it from its interface,
  900  * deallocating its buffers, and marking it free.
  901  */
  902 static void
  903 bpf_dtor(void *data)
  904 {
  905         struct bpf_d *d = data;
  906 
  907         BPFD_LOCK(d);
  908         if (d->bd_state == BPF_WAITING)
  909                 callout_stop(&d->bd_callout);
  910         d->bd_state = BPF_IDLE;
  911         BPFD_UNLOCK(d);
  912         funsetown(&d->bd_sigio);
  913         bpf_detachd(d);
  914 #ifdef MAC
  915         mac_bpfdesc_destroy(d);
  916 #endif /* MAC */
  917         seldrain(&d->bd_sel);
  918         knlist_destroy(&d->bd_sel.si_note);
  919         callout_drain(&d->bd_callout);
  920         bpfd_rele(d);
  921 }
  922 
  923 /*
   924  * Open the BPF device.  Returns ENXIO for an illegal minor device number,
   925  * EBUSY if the file is open by another process.
  926  */
  927 /* ARGSUSED */
  928 static  int
  929 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
  930 {
  931         struct bpf_d *d;
  932         int error;
  933 
  934         d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
  935         error = devfs_set_cdevpriv(d, bpf_dtor);
  936         if (error != 0) {
  937                 free(d, M_BPF);
  938                 return (error);
  939         }
  940 
  941         /* Setup counters */
  942         d->bd_rcount = counter_u64_alloc(M_WAITOK);
  943         d->bd_dcount = counter_u64_alloc(M_WAITOK);
  944         d->bd_fcount = counter_u64_alloc(M_WAITOK);
  945         d->bd_wcount = counter_u64_alloc(M_WAITOK);
  946         d->bd_wfcount = counter_u64_alloc(M_WAITOK);
  947         d->bd_wdcount = counter_u64_alloc(M_WAITOK);
  948         d->bd_zcopy = counter_u64_alloc(M_WAITOK);
  949 
  950         /*
  951          * For historical reasons, perform a one-time initialization call to
  952          * the buffer routines, even though we're not yet committed to a
  953          * particular buffer method.
  954          */
  955         bpf_buffer_init(d);
  956         if ((flags & FREAD) == 0)
  957                 d->bd_writer = 2;
  958         d->bd_hbuf_in_use = 0;
  959         d->bd_bufmode = BPF_BUFMODE_BUFFER;
  960         d->bd_sig = SIGIO;
  961         d->bd_direction = BPF_D_INOUT;
  962         d->bd_refcnt = 1;
  963         BPF_PID_REFRESH(d, td);
  964 #ifdef MAC
  965         mac_bpfdesc_init(d);
  966         mac_bpfdesc_create(td->td_ucred, d);
  967 #endif
  968         mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
  969         callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
  970         knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
  971 
  972         return (0);
  973 }
  974 
  975 /*
  976  *  bpfread - read next chunk of packets from buffers
  977  */
  978 static  int
  979 bpfread(struct cdev *dev, struct uio *uio, int ioflag)
  980 {
  981         struct bpf_d *d;
  982         int error;
  983         int non_block;
  984         int timed_out;
  985 
  986         error = devfs_get_cdevpriv((void **)&d);
  987         if (error != 0)
  988                 return (error);
  989 
  990         /*
   991          * Restrict the application to use a buffer the same size
   992          * as the kernel buffers.
  993          */
  994         if (uio->uio_resid != d->bd_bufsize)
  995                 return (EINVAL);
  996 
  997         non_block = ((ioflag & O_NONBLOCK) != 0);
  998 
  999         BPFD_LOCK(d);
 1000         BPF_PID_REFRESH_CUR(d);
 1001         if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
 1002                 BPFD_UNLOCK(d);
 1003                 return (EOPNOTSUPP);
 1004         }
 1005         if (d->bd_state == BPF_WAITING)
 1006                 callout_stop(&d->bd_callout);
 1007         timed_out = (d->bd_state == BPF_TIMED_OUT);
 1008         d->bd_state = BPF_IDLE;
 1009         while (d->bd_hbuf_in_use) {
 1010                 error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
 1011                     PRINET|PCATCH, "bd_hbuf", 0);
 1012                 if (error != 0) {
 1013                         BPFD_UNLOCK(d);
 1014                         return (error);
 1015                 }
 1016         }
 1017         /*
 1018          * If the hold buffer is empty, then do a timed sleep, which
 1019          * ends when the timeout expires or when enough packets
 1020          * have arrived to fill the store buffer.
 1021          */
 1022         while (d->bd_hbuf == NULL) {
 1023                 if (d->bd_slen != 0) {
 1024                         /*
  1025                          * One or more packets either arrived since the
  1026                          * previous read or arrived while we were asleep.
 1027                          */
 1028                         if (d->bd_immediate || non_block || timed_out) {
 1029                                 /*
 1030                                  * Rotate the buffers and return what's here
 1031                                  * if we are in immediate mode, non-blocking
 1032                                  * flag is set, or this descriptor timed out.
 1033                                  */
 1034                                 ROTATE_BUFFERS(d);
 1035                                 break;
 1036                         }
 1037                 }
 1038 
 1039                 /*
 1040                  * No data is available, check to see if the bpf device
 1041                  * is still pointed at a real interface.  If not, return
 1042                  * ENXIO so that the userland process knows to rebind
 1043                  * it before using it again.
 1044                  */
 1045                 if (d->bd_bif == NULL) {
 1046                         BPFD_UNLOCK(d);
 1047                         return (ENXIO);
 1048                 }
 1049 
 1050                 if (non_block) {
 1051                         BPFD_UNLOCK(d);
 1052                         return (EWOULDBLOCK);
 1053                 }
 1054                 error = msleep(d, &d->bd_lock, PRINET|PCATCH,
 1055                      "bpf", d->bd_rtout);
 1056                 if (error == EINTR || error == ERESTART) {
 1057                         BPFD_UNLOCK(d);
 1058                         return (error);
 1059                 }
 1060                 if (error == EWOULDBLOCK) {
 1061                         /*
 1062                          * On a timeout, return what's in the buffer,
 1063                          * which may be nothing.  If there is something
 1064                          * in the store buffer, we can rotate the buffers.
 1065                          */
 1066                         if (d->bd_hbuf)
 1067                                 /*
 1068                                  * We filled up the buffer in between
 1069                                  * getting the timeout and arriving
 1070                                  * here, so we don't need to rotate.
 1071                                  */
 1072                                 break;
 1073 
 1074                         if (d->bd_slen == 0) {
 1075                                 BPFD_UNLOCK(d);
 1076                                 return (0);
 1077                         }
 1078                         ROTATE_BUFFERS(d);
 1079                         break;
 1080                 }
 1081         }
 1082         /*
 1083          * At this point, we know we have something in the hold slot.
 1084          */
 1085         d->bd_hbuf_in_use = 1;
 1086         BPFD_UNLOCK(d);
 1087 
 1088         /*
 1089          * Move data from hold buffer into user space.
 1090          * We know the entire buffer is transferred since
 1091          * we checked above that the read buffer is bpf_bufsize bytes.
 1092          *
 1093          * We do not have to worry about simultaneous reads because
 1094          * we waited for sole access to the hold buffer above.
 1095          */
 1096         error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
 1097 
 1098         BPFD_LOCK(d);
 1099         KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
 1100         d->bd_fbuf = d->bd_hbuf;
 1101         d->bd_hbuf = NULL;
 1102         d->bd_hlen = 0;
 1103         bpf_buf_reclaimed(d);
 1104         d->bd_hbuf_in_use = 0;
 1105         wakeup(&d->bd_hbuf_in_use);
 1106         BPFD_UNLOCK(d);
 1107 
 1108         return (error);
 1109 }
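/*
 * A hedged sketch of the matching userland read loop: the buffer filled by
 * read(2) holds zero or more records, each a struct bpf_hdr followed by
 * bh_caplen captured bytes, padded out to a BPF_WORDALIGN boundary.  The
 * handle() callback is illustrative; bufsize must match BIOCGBLEN, as
 * enforced above:
 *
 *	char buf[bufsize], *p;
 *	ssize_t n = read(fd, buf, bufsize);
 *
 *	for (p = buf; p < buf + n; ) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *
 *		handle(p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */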
 1110 
 1111 /*
 1112  * If there are processes sleeping on this descriptor, wake them up.
 1113  */
 1114 static __inline void
 1115 bpf_wakeup(struct bpf_d *d)
 1116 {
 1117 
 1118         BPFD_LOCK_ASSERT(d);
 1119         if (d->bd_state == BPF_WAITING) {
 1120                 callout_stop(&d->bd_callout);
 1121                 d->bd_state = BPF_IDLE;
 1122         }
 1123         wakeup(d);
 1124         if (d->bd_async && d->bd_sig && d->bd_sigio)
 1125                 pgsigio(&d->bd_sigio, d->bd_sig, 0);
 1126 
 1127         selwakeuppri(&d->bd_sel, PRINET);
 1128         KNOTE_LOCKED(&d->bd_sel.si_note, 0);
 1129 }
 1130 
 1131 static void
 1132 bpf_timed_out(void *arg)
 1133 {
 1134         struct bpf_d *d = (struct bpf_d *)arg;
 1135 
 1136         BPFD_LOCK_ASSERT(d);
 1137 
 1138         if (callout_pending(&d->bd_callout) ||
 1139             !callout_active(&d->bd_callout))
 1140                 return;
 1141         if (d->bd_state == BPF_WAITING) {
 1142                 d->bd_state = BPF_TIMED_OUT;
 1143                 if (d->bd_slen != 0)
 1144                         bpf_wakeup(d);
 1145         }
 1146 }
 1147 
 1148 static int
 1149 bpf_ready(struct bpf_d *d)
 1150 {
 1151 
 1152         BPFD_LOCK_ASSERT(d);
 1153 
 1154         if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
 1155                 return (1);
 1156         if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
 1157             d->bd_slen != 0)
 1158                 return (1);
 1159         return (0);
 1160 }
 1161 
 1162 static int
 1163 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
 1164 {
 1165         struct route ro;
 1166         struct sockaddr dst;
 1167         struct epoch_tracker et;
 1168         struct bpf_if *bp;
 1169         struct bpf_d *d;
 1170         struct ifnet *ifp;
 1171         struct mbuf *m, *mc;
 1172         int error, hlen;
 1173 
 1174         error = devfs_get_cdevpriv((void **)&d);
 1175         if (error != 0)
 1176                 return (error);
 1177 
 1178         NET_EPOCH_ENTER(et);
 1179         BPFD_LOCK(d);
 1180         BPF_PID_REFRESH_CUR(d);
 1181         counter_u64_add(d->bd_wcount, 1);
 1182         if ((bp = d->bd_bif) == NULL) {
 1183                 error = ENXIO;
 1184                 goto out_locked;
 1185         }
 1186 
 1187         ifp = bp->bif_ifp;
 1188         if ((ifp->if_flags & IFF_UP) == 0) {
 1189                 error = ENETDOWN;
 1190                 goto out_locked;
 1191         }
 1192 
 1193         if (uio->uio_resid == 0)
 1194                 goto out_locked;
 1195 
 1196         bzero(&dst, sizeof(dst));
 1197         m = NULL;
 1198         hlen = 0;
 1199 
 1200         /*
  1201          * Take an extra reference, unlock d and exit the epoch section,
 1202          * since bpf_movein() can sleep.
 1203          */
 1204         bpfd_ref(d);
 1205         NET_EPOCH_EXIT(et);
 1206         BPFD_UNLOCK(d);
 1207 
 1208         error = bpf_movein(uio, (int)bp->bif_dlt, ifp,
 1209             &m, &dst, &hlen, d);
 1210 
 1211         if (error != 0) {
 1212                 counter_u64_add(d->bd_wdcount, 1);
 1213                 bpfd_rele(d);
 1214                 return (error);
 1215         }
 1216 
 1217         BPFD_LOCK(d);
 1218         /*
  1219          * Check that the descriptor is still attached to an interface;
  1220          * it may have been detached by bpfdetach(). To avoid touching a
  1221          * detached ifnet, free the mbuf and return ENXIO.
 1222          */
 1223         if (d->bd_bif == NULL) {
 1224                 counter_u64_add(d->bd_wdcount, 1);
 1225                 BPFD_UNLOCK(d);
 1226                 bpfd_rele(d);
 1227                 m_freem(m);
 1228                 return (ENXIO);
 1229         }
 1230         counter_u64_add(d->bd_wfcount, 1);
 1231         if (d->bd_hdrcmplt)
 1232                 dst.sa_family = pseudo_AF_HDRCMPLT;
 1233 
 1234         if (d->bd_feedback) {
 1235                 mc = m_dup(m, M_NOWAIT);
 1236                 if (mc != NULL)
 1237                         mc->m_pkthdr.rcvif = ifp;
 1238                 /* Set M_PROMISC for outgoing packets to be discarded. */
 1239                 if (d->bd_direction == BPF_D_INOUT)
 1240                         m->m_flags |= M_PROMISC;
 1241         } else
 1242                 mc = NULL;
 1243 
 1244         m->m_pkthdr.len -= hlen;
 1245         m->m_len -= hlen;
 1246         m->m_data += hlen;      /* XXX */
 1247 
 1248         CURVNET_SET(ifp->if_vnet);
 1249 #ifdef MAC
 1250         mac_bpfdesc_create_mbuf(d, m);
 1251         if (mc != NULL)
 1252                 mac_bpfdesc_create_mbuf(d, mc);
 1253 #endif
 1254 
 1255         bzero(&ro, sizeof(ro));
 1256         if (hlen != 0) {
 1257                 ro.ro_prepend = (u_char *)&dst.sa_data;
 1258                 ro.ro_plen = hlen;
 1259                 ro.ro_flags = RT_HAS_HEADER;
 1260         }
 1261 
 1262         /* Avoid possible recursion on BPFD_LOCK(). */
 1263         NET_EPOCH_ENTER(et);
 1264         BPFD_UNLOCK(d);
 1265         error = (*ifp->if_output)(ifp, m, &dst, &ro);
 1266         if (error)
 1267                 counter_u64_add(d->bd_wdcount, 1);
 1268 
 1269         if (mc != NULL) {
 1270                 if (error == 0)
 1271                         (*ifp->if_input)(ifp, mc);
 1272                 else
 1273                         m_freem(mc);
 1274         }
 1275         NET_EPOCH_EXIT(et);
 1276         CURVNET_RESTORE();
 1277         bpfd_rele(d);
 1278         return (error);
 1279 
 1280 out_locked:
 1281         counter_u64_add(d->bd_wdcount, 1);
 1282         NET_EPOCH_EXIT(et);
 1283         BPFD_UNLOCK(d);
 1284         return (error);
 1285 }
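/*
 * From userland the path above is a plain write(2) of a complete link
 * layer frame.  A hedged sketch for DLT_EN10MB, where frame and frame_len
 * are illustrative and BIOCSHDRCMPLT keeps the source MAC supplied in the
 * frame instead of overwriting it:
 *
 *	u_int complete = 1;
 *
 *	ioctl(fd, BIOCSHDRCMPLT, &complete);
 *	write(fd, frame, frame_len);
 */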
 1286 
 1287 /*
 1288  * Reset a descriptor by flushing its packet buffer and clearing the receive
 1289  * and drop counts.  This is doable for kernel-only buffers, but with
 1290  * zero-copy buffers, we can't write to (or rotate) buffers that are
 1291  * currently owned by userspace.  It would be nice if we could encapsulate
 1292  * this logic in the buffer code rather than here.
 1293  */
 1294 static void
 1295 reset_d(struct bpf_d *d)
 1296 {
 1297 
 1298         BPFD_LOCK_ASSERT(d);
 1299 
 1300         while (d->bd_hbuf_in_use)
 1301                 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
 1302                     "bd_hbuf", 0);
 1303         if ((d->bd_hbuf != NULL) &&
 1304             (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
 1305                 /* Free the hold buffer. */
 1306                 d->bd_fbuf = d->bd_hbuf;
 1307                 d->bd_hbuf = NULL;
 1308                 d->bd_hlen = 0;
 1309                 bpf_buf_reclaimed(d);
 1310         }
 1311         if (bpf_canwritebuf(d))
 1312                 d->bd_slen = 0;
 1313         counter_u64_zero(d->bd_rcount);
 1314         counter_u64_zero(d->bd_dcount);
 1315         counter_u64_zero(d->bd_fcount);
 1316         counter_u64_zero(d->bd_wcount);
 1317         counter_u64_zero(d->bd_wfcount);
 1318         counter_u64_zero(d->bd_wdcount);
 1319         counter_u64_zero(d->bd_zcopy);
 1320 }
 1321 
 1322 /*
 1323  *  FIONREAD            Check for read packet available.
 1324  *  BIOCGBLEN           Get buffer len [for read()].
 1325  *  BIOCSETF            Set read filter.
 1326  *  BIOCSETFNR          Set read filter without resetting descriptor.
 1327  *  BIOCSETWF           Set write filter.
 1328  *  BIOCFLUSH           Flush read packet buffer.
 1329  *  BIOCPROMISC         Put interface into promiscuous mode.
 1330  *  BIOCGDLT            Get link layer type.
 1331  *  BIOCGETIF           Get interface name.
 1332  *  BIOCSETIF           Set interface.
 1333  *  BIOCSRTIMEOUT       Set read timeout.
 1334  *  BIOCGRTIMEOUT       Get read timeout.
 1335  *  BIOCGSTATS          Get packet stats.
 1336  *  BIOCIMMEDIATE       Set immediate mode.
 1337  *  BIOCVERSION         Get filter language version.
  1338  *  BIOCGHDRCMPLT       Get "header already complete" flag.
  1339  *  BIOCSHDRCMPLT       Set "header already complete" flag.
  1340  *  BIOCGDIRECTION      Get packet direction flag.
  1341  *  BIOCSDIRECTION      Set packet direction flag.
  1342  *  BIOCGTSTAMP         Get time stamp format and resolution.
  1343  *  BIOCSTSTAMP         Set time stamp format and resolution.
  1344  *  BIOCLOCK            Set "locked" flag.
  1345  *  BIOCFEEDBACK        Set packet feedback mode.
  1346  *  BIOCSETZBUF         Set current zero-copy buffer locations.
  1347  *  BIOCGETZMAX         Get maximum zero-copy buffer size.
  1348  *  BIOCROTZBUF         Force rotation of zero-copy buffer.
 1349  *  BIOCSETBUFMODE      Set buffer mode.
 1350  *  BIOCGETBUFMODE      Get current buffer mode.
 1351  */
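/*
 * A typical userland attach sequence exercising a few of the requests
 * above, as a hedged sketch (the interface name is illustrative):
 *
 *	struct ifreq ifr;
 *	u_int on = 1;
 *	int fd = open("/dev/bpf", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	ioctl(fd, BIOCIMMEDIATE, &on);
 */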
 1352 /* ARGSUSED */
 1353 static  int
 1354 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
 1355     struct thread *td)
 1356 {
 1357         struct bpf_d *d;
 1358         int error;
 1359 
 1360         error = devfs_get_cdevpriv((void **)&d);
 1361         if (error != 0)
 1362                 return (error);
 1363 
 1364         /*
 1365          * Refresh PID associated with this descriptor.
 1366          */
 1367         BPFD_LOCK(d);
 1368         BPF_PID_REFRESH(d, td);
 1369         if (d->bd_state == BPF_WAITING)
 1370                 callout_stop(&d->bd_callout);
 1371         d->bd_state = BPF_IDLE;
 1372         BPFD_UNLOCK(d);
 1373 
 1374         if (d->bd_locked == 1) {
 1375                 switch (cmd) {
 1376                 case BIOCGBLEN:
 1377                 case BIOCFLUSH:
 1378                 case BIOCGDLT:
 1379                 case BIOCGDLTLIST:
 1380 #ifdef COMPAT_FREEBSD32
 1381                 case BIOCGDLTLIST32:
 1382 #endif
 1383                 case BIOCGETIF:
 1384                 case BIOCGRTIMEOUT:
 1385 #if defined(COMPAT_FREEBSD32) && defined(__amd64__)
 1386                 case BIOCGRTIMEOUT32:
 1387 #endif
 1388                 case BIOCGSTATS:
 1389                 case BIOCVERSION:
 1390                 case BIOCGRSIG:
 1391                 case BIOCGHDRCMPLT:
 1392                 case BIOCSTSTAMP:
 1393                 case BIOCFEEDBACK:
 1394                 case FIONREAD:
 1395                 case BIOCLOCK:
 1396                 case BIOCSRTIMEOUT:
 1397 #if defined(COMPAT_FREEBSD32) && defined(__amd64__)
 1398                 case BIOCSRTIMEOUT32:
 1399 #endif
 1400                 case BIOCIMMEDIATE:
 1401                 case TIOCGPGRP:
 1402                 case BIOCROTZBUF:
 1403                         break;
 1404                 default:
 1405                         return (EPERM);
 1406                 }
 1407         }
 1408 #ifdef COMPAT_FREEBSD32
 1409         /*
 1410          * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
 1411          * that it will get 32-bit packet headers.
 1412          */
 1413         switch (cmd) {
 1414         case BIOCSETF32:
 1415         case BIOCSETFNR32:
 1416         case BIOCSETWF32:
 1417         case BIOCGDLTLIST32:
 1418         case BIOCGRTIMEOUT32:
 1419         case BIOCSRTIMEOUT32:
 1420                 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
 1421                         BPFD_LOCK(d);
 1422                         d->bd_compat32 = 1;
 1423                         BPFD_UNLOCK(d);
 1424                 }
 1425         }
 1426 #endif
 1427 
 1428         CURVNET_SET(TD_TO_VNET(td));
 1429         switch (cmd) {
 1430         default:
 1431                 error = EINVAL;
 1432                 break;
 1433 
 1434         /*
 1435          * Check for read packet available.
 1436          */
 1437         case FIONREAD:
 1438                 {
 1439                         int n;
 1440 
 1441                         BPFD_LOCK(d);
 1442                         n = d->bd_slen;
 1443                         while (d->bd_hbuf_in_use)
 1444                                 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
 1445                                     PRINET, "bd_hbuf", 0);
 1446                         if (d->bd_hbuf)
 1447                                 n += d->bd_hlen;
 1448                         BPFD_UNLOCK(d);
 1449 
 1450                         *(int *)addr = n;
 1451                         break;
 1452                 }
 1453 
 1454         /*
 1455          * Get buffer len [for read()].
 1456          */
 1457         case BIOCGBLEN:
 1458                 BPFD_LOCK(d);
 1459                 *(u_int *)addr = d->bd_bufsize;
 1460                 BPFD_UNLOCK(d);
 1461                 break;
 1462 
 1463         /*
 1464          * Set buffer length.
 1465          */
 1466         case BIOCSBLEN:
 1467                 error = bpf_ioctl_sblen(d, (u_int *)addr);
 1468                 break;
 1469 
 1470         /*
 1471          * Set link layer read filter.
 1472          */
 1473         case BIOCSETF:
 1474         case BIOCSETFNR:
 1475         case BIOCSETWF:
 1476 #ifdef COMPAT_FREEBSD32
 1477         case BIOCSETF32:
 1478         case BIOCSETFNR32:
 1479         case BIOCSETWF32:
 1480 #endif
 1481                 error = bpf_setf(d, (struct bpf_program *)addr, cmd);
 1482                 break;
 1483 
 1484         /*
 1485          * Flush read packet buffer.
 1486          */
 1487         case BIOCFLUSH:
 1488                 BPFD_LOCK(d);
 1489                 reset_d(d);
 1490                 BPFD_UNLOCK(d);
 1491                 break;
 1492 
 1493         /*
 1494          * Put interface into promiscuous mode.
 1495          */
 1496         case BIOCPROMISC:
 1497                 if (d->bd_bif == NULL) {
 1498                         /*
 1499                          * No interface attached yet.
 1500                          */
 1501                         error = EINVAL;
 1502                         break;
 1503                 }
 1504                 if (d->bd_promisc == 0) {
 1505                         error = ifpromisc(d->bd_bif->bif_ifp, 1);
 1506                         if (error == 0)
 1507                                 d->bd_promisc = 1;
 1508                 }
 1509                 break;
 1510 
 1511         /*
 1512          * Get current data link type.
 1513          */
 1514         case BIOCGDLT:
 1515                 BPF_LOCK();
 1516                 if (d->bd_bif == NULL)
 1517                         error = EINVAL;
 1518                 else
 1519                         *(u_int *)addr = d->bd_bif->bif_dlt;
 1520                 BPF_UNLOCK();
 1521                 break;
 1522 
 1523         /*
 1524          * Get a list of supported data link types.
 1525          */
 1526 #ifdef COMPAT_FREEBSD32
 1527         case BIOCGDLTLIST32:
 1528                 {
 1529                         struct bpf_dltlist32 *list32;
 1530                         struct bpf_dltlist dltlist;
 1531 
 1532                         list32 = (struct bpf_dltlist32 *)addr;
 1533                         dltlist.bfl_len = list32->bfl_len;
 1534                         dltlist.bfl_list = PTRIN(list32->bfl_list);
 1535                         BPF_LOCK();
 1536                         if (d->bd_bif == NULL)
 1537                                 error = EINVAL;
 1538                         else {
 1539                                 error = bpf_getdltlist(d, &dltlist);
 1540                                 if (error == 0)
 1541                                         list32->bfl_len = dltlist.bfl_len;
 1542                         }
 1543                         BPF_UNLOCK();
 1544                         break;
 1545                 }
 1546 #endif
 1547 
 1548         case BIOCGDLTLIST:
 1549                 BPF_LOCK();
 1550                 if (d->bd_bif == NULL)
 1551                         error = EINVAL;
 1552                 else
 1553                         error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
 1554                 BPF_UNLOCK();
 1555                 break;
 1556 
 1557         /*
 1558          * Set data link type.
 1559          */
 1560         case BIOCSDLT:
 1561                 BPF_LOCK();
 1562                 if (d->bd_bif == NULL)
 1563                         error = EINVAL;
 1564                 else
 1565                         error = bpf_setdlt(d, *(u_int *)addr);
 1566                 BPF_UNLOCK();
 1567                 break;
 1568 
 1569         /*
 1570          * Get interface name.
 1571          */
 1572         case BIOCGETIF:
 1573                 BPF_LOCK();
 1574                 if (d->bd_bif == NULL)
 1575                         error = EINVAL;
 1576                 else {
 1577                         struct ifnet *const ifp = d->bd_bif->bif_ifp;
 1578                         struct ifreq *const ifr = (struct ifreq *)addr;
 1579 
 1580                         strlcpy(ifr->ifr_name, ifp->if_xname,
 1581                             sizeof(ifr->ifr_name));
 1582                 }
 1583                 BPF_UNLOCK();
 1584                 break;
 1585 
 1586         /*
 1587          * Set interface.
 1588          */
 1589         case BIOCSETIF:
 1590                 {
 1591                         int alloc_buf, size;
 1592 
 1593                         /*
 1594                          * Behavior here depends on the buffering model.  If
 1595                          * we're using kernel memory buffers, then we can
 1596                          * allocate them here.  If we're using zero-copy,
 1597                          * then the user process must have registered buffers
 1598                          * by the time we get here.
 1599                          */
 1600                         alloc_buf = 0;
 1601                         BPFD_LOCK(d);
 1602                         if (d->bd_bufmode == BPF_BUFMODE_BUFFER &&
 1603                             d->bd_sbuf == NULL)
 1604                                 alloc_buf = 1;
 1605                         BPFD_UNLOCK(d);
 1606                         if (alloc_buf) {
 1607                                 size = d->bd_bufsize;
 1608                                 error = bpf_buffer_ioctl_sblen(d, &size);
 1609                                 if (error != 0)
 1610                                         break;
 1611                         }
 1612                         BPF_LOCK();
 1613                         error = bpf_setif(d, (struct ifreq *)addr);
 1614                         BPF_UNLOCK();
 1615                         break;
 1616                 }
 1617 
 1618         /*
 1619          * Set read timeout.
 1620          */
 1621         case BIOCSRTIMEOUT:
 1622 #if defined(COMPAT_FREEBSD32) && defined(__amd64__)
 1623         case BIOCSRTIMEOUT32:
 1624 #endif
 1625                 {
 1626                         struct timeval *tv = (struct timeval *)addr;
 1627 #if defined(COMPAT_FREEBSD32) && defined(__amd64__)
 1628                         struct timeval32 *tv32;
 1629                         struct timeval tv64;
 1630 
 1631                         if (cmd == BIOCSRTIMEOUT32) {
 1632                                 tv32 = (struct timeval32 *)addr;
 1633                                 tv = &tv64;
 1634                                 tv->tv_sec = tv32->tv_sec;
 1635                                 tv->tv_usec = tv32->tv_usec;
 1636                         } else
 1637 #endif
 1638                                 tv = (struct timeval *)addr;
 1639 
 1640                         /*
 1641                          * Subtract 1 tick from tvtohz() since this isn't
 1642                          * a one-shot timer.
 1643                          */
 1644                         if ((error = itimerfix(tv)) == 0)
 1645                                 d->bd_rtout = tvtohz(tv) - 1;
 1646                         break;
 1647                 }
 1648 
 1649         /*
 1650          * Get read timeout.
 1651          */
 1652         case BIOCGRTIMEOUT:
 1653 #if defined(COMPAT_FREEBSD32) && defined(__amd64__)
 1654         case BIOCGRTIMEOUT32:
 1655 #endif
 1656                 {
 1657                         struct timeval *tv;
 1658 #if defined(COMPAT_FREEBSD32) && defined(__amd64__)
 1659                         struct timeval32 *tv32;
 1660                         struct timeval tv64;
 1661 
 1662                         if (cmd == BIOCGRTIMEOUT32)
 1663                                 tv = &tv64;
 1664                         else
 1665 #endif
 1666                                 tv = (struct timeval *)addr;
 1667 
 1668                         tv->tv_sec = d->bd_rtout / hz;
 1669                         tv->tv_usec = (d->bd_rtout % hz) * tick;
 1670 #if defined(COMPAT_FREEBSD32) && defined(__amd64__)
 1671                         if (cmd == BIOCGRTIMEOUT32) {
 1672                                 tv32 = (struct timeval32 *)addr;
 1673                                 tv32->tv_sec = tv->tv_sec;
 1674                                 tv32->tv_usec = tv->tv_usec;
 1675                         }
 1676 #endif
 1677 
 1678                         break;
 1679                 }
 1680 
 1681         /*
 1682          * Get packet stats.
 1683          */
 1684         case BIOCGSTATS:
 1685                 {
 1686                         struct bpf_stat *bs = (struct bpf_stat *)addr;
 1687 
 1688                         /* XXXCSJP overflow */
 1689                         bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount);
 1690                         bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount);
 1691                         break;
 1692                 }
 1693 
 1694         /*
 1695          * Set immediate mode.
 1696          */
 1697         case BIOCIMMEDIATE:
 1698                 BPFD_LOCK(d);
 1699                 d->bd_immediate = *(u_int *)addr;
 1700                 BPFD_UNLOCK(d);
 1701                 break;
 1702 
 1703         case BIOCVERSION:
 1704                 {
 1705                         struct bpf_version *bv = (struct bpf_version *)addr;
 1706 
 1707                         bv->bv_major = BPF_MAJOR_VERSION;
 1708                         bv->bv_minor = BPF_MINOR_VERSION;
 1709                         break;
 1710                 }
 1711 
 1712         /*
 1713          * Get "header already complete" flag
 1714          */
 1715         case BIOCGHDRCMPLT:
 1716                 BPFD_LOCK(d);
 1717                 *(u_int *)addr = d->bd_hdrcmplt;
 1718                 BPFD_UNLOCK(d);
 1719                 break;
 1720 
 1721         /*
 1722          * Set "header already complete" flag
 1723          */
 1724         case BIOCSHDRCMPLT:
 1725                 BPFD_LOCK(d);
 1726                 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
 1727                 BPFD_UNLOCK(d);
 1728                 break;
 1729 
 1730         /*
 1731          * Get packet direction flag
 1732          */
 1733         case BIOCGDIRECTION:
 1734                 BPFD_LOCK(d);
 1735                 *(u_int *)addr = d->bd_direction;
 1736                 BPFD_UNLOCK(d);
 1737                 break;
 1738 
 1739         /*
 1740          * Set packet direction flag
 1741          */
 1742         case BIOCSDIRECTION:
 1743                 {
 1744                         u_int   direction;
 1745 
 1746                         direction = *(u_int *)addr;
 1747                         switch (direction) {
 1748                         case BPF_D_IN:
 1749                         case BPF_D_INOUT:
 1750                         case BPF_D_OUT:
 1751                                 BPFD_LOCK(d);
 1752                                 d->bd_direction = direction;
 1753                                 BPFD_UNLOCK(d);
 1754                                 break;
 1755                         default:
 1756                                 error = EINVAL;
 1757                         }
 1758                 }
 1759                 break;
 1760 
 1761         /*
 1762          * Get packet timestamp format and resolution.
 1763          */
 1764         case BIOCGTSTAMP:
 1765                 BPFD_LOCK(d);
 1766                 *(u_int *)addr = d->bd_tstamp;
 1767                 BPFD_UNLOCK(d);
 1768                 break;
 1769 
 1770         /*
 1771          * Set packet timestamp format and resolution.
 1772          */
 1773         case BIOCSTSTAMP:
 1774                 {
 1775                         u_int   func;
 1776 
 1777                         func = *(u_int *)addr;
 1778                         if (BPF_T_VALID(func))
 1779                                 d->bd_tstamp = func;
 1780                         else
 1781                                 error = EINVAL;
 1782                 }
 1783                 break;
 1784 
 1785         case BIOCFEEDBACK:
 1786                 BPFD_LOCK(d);
 1787                 d->bd_feedback = *(u_int *)addr;
 1788                 BPFD_UNLOCK(d);
 1789                 break;
 1790 
 1791         case BIOCLOCK:
 1792                 BPFD_LOCK(d);
 1793                 d->bd_locked = 1;
 1794                 BPFD_UNLOCK(d);
 1795                 break;
 1796 
 1797         case FIONBIO:           /* Non-blocking I/O */
 1798                 break;
 1799 
 1800         case FIOASYNC:          /* Send signal on receive packets */
 1801                 BPFD_LOCK(d);
 1802                 d->bd_async = *(int *)addr;
 1803                 BPFD_UNLOCK(d);
 1804                 break;
 1805 
 1806         case FIOSETOWN:
 1807                 /*
 1808                  * XXX: Add some sort of locking here?
 1809                  * fsetown() can sleep.
 1810                  */
 1811                 error = fsetown(*(int *)addr, &d->bd_sigio);
 1812                 break;
 1813 
 1814         case FIOGETOWN:
 1815                 BPFD_LOCK(d);
 1816                 *(int *)addr = fgetown(&d->bd_sigio);
 1817                 BPFD_UNLOCK(d);
 1818                 break;
 1819 
 1820         /* This is deprecated; FIOSETOWN should be used instead. */
 1821         case TIOCSPGRP:
 1822                 error = fsetown(-(*(int *)addr), &d->bd_sigio);
 1823                 break;
 1824 
 1825         /* This is deprecated; FIOGETOWN should be used instead. */
 1826         case TIOCGPGRP:
 1827                 *(int *)addr = -fgetown(&d->bd_sigio);
 1828                 break;
 1829 
 1830         case BIOCSRSIG:         /* Set receive signal */
 1831                 {
 1832                         u_int sig;
 1833 
 1834                         sig = *(u_int *)addr;
 1835 
 1836                         if (sig >= NSIG)
 1837                                 error = EINVAL;
 1838                         else {
 1839                                 BPFD_LOCK(d);
 1840                                 d->bd_sig = sig;
 1841                                 BPFD_UNLOCK(d);
 1842                         }
 1843                         break;
 1844                 }
 1845         case BIOCGRSIG:
 1846                 BPFD_LOCK(d);
 1847                 *(u_int *)addr = d->bd_sig;
 1848                 BPFD_UNLOCK(d);
 1849                 break;
 1850 
 1851         case BIOCGETBUFMODE:
 1852                 BPFD_LOCK(d);
 1853                 *(u_int *)addr = d->bd_bufmode;
 1854                 BPFD_UNLOCK(d);
 1855                 break;
 1856 
 1857         case BIOCSETBUFMODE:
 1858                 /*
 1859                  * Allow the buffering mode to be changed as long as we
 1860                  * haven't yet committed to a particular mode.  Our
 1861                  * definition of commitment, for now, is whether or not a
 1862                  * buffer has been allocated or an interface attached, since
 1863                  * that's the point where things get tricky.
 1864                  */
 1865                 switch (*(u_int *)addr) {
 1866                 case BPF_BUFMODE_BUFFER:
 1867                         break;
 1868 
 1869                 case BPF_BUFMODE_ZBUF:
 1870                         if (bpf_zerocopy_enable)
 1871                                 break;
 1872                         /* FALLTHROUGH */
 1873 
 1874                 default:
 1875                         CURVNET_RESTORE();
 1876                         return (EINVAL);
 1877                 }
 1878 
 1879                 BPFD_LOCK(d);
 1880                 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
 1881                     d->bd_fbuf != NULL || d->bd_bif != NULL) {
 1882                         BPFD_UNLOCK(d);
 1883                         CURVNET_RESTORE();
 1884                         return (EBUSY);
 1885                 }
 1886                 d->bd_bufmode = *(u_int *)addr;
 1887                 BPFD_UNLOCK(d);
 1888                 break;
 1889 
 1890         case BIOCGETZMAX:
 1891                 error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
 1892                 break;
 1893 
 1894         case BIOCSETZBUF:
 1895                 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
 1896                 break;
 1897 
 1898         case BIOCROTZBUF:
 1899                 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
 1900                 break;
 1901         }
 1902         CURVNET_RESTORE();
 1903         return (error);
 1904 }
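
/*
 * Illustrative userland sketch (not part of this file): a minimal
 * open/configure sequence exercising several of the ioctls handled
 * above.  Error handling is abbreviated and the interface name is
 * supplied by the caller; this is a sketch, not the canonical API use.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
open_bpf(const char *ifname)
{
        struct ifreq ifr;
        u_int imm = 1, blen;
        int fd;

        fd = open("/dev/bpf", O_RDWR);          /* cloning device; bpf0 is an alias */
        if (fd < 0)
                return (-1);
        if (ioctl(fd, BIOCGBLEN, &blen) < 0)    /* current buffer size */
                goto fail;
        memset(&ifr, 0, sizeof(ifr));
        strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
        if (ioctl(fd, BIOCSETIF, &ifr) < 0)     /* attach to the interface */
                goto fail;
        if (ioctl(fd, BIOCIMMEDIATE, &imm) < 0) /* deliver packets as they arrive */
                goto fail;
        return (fd);
fail:
        close(fd);
        return (-1);
}
#endif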
 1905 
 1906 /*
 1907  * Set d's packet filter program to fp. If this file already has a filter,
 1908  * free it and replace it. Returns EINVAL for bogus requests.
 1909  *
 1910  * Note we use global lock here to serialize bpf_setf() and bpf_setif()
 1911  * calls.
 1912  */
 1913 static int
 1914 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
 1915 {
 1916 #ifdef COMPAT_FREEBSD32
 1917         struct bpf_program fp_swab;
 1918         struct bpf_program32 *fp32;
 1919 #endif
 1920         struct bpf_program_buffer *fcode;
 1921         struct bpf_insn *filter;
 1922 #ifdef BPF_JITTER
 1923         bpf_jit_filter *jfunc;
 1924 #endif
 1925         size_t size;
 1926         u_int flen;
 1927         bool track_event;
 1928 
 1929 #ifdef COMPAT_FREEBSD32
 1930         switch (cmd) {
 1931         case BIOCSETF32:
 1932         case BIOCSETWF32:
 1933         case BIOCSETFNR32:
 1934                 fp32 = (struct bpf_program32 *)fp;
 1935                 fp_swab.bf_len = fp32->bf_len;
 1936                 fp_swab.bf_insns =
 1937                     (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
 1938                 fp = &fp_swab;
 1939                 switch (cmd) {
 1940                 case BIOCSETF32:
 1941                         cmd = BIOCSETF;
 1942                         break;
 1943                 case BIOCSETWF32:
 1944                         cmd = BIOCSETWF;
 1945                         break;
 1946                 }
 1947                 break;
 1948         }
 1949 #endif
 1950 
 1951         filter = NULL;
 1952 #ifdef BPF_JITTER
 1953         jfunc = NULL;
 1954 #endif
 1955          * Check the new filter for validity before acquiring any locks.
 1956          * Allocate memory for the new filter, if needed.
 1957          * Allocate memory for new filter, if needed.
 1958          */
 1959         flen = fp->bf_len;
 1960         if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
 1961                 return (EINVAL);
 1962         size = flen * sizeof(*fp->bf_insns);
 1963         if (size > 0) {
 1964                 /* We're setting up new filter. Copy and check actual data. */
 1965                 fcode = bpf_program_buffer_alloc(size, M_WAITOK);
 1966                 filter = (struct bpf_insn *)fcode->buffer;
 1967                 if (copyin(fp->bf_insns, filter, size) != 0 ||
 1968                     !bpf_validate(filter, flen)) {
 1969                         free(fcode, M_BPF);
 1970                         return (EINVAL);
 1971                 }
 1972 #ifdef BPF_JITTER
 1973                 if (cmd != BIOCSETWF) {
 1974                         /*
 1975                          * Filter is copied inside fcode and is
 1976                          * perfectly valid.
 1977                          */
 1978                         jfunc = bpf_jitter(filter, flen);
 1979                 }
 1980 #endif
 1981         }
 1982 
 1983         track_event = false;
 1984         fcode = NULL;
 1985 
 1986         BPF_LOCK();
 1987         BPFD_LOCK(d);
 1988         /* Set up new filter. */
 1989         if (cmd == BIOCSETWF) {
 1990                 if (d->bd_wfilter != NULL) {
 1991                         fcode = __containerof((void *)d->bd_wfilter,
 1992                             struct bpf_program_buffer, buffer);
 1993 #ifdef BPF_JITTER
 1994                         fcode->func = NULL;
 1995 #endif
 1996                 }
 1997                 d->bd_wfilter = filter;
 1998         } else {
 1999                 if (d->bd_rfilter != NULL) {
 2000                         fcode = __containerof((void *)d->bd_rfilter,
 2001                             struct bpf_program_buffer, buffer);
 2002 #ifdef BPF_JITTER
 2003                         fcode->func = d->bd_bfilter;
 2004 #endif
 2005                 }
 2006                 d->bd_rfilter = filter;
 2007 #ifdef BPF_JITTER
 2008                 d->bd_bfilter = jfunc;
 2009 #endif
 2010                 if (cmd == BIOCSETF)
 2011                         reset_d(d);
 2012 
 2013                 if (bpf_check_upgrade(cmd, d, filter, flen) != 0) {
 2014                         /*
 2015                          * The filter can be set several times without
 2016                          * specifying an interface. In this case, just
 2017                          * mark d as a reader.
 2018                          */
 2019                         d->bd_writer = 0;
 2020                         if (d->bd_bif != NULL) {
 2021                                 /*
 2022                                  * Remove descriptor from writers-only list
 2023                                  * and add it to active readers list.
 2024                                  */
 2025                                 CK_LIST_REMOVE(d, bd_next);
 2026                                 CK_LIST_INSERT_HEAD(&d->bd_bif->bif_dlist,
 2027                                     d, bd_next);
 2028                                 CTR2(KTR_NET,
 2029                                     "%s: upgrade required by pid %d",
 2030                                     __func__, d->bd_pid);
 2031                                 track_event = true;
 2032                         }
 2033                 }
 2034         }
 2035         BPFD_UNLOCK(d);
 2036 
 2037         if (fcode != NULL)
 2038                 NET_EPOCH_CALL(bpf_program_buffer_free, &fcode->epoch_ctx);
 2039 
 2040         if (track_event)
 2041                 EVENTHANDLER_INVOKE(bpf_track,
 2042                     d->bd_bif->bif_ifp, d->bd_bif->bif_dlt, 1);
 2043 
 2044         BPF_UNLOCK();
 2045         return (0);
 2046 }
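
/*
 * Illustrative userland sketch (not part of this file): installing a
 * small read filter with BIOCSETF, the path that lands in bpf_setf()
 * above.  The program accepts IPv4 over Ethernet (DLT_EN10MB framing
 * assumed) and is checked by bpf_validate() before being installed.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>

static int
set_ipv4_filter(int fd)
{
        /* ldh [12]; jeq #0x0800 ? accept : reject */
        static struct bpf_insn insns[] = {
                BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
                BPF_STMT(BPF_RET | BPF_K, (u_int)-1),   /* accept, full snaplen */
                BPF_STMT(BPF_RET | BPF_K, 0),           /* reject */
        };
        struct bpf_program prog = {
                .bf_len = sizeof(insns) / sizeof(insns[0]),
                .bf_insns = insns,
        };

        return (ioctl(fd, BIOCSETF, &prog));
}
#endif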
 2047 
 2048 /*
 2049  * Detach a file from its current interface (if attached at all) and attach
 2050  * to the interface indicated by the name stored in ifr.
 2051  * Return an errno or 0.
 2052  */
 2053 static int
 2054 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
 2055 {
 2056         struct bpf_if *bp;
 2057         struct ifnet *theywant;
 2058 
 2059         BPF_LOCK_ASSERT();
 2060 
 2061         theywant = ifunit(ifr->ifr_name);
 2062         if (theywant == NULL || theywant->if_bpf == NULL)
 2063                 return (ENXIO);
 2064 
 2065         bp = theywant->if_bpf;
 2066         /*
 2067          * At this point, we expect the buffer is already allocated.  If not,
 2068          * return an error.
 2069          */
 2070         switch (d->bd_bufmode) {
 2071         case BPF_BUFMODE_BUFFER:
 2072         case BPF_BUFMODE_ZBUF:
 2073                 if (d->bd_sbuf == NULL)
 2074                         return (EINVAL);
 2075                 break;
 2076 
 2077         default:
 2078                 panic("bpf_setif: bufmode %d", d->bd_bufmode);
 2079         }
 2080         if (bp != d->bd_bif)
 2081                 bpf_attachd(d, bp);
 2082         else {
 2083                 BPFD_LOCK(d);
 2084                 reset_d(d);
 2085                 BPFD_UNLOCK(d);
 2086         }
 2087         return (0);
 2088 }
 2089 
 2090 /*
 2091  * Support for select() and poll() system calls
 2092  *
 2093  * Return true iff the specific operation will not block indefinitely.
 2094  * Otherwise, return false but make a note that a selwakeup() must be done.
 2095  */
 2096 static int
 2097 bpfpoll(struct cdev *dev, int events, struct thread *td)
 2098 {
 2099         struct bpf_d *d;
 2100         int revents;
 2101 
 2102         if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
 2103                 return (events &
 2104                     (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
 2105 
 2106         /*
 2107          * Refresh PID associated with this descriptor.
 2108          */
 2109         revents = events & (POLLOUT | POLLWRNORM);
 2110         BPFD_LOCK(d);
 2111         BPF_PID_REFRESH(d, td);
 2112         if (events & (POLLIN | POLLRDNORM)) {
 2113                 if (bpf_ready(d))
 2114                         revents |= events & (POLLIN | POLLRDNORM);
 2115                 else {
 2116                         selrecord(td, &d->bd_sel);
 2117                         /* Start the read timeout if necessary. */
 2118                         if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
 2119                                 callout_reset(&d->bd_callout, d->bd_rtout,
 2120                                     bpf_timed_out, d);
 2121                                 d->bd_state = BPF_WAITING;
 2122                         }
 2123                 }
 2124         }
 2125         BPFD_UNLOCK(d);
 2126         return (revents);
 2127 }
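
/*
 * Illustrative userland sketch (not part of this file): blocking in
 * poll() on a bpf descriptor.  BIOCSRTIMEOUT arms the bd_rtout value
 * that bpfpoll() above uses to start the read-timeout callout, so the
 * poller is woken even if the buffer never fills.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <net/bpf.h>
#include <poll.h>

static int
wait_readable(int fd)
{
        struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        if (ioctl(fd, BIOCSRTIMEOUT, &tv) < 0)  /* 1-second read timeout */
                return (-1);
        return (poll(&pfd, 1, INFTIM));         /* data, timeout, or error */
}
#endif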
 2128 
 2129 /*
 2130  * Support for kevent() system call.  Register EVFILT_READ filters and
 2131  * reject all others.
 2132  */
 2133 int
 2134 bpfkqfilter(struct cdev *dev, struct knote *kn)
 2135 {
 2136         struct bpf_d *d;
 2137 
 2138         if (devfs_get_cdevpriv((void **)&d) != 0 ||
 2139             kn->kn_filter != EVFILT_READ)
 2140                 return (1);
 2141 
 2142         /*
 2143          * Refresh PID associated with this descriptor.
 2144          */
 2145         BPFD_LOCK(d);
 2146         BPF_PID_REFRESH_CUR(d);
 2147         kn->kn_fop = &bpfread_filtops;
 2148         kn->kn_hook = d;
 2149         knlist_add(&d->bd_sel.si_note, kn, 1);
 2150         BPFD_UNLOCK(d);
 2151 
 2152         return (0);
 2153 }
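
/*
 * Illustrative userland sketch (not part of this file): registering
 * an EVFILT_READ knote on a bpf descriptor, which is accepted by
 * bpfkqfilter() above; any other filter type is rejected.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>

static int
wait_kevent(int kq, int bpffd)
{
        struct kevent ev;

        EV_SET(&ev, bpffd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) < 0)      /* register */
                return (-1);
        return (kevent(kq, NULL, 0, &ev, 1, NULL));     /* wait for data */
}
#endif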
 2154 
 2155 static void
 2156 filt_bpfdetach(struct knote *kn)
 2157 {
 2158         struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
 2159 
 2160         knlist_remove(&d->bd_sel.si_note, kn, 0);
 2161 }
 2162 
 2163 static int
 2164 filt_bpfread(struct knote *kn, long hint)
 2165 {
 2166         struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
 2167         int ready;
 2168 
 2169         BPFD_LOCK_ASSERT(d);
 2170         ready = bpf_ready(d);
 2171         if (ready) {
 2172                 kn->kn_data = d->bd_slen;
 2173                 /*
 2174                  * Ignore the hold buffer if it is being copied to user space.
 2175                  */
 2176                 if (!d->bd_hbuf_in_use && d->bd_hbuf)
 2177                         kn->kn_data += d->bd_hlen;
 2178         } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
 2179                 callout_reset(&d->bd_callout, d->bd_rtout,
 2180                     bpf_timed_out, d);
 2181                 d->bd_state = BPF_WAITING;
 2182         }
 2183 
 2184         return (ready);
 2185 }
 2186 
 2187 #define BPF_TSTAMP_NONE         0
 2188 #define BPF_TSTAMP_FAST         1
 2189 #define BPF_TSTAMP_NORMAL       2
 2190 #define BPF_TSTAMP_EXTERN       3
 2191 
 2192 static int
 2193 bpf_ts_quality(int tstype)
 2194 {
 2195 
 2196         if (tstype == BPF_T_NONE)
 2197                 return (BPF_TSTAMP_NONE);
 2198         if ((tstype & BPF_T_FAST) != 0)
 2199                 return (BPF_TSTAMP_FAST);
 2200 
 2201         return (BPF_TSTAMP_NORMAL);
 2202 }
 2203 
 2204 static int
 2205 bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
 2206 {
 2207         struct m_tag *tag;
 2208         int quality;
 2209 
 2210         quality = bpf_ts_quality(tstype);
 2211         if (quality == BPF_TSTAMP_NONE)
 2212                 return (quality);
 2213 
 2214         if (m != NULL) {
 2215                 tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
 2216                 if (tag != NULL) {
 2217                         *bt = *(struct bintime *)(tag + 1);
 2218                         return (BPF_TSTAMP_EXTERN);
 2219                 }
 2220         }
 2221         if (quality == BPF_TSTAMP_NORMAL)
 2222                 binuptime(bt);
 2223         else
 2224                 getbinuptime(bt);
 2225 
 2226         return (quality);
 2227 }
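
/*
 * Illustrative userland sketch (not part of this file): choosing a
 * cheaper timestamp with BIOCSTSTAMP.  A BPF_T_FAST format makes
 * bpf_gettime() above use getbinuptime() rather than binuptime();
 * the flag values here are as defined in net/bpf.h.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>

static int
use_fast_timestamps(int fd)
{
        u_int fmt = BPF_T_MICROTIME | BPF_T_FAST;

        return (ioctl(fd, BIOCSTSTAMP, &fmt));
}
#endif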
 2228 
 2229 /*
 2230  * Incoming linkage from device drivers.  Process the packet pkt, of length
 2231  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 2232  * by each process' filter, and if accepted, stashed into the corresponding
 2233  * buffer.
 2234  */
 2235 void
 2236 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
 2237 {
 2238         struct epoch_tracker et;
 2239         struct bintime bt;
 2240         struct bpf_d *d;
 2241 #ifdef BPF_JITTER
 2242         bpf_jit_filter *bf;
 2243 #endif
 2244         u_int slen;
 2245         int gottime;
 2246 
 2247         gottime = BPF_TSTAMP_NONE;
 2248         NET_EPOCH_ENTER(et);
 2249         CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
 2250                 counter_u64_add(d->bd_rcount, 1);
 2251                 /*
 2252                  * NB: We don't call BPF_CHECK_DIRECTION() here since there
 2253                  * is no way for the caller to indicate to us whether this
 2254                  * packet is inbound or outbound. In the bpf_mtap() routines,
 2255                  * we use the interface pointers on the mbuf to figure it out.
 2256                  */
 2257 #ifdef BPF_JITTER
 2258                 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
 2259                 if (bf != NULL)
 2260                         slen = (*(bf->func))(pkt, pktlen, pktlen);
 2261                 else
 2262 #endif
 2263                 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
 2264                 if (slen != 0) {
 2265                         /*
 2266                          * Filter matches; let's acquire the write lock.
 2267                          */
 2268                         BPFD_LOCK(d);
 2269                         counter_u64_add(d->bd_fcount, 1);
 2270                         if (gottime < bpf_ts_quality(d->bd_tstamp))
 2271                                 gottime = bpf_gettime(&bt, d->bd_tstamp,
 2272                                     NULL);
 2273 #ifdef MAC
 2274                         if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
 2275 #endif
 2276                                 catchpacket(d, pkt, pktlen, slen,
 2277                                     bpf_append_bytes, &bt);
 2278                         BPFD_UNLOCK(d);
 2279                 }
 2280         }
 2281         NET_EPOCH_EXIT(et);
 2282 }
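
/*
 * Illustrative driver-side sketch (not part of this file): handing a
 * contiguous frame to bpf_tap() from a receive path, guarded so the
 * filter machinery runs only when a listener is attached.  This
 * mirrors the BPF_TAP() convenience macro in net/bpf.h; mbuf-based
 * drivers use bpf_mtap()/BPF_MTAP() instead.
 */
#if 0
static void
drv_rx_frame(struct ifnet *ifp, u_char *frame, u_int len)
{
        if (bpf_peers_present(ifp->if_bpf))
                bpf_tap(ifp->if_bpf, frame, len);
}
#endif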
 2283 
 2284 #define BPF_CHECK_DIRECTION(d, r, i)                            \
 2285             (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||   \
 2286             ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
 2287 
 2288 /*
 2289  * Incoming linkage from device drivers, when packet is in an mbuf chain.
 2290  * Locking model is explained in bpf_tap().
 2291  */
 2292 void
 2293 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
 2294 {
 2295         struct epoch_tracker et;
 2296         struct bintime bt;
 2297         struct bpf_d *d;
 2298 #ifdef BPF_JITTER
 2299         bpf_jit_filter *bf;
 2300 #endif
 2301         u_int pktlen, slen;
 2302         int gottime;
 2303 
 2304         /* Skip outgoing duplicate packets. */
 2305         if ((m->m_flags & M_PROMISC) != 0 && m_rcvif(m) == NULL) {
 2306                 m->m_flags &= ~M_PROMISC;
 2307                 return;
 2308         }
 2309 
 2310         pktlen = m_length(m, NULL);
 2311         gottime = BPF_TSTAMP_NONE;
 2312 
 2313         NET_EPOCH_ENTER(et);
 2314         CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
 2315                 if (BPF_CHECK_DIRECTION(d, m_rcvif(m), bp->bif_ifp))
 2316                         continue;
 2317                 counter_u64_add(d->bd_rcount, 1);
 2318 #ifdef BPF_JITTER
 2319                 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
 2320                 /* XXX We cannot handle multiple mbufs. */
 2321                 if (bf != NULL && m->m_next == NULL)
 2322                         slen = (*(bf->func))(mtod(m, u_char *), pktlen,
 2323                             pktlen);
 2324                 else
 2325 #endif
 2326                 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
 2327                 if (slen != 0) {
 2328                         BPFD_LOCK(d);
 2329 
 2330                         counter_u64_add(d->bd_fcount, 1);
 2331                         if (gottime < bpf_ts_quality(d->bd_tstamp))
 2332                                 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
 2333 #ifdef MAC
 2334                         if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
 2335 #endif
 2336                                 catchpacket(d, (u_char *)m, pktlen, slen,
 2337                                     bpf_append_mbuf, &bt);
 2338                         BPFD_UNLOCK(d);
 2339                 }
 2340         }
 2341         NET_EPOCH_EXIT(et);
 2342 }
 2343 
 2344 /*
 2345  * Incoming linkage from device drivers, when packet is in
 2346  * an mbuf chain and to be prepended by a contiguous header.
 2347  */
 2348 void
 2349 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
 2350 {
 2351         struct epoch_tracker et;
 2352         struct bintime bt;
 2353         struct mbuf mb;
 2354         struct bpf_d *d;
 2355         u_int pktlen, slen;
 2356         int gottime;
 2357 
 2358         /* Skip outgoing duplicate packets. */
 2359         if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
 2360                 m->m_flags &= ~M_PROMISC;
 2361                 return;
 2362         }
 2363 
 2364         pktlen = m_length(m, NULL);
 2365         /*
 2366          * Craft on-stack mbuf suitable for passing to bpf_filter.
 2367          * Note that we cut corners here; we only set up what's
 2368          * absolutely needed--this mbuf should never go anywhere else.
 2369          */
 2370         mb.m_flags = 0;
 2371         mb.m_next = m;
 2372         mb.m_data = data;
 2373         mb.m_len = dlen;
 2374         pktlen += dlen;
 2375 
 2376         gottime = BPF_TSTAMP_NONE;
 2377 
 2378         NET_EPOCH_ENTER(et);
 2379         CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
 2380                 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
 2381                         continue;
 2382                 counter_u64_add(d->bd_rcount, 1);
 2383                 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
 2384                 if (slen != 0) {
 2385                         BPFD_LOCK(d);
 2386 
 2387                         counter_u64_add(d->bd_fcount, 1);
 2388                         if (gottime < bpf_ts_quality(d->bd_tstamp))
 2389                                 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
 2390 #ifdef MAC
 2391                         if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
 2392 #endif
 2393                                 catchpacket(d, (u_char *)&mb, pktlen, slen,
 2394                                     bpf_append_mbuf, &bt);
 2395                         BPFD_UNLOCK(d);
 2396                 }
 2397         }
 2398         NET_EPOCH_EXIT(et);
 2399 }
 2400 
 2401 #undef  BPF_CHECK_DIRECTION
 2402 #undef  BPF_TSTAMP_NONE
 2403 #undef  BPF_TSTAMP_FAST
 2404 #undef  BPF_TSTAMP_NORMAL
 2405 #undef  BPF_TSTAMP_EXTERN
 2406 
 2407 static int
 2408 bpf_hdrlen(struct bpf_d *d)
 2409 {
 2410         int hdrlen;
 2411 
 2412         hdrlen = d->bd_bif->bif_hdrlen;
 2413 #ifndef BURN_BRIDGES
 2414         if (d->bd_tstamp == BPF_T_NONE ||
 2415             BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
 2416 #ifdef COMPAT_FREEBSD32
 2417                 if (d->bd_compat32)
 2418                         hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
 2419                 else
 2420 #endif
 2421                         hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
 2422         else
 2423 #endif
 2424                 hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
 2425 #ifdef COMPAT_FREEBSD32
 2426         if (d->bd_compat32)
 2427                 hdrlen = BPF_WORDALIGN32(hdrlen);
 2428         else
 2429 #endif
 2430                 hdrlen = BPF_WORDALIGN(hdrlen);
 2431 
 2432         return (hdrlen - d->bd_bif->bif_hdrlen);
 2433 }
 2434 
 2435 static void
 2436 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
 2437 {
 2438         struct bintime bt2, boottimebin;
 2439         struct timeval tsm;
 2440         struct timespec tsn;
 2441 
 2442         if ((tstype & BPF_T_MONOTONIC) == 0) {
 2443                 bt2 = *bt;
 2444                 getboottimebin(&boottimebin);
 2445                 bintime_add(&bt2, &boottimebin);
 2446                 bt = &bt2;
 2447         }
 2448         switch (BPF_T_FORMAT(tstype)) {
 2449         case BPF_T_MICROTIME:
 2450                 bintime2timeval(bt, &tsm);
 2451                 ts->bt_sec = tsm.tv_sec;
 2452                 ts->bt_frac = tsm.tv_usec;
 2453                 break;
 2454         case BPF_T_NANOTIME:
 2455                 bintime2timespec(bt, &tsn);
 2456                 ts->bt_sec = tsn.tv_sec;
 2457                 ts->bt_frac = tsn.tv_nsec;
 2458                 break;
 2459         case BPF_T_BINTIME:
 2460                 ts->bt_sec = bt->sec;
 2461                 ts->bt_frac = bt->frac;
 2462                 break;
 2463         }
 2464 }
 2465 
 2466 /*
 2467  * Move the packet data from interface memory (pkt) into the
 2468  * store buffer.  "cpfn" is the routine called to do the actual data
 2469  * transfer.  bcopy is passed in to copy contiguous chunks, while
 2470  * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 2471  * pkt is really an mbuf.
 2472  */
 2473 static void
 2474 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
 2475     void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
 2476     struct bintime *bt)
 2477 {
 2478         struct bpf_xhdr hdr;
 2479 #ifndef BURN_BRIDGES
 2480         struct bpf_hdr hdr_old;
 2481 #ifdef COMPAT_FREEBSD32
 2482         struct bpf_hdr32 hdr32_old;
 2483 #endif
 2484 #endif
 2485         int caplen, curlen, hdrlen, totlen;
 2486         int do_wakeup = 0;
 2487         int do_timestamp;
 2488         int tstype;
 2489 
 2490         BPFD_LOCK_ASSERT(d);
 2491         if (d->bd_bif == NULL) {
 2492                 /* Descriptor was detached in a concurrent thread */
 2493                 counter_u64_add(d->bd_dcount, 1);
 2494                 return;
 2495         }
 2496 
 2497         /*
 2498          * Detect whether user space has released a buffer back to us, and if
 2499          * so, move it from being a hold buffer to a free buffer.  This may
 2500          * not be the best place to do it (for example, we might only want to
 2501          * run this check if we need the space), but for now it's a reliable
 2502          * spot to do it.
 2503          */
 2504         if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
 2505                 d->bd_fbuf = d->bd_hbuf;
 2506                 d->bd_hbuf = NULL;
 2507                 d->bd_hlen = 0;
 2508                 bpf_buf_reclaimed(d);
 2509         }
 2510 
 2511         /*
 2512          * Figure out how many bytes to move.  If the packet is
 2513          * greater or equal to the snapshot length, transfer that
 2514          * much.  Otherwise, transfer the whole packet (unless
 2515          * we hit the buffer size limit).
 2516          */
 2517         hdrlen = bpf_hdrlen(d);
 2518         totlen = hdrlen + min(snaplen, pktlen);
 2519         if (totlen > d->bd_bufsize)
 2520                 totlen = d->bd_bufsize;
 2521 
 2522         /*
 2523          * Round up the end of the previous packet to the next longword.
 2524          *
 2525          * Drop the packet if there's no room and no hope of room.
 2526          * If the packet would overflow the storage buffer or the storage
 2527          * buffer is considered immutable by the buffer model, try to rotate
 2528          * the buffer and wakeup pending processes.
 2529          */
 2530 #ifdef COMPAT_FREEBSD32
 2531         if (d->bd_compat32)
 2532                 curlen = BPF_WORDALIGN32(d->bd_slen);
 2533         else
 2534 #endif
 2535                 curlen = BPF_WORDALIGN(d->bd_slen);
 2536         if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
 2537                 if (d->bd_fbuf == NULL) {
 2538                         /*
 2539                          * There's no room in the store buffer, and no
 2540                          * prospect of room, so drop the packet.  Notify the
 2541                          * buffer model.
 2542                          */
 2543                         bpf_buffull(d);
 2544                         counter_u64_add(d->bd_dcount, 1);
 2545                         return;
 2546                 }
 2547                 KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
 2548                 ROTATE_BUFFERS(d);
 2549                 do_wakeup = 1;
 2550                 curlen = 0;
 2551         } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
 2552                 /*
 2553                  * Immediate mode is set, or the read timeout has already
 2554                  * expired during a select call.  A packet arrived, so the
 2555                  * reader should be woken up.
 2556                  */
 2557                 do_wakeup = 1;
 2558         caplen = totlen - hdrlen;
 2559         tstype = d->bd_tstamp;
 2560         do_timestamp = tstype != BPF_T_NONE;
 2561 #ifndef BURN_BRIDGES
 2562         if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
 2563                 struct bpf_ts ts;
 2564                 if (do_timestamp)
 2565                         bpf_bintime2ts(bt, &ts, tstype);
 2566 #ifdef COMPAT_FREEBSD32
 2567                 if (d->bd_compat32) {
 2568                         bzero(&hdr32_old, sizeof(hdr32_old));
 2569                         if (do_timestamp) {
 2570                                 hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
 2571                                 hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
 2572                         }
 2573                         hdr32_old.bh_datalen = pktlen;
 2574                         hdr32_old.bh_hdrlen = hdrlen;
 2575                         hdr32_old.bh_caplen = caplen;
 2576                         bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
 2577                             sizeof(hdr32_old));
 2578                         goto copy;
 2579                 }
 2580 #endif
 2581                 bzero(&hdr_old, sizeof(hdr_old));
 2582                 if (do_timestamp) {
 2583                         hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
 2584                         hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
 2585                 }
 2586                 hdr_old.bh_datalen = pktlen;
 2587                 hdr_old.bh_hdrlen = hdrlen;
 2588                 hdr_old.bh_caplen = caplen;
 2589                 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
 2590                     sizeof(hdr_old));
 2591                 goto copy;
 2592         }
 2593 #endif
 2594 
 2595         /*
 2596          * Append the bpf header.  Note we append the actual header size, but
 2597          * move forward the length of the header plus padding.
 2598          */
 2599         bzero(&hdr, sizeof(hdr));
 2600         if (do_timestamp)
 2601                 bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
 2602         hdr.bh_datalen = pktlen;
 2603         hdr.bh_hdrlen = hdrlen;
 2604         hdr.bh_caplen = caplen;
 2605         bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
 2606 
 2607         /*
 2608          * Copy the packet data into the store buffer and update its length.
 2609          */
 2610 #ifndef BURN_BRIDGES
 2611 copy:
 2612 #endif
 2613         (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
 2614         d->bd_slen = curlen + totlen;
 2615 
 2616         if (do_wakeup)
 2617                 bpf_wakeup(d);
 2618 }
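
/*
 * Illustrative userland sketch (not part of this file): walking the
 * records that catchpacket() lays out in the store buffer.  Each
 * record is a header, the captured bytes, and padding out to the next
 * word boundary; the default microsecond format (struct bpf_hdr) is
 * assumed, and handle_packet() is a hypothetical consumer.
 */
#if 0
#include <sys/types.h>
#include <net/bpf.h>

static void handle_packet(u_char *pkt, u_int caplen);   /* hypothetical */

static void
parse_buffer(u_char *buf, ssize_t n)
{
        u_char *p = buf;

        while (p < buf + n) {
                struct bpf_hdr *bh = (struct bpf_hdr *)p;

                handle_packet(p + bh->bh_hdrlen, bh->bh_caplen);
                /* Advance past header, data, and alignment padding. */
                p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
        }
}
#endif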
 2619 
 2620 /*
 2621  * Free buffers currently in use by a descriptor.
 2622  * Called on close.
 2623  */
 2624 static void
 2625 bpfd_free(epoch_context_t ctx)
 2626 {
 2627         struct bpf_d *d;
 2628         struct bpf_program_buffer *p;
 2629 
 2630         /*
 2631          * We don't need to lock out interrupts since this descriptor has
 2632          * been detached from its interface and has not yet been marked
 2633          * free.
 2634          */
 2635         d = __containerof(ctx, struct bpf_d, epoch_ctx);
 2636         bpf_free(d);
 2637         if (d->bd_rfilter != NULL) {
 2638                 p = __containerof((void *)d->bd_rfilter,
 2639                     struct bpf_program_buffer, buffer);
 2640 #ifdef BPF_JITTER
 2641                 p->func = d->bd_bfilter;
 2642 #endif
 2643                 bpf_program_buffer_free(&p->epoch_ctx);
 2644         }
 2645         if (d->bd_wfilter != NULL) {
 2646                 p = __containerof((void *)d->bd_wfilter,
 2647                     struct bpf_program_buffer, buffer);
 2648 #ifdef BPF_JITTER
 2649                 p->func = NULL;
 2650 #endif
 2651                 bpf_program_buffer_free(&p->epoch_ctx);
 2652         }
 2653 
 2654         mtx_destroy(&d->bd_lock);
 2655         counter_u64_free(d->bd_rcount);
 2656         counter_u64_free(d->bd_dcount);
 2657         counter_u64_free(d->bd_fcount);
 2658         counter_u64_free(d->bd_wcount);
 2659         counter_u64_free(d->bd_wfcount);
 2660         counter_u64_free(d->bd_wdcount);
 2661         counter_u64_free(d->bd_zcopy);
 2662         free(d, M_BPF);
 2663 }
 2664 
 2665 /*
 2666  * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 2667  * fixed size of the link header (variable length headers not yet supported).
 2668  */
 2669 void
 2670 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
 2671 {
 2672 
 2673         bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
 2674 }
 2675 
 2676 /*
 2677  * Attach an interface to bpf.  ifp is a pointer to the structure
 2678  * defining the interface to be attached, dlt is the link layer type,
 2679  * and hdrlen is the fixed size of the link header (variable length
 2680  * headers are not yet supported).
 2681  */
 2682 void
 2683 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen,
 2684     struct bpf_if **driverp)
 2685 {
 2686         struct bpf_if *bp;
 2687 
 2688         KASSERT(*driverp == NULL,
 2689             ("bpfattach2: driverp already initialized"));
 2690 
 2691         bp = malloc(sizeof(*bp), M_BPF, M_WAITOK | M_ZERO);
 2692 
 2693         CK_LIST_INIT(&bp->bif_dlist);
 2694         CK_LIST_INIT(&bp->bif_wlist);
 2695         bp->bif_ifp = ifp;
 2696         bp->bif_dlt = dlt;
 2697         bp->bif_hdrlen = hdrlen;
 2698         bp->bif_bpf = driverp;
 2699         bp->bif_refcnt = 1;
 2700         *driverp = bp;
 2701         /*
 2702          * Reference the ifnet pointer, so it won't be freed until
 2703          * we release it.
 2704          */
 2705         if_ref(ifp);
 2706         BPF_LOCK();
 2707         CK_LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
 2708         BPF_UNLOCK();
 2709 
 2710         if (bootverbose && IS_DEFAULT_VNET(curvnet))
 2711                 if_printf(ifp, "bpf attached\n");
 2712 }
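
/*
 * Illustrative driver-side sketch (not part of this file): a typical
 * attach/detach pairing for an Ethernet-like interface.  For real
 * Ethernet drivers, ether_ifattach() performs the bpfattach() call.
 */
#if 0
#include <net/ethernet.h>

static void
drv_bpf_attach(struct ifnet *ifp)
{
        /* DLT_EN10MB frames with a fixed 14-byte link header. */
        bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
}

static void
drv_bpf_detach(struct ifnet *ifp)
{
        bpfdetach(ifp);
}
#endif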
 2713 
 2714 #ifdef VIMAGE
 2715 /*
 2716  * When moving interfaces between vnet instances we need a way to
 2717  * query the dlt and hdrlen before detach so we can re-attach the if_bpf
 2718  * after the vmove.  We unfortunately have no device driver infrastructure
 2719  * to query the interface for these values after creation/attach, so we
 2720  * add this as a workaround.
 2721  */
 2722 int
 2723 bpf_get_bp_params(struct bpf_if *bp, u_int *bif_dlt, u_int *bif_hdrlen)
 2724 {
 2725 
 2726         if (bp == NULL)
 2727                 return (ENXIO);
 2728         if (bif_dlt == NULL && bif_hdrlen == NULL)
 2729                 return (0);
 2730 
 2731         if (bif_dlt != NULL)
 2732                 *bif_dlt = bp->bif_dlt;
 2733         if (bif_hdrlen != NULL)
 2734                 *bif_hdrlen = bp->bif_hdrlen;
 2735 
 2736         return (0);
 2737 }
 2738 #endif
 2739 
 2740 /*
 2741  * Detach bpf from an interface. This involves detaching each descriptor
 2742  * associated with the interface. Notify each descriptor as it's detached
 2743  * so that any sleepers wake up and get ENXIO.
 2744  */
 2745 void
 2746 bpfdetach(struct ifnet *ifp)
 2747 {
 2748         struct bpf_if *bp, *bp_temp;
 2749         struct bpf_d *d;
 2750 
 2751         BPF_LOCK();
 2752         /* Find all bpf_if structs which reference ifp and detach them. */
 2753         CK_LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
 2754                 if (ifp != bp->bif_ifp)
 2755                         continue;
 2756 
 2757                 CK_LIST_REMOVE(bp, bif_next);
 2758                 *bp->bif_bpf = (struct bpf_if *)&dead_bpf_if;
 2759 
 2760                 CTR4(KTR_NET,
 2761                     "%s: scheduling free for encap %d (%p) for if %p",
 2762                     __func__, bp->bif_dlt, bp, ifp);
 2763 
 2764                 /* Detach common descriptors */
 2765                 while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) {
 2766                         bpf_detachd_locked(d, true);
 2767                 }
 2768 
 2769                 /* Detach writer-only descriptors */
 2770                 while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) {
 2771                         bpf_detachd_locked(d, true);
 2772                 }
 2773                 bpfif_rele(bp);
 2774         }
 2775         BPF_UNLOCK();
 2776 }
 2777 
 2778 /*
 2779  * Get the list of data link types available on the interface.
 2780  */
 2781 static int
 2782 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
 2783 {
 2784         struct ifnet *ifp;
 2785         struct bpf_if *bp;
 2786         u_int *lst;
 2787         int error, n, n1;
 2788 
 2789         BPF_LOCK_ASSERT();
 2790 
 2791         ifp = d->bd_bif->bif_ifp;
 2792         n1 = 0;
 2793         CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
 2794                 if (bp->bif_ifp == ifp)
 2795                         n1++;
 2796         }
 2797         if (bfl->bfl_list == NULL) {
 2798                 bfl->bfl_len = n1;
 2799                 return (0);
 2800         }
 2801         if (n1 > bfl->bfl_len)
 2802                 return (ENOMEM);
 2803 
 2804         lst = malloc(n1 * sizeof(u_int), M_TEMP, M_WAITOK);
 2805         n = 0;
 2806         CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
 2807                 if (bp->bif_ifp != ifp)
 2808                         continue;
 2809                 lst[n++] = bp->bif_dlt;
 2810         }
 2811         error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n);
 2812         free(lst, M_TEMP);
 2813         bfl->bfl_len = n;
 2814         return (error);
 2815 }
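
/*
 * Illustrative userland sketch (not part of this file): the two-call
 * pattern served by bpf_getdltlist() above -- first with a NULL list
 * so bfl_len reports the count, then with an allocated array.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdlib.h>
#include <string.h>

static int
get_dlts(int fd, struct bpf_dltlist *bfl)
{
        memset(bfl, 0, sizeof(*bfl));
        if (ioctl(fd, BIOCGDLTLIST, bfl) < 0)   /* learn the count */
                return (-1);
        bfl->bfl_list = malloc(bfl->bfl_len * sizeof(u_int));
        if (bfl->bfl_list == NULL)
                return (-1);
        return (ioctl(fd, BIOCGDLTLIST, bfl));  /* fill the array */
}
#endif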
 2816 
 2817 /*
 2818  * Set the data link type of a BPF instance.
 2819  */
 2820 static int
 2821 bpf_setdlt(struct bpf_d *d, u_int dlt)
 2822 {
 2823         int error, opromisc;
 2824         struct ifnet *ifp;
 2825         struct bpf_if *bp;
 2826 
 2827         BPF_LOCK_ASSERT();
 2828         MPASS(d->bd_bif != NULL);
 2829 
 2830         /*
 2831          * It is safe to check bd_bif without BPFD_LOCK; it cannot be
 2832          * changed while we hold the global lock.
 2833          */
 2834         if (d->bd_bif->bif_dlt == dlt)
 2835                 return (0);
 2836 
 2837         ifp = d->bd_bif->bif_ifp;
 2838         CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
 2839                 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
 2840                         break;
 2841         }
 2842         if (bp == NULL)
 2843                 return (EINVAL);
 2844 
 2845         opromisc = d->bd_promisc;
 2846         bpf_attachd(d, bp);
 2847         if (opromisc) {
 2848                 error = ifpromisc(bp->bif_ifp, 1);
 2849                 if (error)
 2850                         if_printf(bp->bif_ifp, "%s: ifpromisc failed (%d)\n",
 2851                             __func__, error);
 2852                 else
 2853                         d->bd_promisc = 1;
 2854         }
 2855         return (0);
 2856 }
 2857 
 2858 static void
 2859 bpf_drvinit(void *unused)
 2860 {
 2861         struct cdev *dev;
 2862 
 2863         sx_init(&bpf_sx, "bpf global lock");
 2864         CK_LIST_INIT(&bpf_iflist);
 2865 
 2866         dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
 2867         /* For compatibility */
 2868         make_dev_alias(dev, "bpf0");
 2869 }
 2870 
 2871 /*
 2872  * Zero out the various packet counters associated with all of the bpf
 2873  * descriptors.  At some point, we will probably want to get a bit more
 2874  * granular and allow the user to specify descriptors to be zeroed.
 2875  */
 2876 static void
 2877 bpf_zero_counters(void)
 2878 {
 2879         struct bpf_if *bp;
 2880         struct bpf_d *bd;
 2881 
 2882         BPF_LOCK();
 2883         /*
 2884          * We are protected by the global lock here; interfaces and
 2885          * descriptors cannot be deleted while we hold it.
 2886          */
 2887         CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
 2888                 CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
 2889                         counter_u64_zero(bd->bd_rcount);
 2890                         counter_u64_zero(bd->bd_dcount);
 2891                         counter_u64_zero(bd->bd_fcount);
 2892                         counter_u64_zero(bd->bd_wcount);
 2893                         counter_u64_zero(bd->bd_wfcount);
 2894                         counter_u64_zero(bd->bd_zcopy);
 2895                 }
 2896         }
 2897         BPF_UNLOCK();
 2898 }
 2899 
 2900 /*
 2901  * Fill filter statistics
 2902  */
 2903 static void
 2904 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
 2905 {
 2906 
 2907         BPF_LOCK_ASSERT();
 2908         bzero(d, sizeof(*d));
 2909         d->bd_structsize = sizeof(*d);
 2910         d->bd_immediate = bd->bd_immediate;
 2911         d->bd_promisc = bd->bd_promisc;
 2912         d->bd_hdrcmplt = bd->bd_hdrcmplt;
 2913         d->bd_direction = bd->bd_direction;
 2914         d->bd_feedback = bd->bd_feedback;
 2915         d->bd_async = bd->bd_async;
 2916         d->bd_rcount = counter_u64_fetch(bd->bd_rcount);
 2917         d->bd_dcount = counter_u64_fetch(bd->bd_dcount);
 2918         d->bd_fcount = counter_u64_fetch(bd->bd_fcount);
 2919         d->bd_sig = bd->bd_sig;
 2920         d->bd_slen = bd->bd_slen;
 2921         d->bd_hlen = bd->bd_hlen;
 2922         d->bd_bufsize = bd->bd_bufsize;
 2923         d->bd_pid = bd->bd_pid;
 2924         strlcpy(d->bd_ifname,
 2925             bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
 2926         d->bd_locked = bd->bd_locked;
 2927         d->bd_wcount = counter_u64_fetch(bd->bd_wcount);
 2928         d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount);
 2929         d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount);
 2930         d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy);
 2931         d->bd_bufmode = bd->bd_bufmode;
 2932 }
 2933 
 2934 /*
 2935  * Handle `netstat -B' stats request
 2936  */
 2937 static int
 2938 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
 2939 {
 2940         static const struct xbpf_d zerostats;
 2941         struct xbpf_d *xbdbuf, *xbd, tempstats;
 2942         int index, error;
 2943         struct bpf_if *bp;
 2944         struct bpf_d *bd;
 2945 
 2946         /*
 2947          * XXX This is not technically correct. It is possible for non-
 2948          * privileged users to open bpf devices. It would make sense
 2949          * if the users who opened the devices were able to retrieve
 2950          * the statistics for them, too.
 2951          */
 2952         error = priv_check(req->td, PRIV_NET_BPF);
 2953         if (error)
 2954                 return (error);
 2955         /*
 2956          * Check to see if the user is requesting that the counters be
 2957          * zeroed out.  Explicitly check that the supplied data is zeroed,
 2958          * as we aren't allowing the user to set the counters currently.
 2959          */
 2960         if (req->newptr != NULL) {
 2961                 if (req->newlen != sizeof(tempstats))
 2962                         return (EINVAL);
 2963                 memset(&tempstats, 0, sizeof(tempstats));
 2964                 error = SYSCTL_IN(req, &tempstats, sizeof(tempstats));
 2965                 if (error)
 2966                         return (error);
 2967                 if (bcmp(&tempstats, &zerostats, sizeof(tempstats)) != 0)
 2968                         return (EINVAL);
 2969                 bpf_zero_counters();
 2970                 return (0);
 2971         }
 2972         if (req->oldptr == NULL)
 2973                 return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
 2974         if (bpf_bpfd_cnt == 0)
 2975                 return (SYSCTL_OUT(req, 0, 0));
 2976         xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
 2977         BPF_LOCK();
 2978         if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
 2979                 BPF_UNLOCK();
 2980                 free(xbdbuf, M_BPF);
 2981                 return (ENOMEM);
 2982         }
 2983         index = 0;
 2984         CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
 2985                 /* Send writers-only first */
 2986                 CK_LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
 2987                         xbd = &xbdbuf[index++];
 2988                         bpfstats_fill_xbpf(xbd, bd);
 2989                 }
 2990                 CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
 2991                         xbd = &xbdbuf[index++];
 2992                         bpfstats_fill_xbpf(xbd, bd);
 2993                 }
 2994         }
 2995         BPF_UNLOCK();
 2996         error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
 2997         free(xbdbuf, M_BPF);
 2998         return (error);
 2999 }
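
/*
 * Illustrative userland sketch (not part of this file): reading the
 * per-descriptor statistics this handler exports, as netstat -B does.
 * The handler is assumed to be registered as "net.bpf.stats" elsewhere
 * in this file.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <stdlib.h>

static struct xbpf_d *
fetch_bpf_stats(size_t *count)
{
        struct xbpf_d *buf;
        size_t len;

        if (sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0) < 0)
                return (NULL);                  /* probe for the size */
        if ((buf = malloc(len)) == NULL)
                return (NULL);
        if (sysctlbyname("net.bpf.stats", buf, &len, NULL, 0) < 0) {
                free(buf);
                return (NULL);
        }
        *count = len / sizeof(*buf);
        return (buf);
}
#endif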
 3000 
 3001 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
 3002 
 3003 #else /* !DEV_BPF && !NETGRAPH_BPF */
 3004 
 3005 /*
 3006  * NOP stubs to allow bpf-using drivers to load and function.
 3007  *
 3008  * A 'better' implementation would allow the core bpf functionality
 3009  * to be loaded at runtime.
 3010  */
 3011 
 3012 void
 3013 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
 3014 {
 3015 }
 3016 
 3017 void
 3018 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
 3019 {
 3020 }
 3021 
 3022 void
 3023 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
 3024 {
 3025 }
 3026 
 3027 void
 3028 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
 3029 {
 3030 
 3031         bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
 3032 }
 3033 
 3034 void
 3035 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
 3036 {
 3037 
 3038         *driverp = (struct bpf_if *)&dead_bpf_if;
 3039 }
 3040 
 3041 void
 3042 bpfdetach(struct ifnet *ifp)
 3043 {
 3044 }
 3045 
 3046 u_int
 3047 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
 3048 {
 3049         return (-1);    /* "no filter" behaviour */
 3050 }
 3051 
 3052 int
 3053 bpf_validate(const struct bpf_insn *f, int len)
 3054 {
 3055         return (0);             /* false */
 3056 }
 3057 
 3058 #endif /* !DEV_BPF && !NETGRAPH_BPF */
 3059 
 3060 #ifdef DDB
 3061 static void
 3062 bpf_show_bpf_if(struct bpf_if *bpf_if)
 3063 {
 3064 
 3065         if (bpf_if == NULL)
 3066                 return;
 3067         db_printf("%p:\n", bpf_if);
 3068 #define BPF_DB_PRINTF(f, e)     db_printf("   %s = " f "\n", #e, bpf_if->e);
 3069         /* bif_ext.bif_next */
 3070         /* bif_ext.bif_dlist */
 3071         BPF_DB_PRINTF("%#x", bif_dlt);
 3072         BPF_DB_PRINTF("%u", bif_hdrlen);
 3073         /* bif_wlist */
 3074         BPF_DB_PRINTF("%p", bif_ifp);
 3075         BPF_DB_PRINTF("%p", bif_bpf);
 3076         BPF_DB_PRINTF("%u", bif_refcnt);
 3077 }
 3078 
 3079 DB_SHOW_COMMAND(bpf_if, db_show_bpf_if)
 3080 {
 3081 
 3082         if (!have_addr) {
 3083                 db_printf("usage: show bpf_if <struct bpf_if *>\n");
 3084                 return;
 3085         }
 3086 
 3087         bpf_show_bpf_if((struct bpf_if *)addr);
 3088 }
 3089 #endif
