FreeBSD/Linux Kernel Cross Reference
sys/net/bpf.c
1 /*-
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/9.2/sys/net/bpf.c 251756 2013-06-14 18:56:37Z ghelmer $");
39
40 #include "opt_bpf.h"
41 #include "opt_compat.h"
42 #include "opt_netgraph.h"
43
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/lock.h>
47 #include <sys/rwlock.h>
48 #include <sys/systm.h>
49 #include <sys/conf.h>
50 #include <sys/fcntl.h>
51 #include <sys/jail.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/time.h>
55 #include <sys/priv.h>
56 #include <sys/proc.h>
57 #include <sys/signalvar.h>
58 #include <sys/filio.h>
59 #include <sys/sockio.h>
60 #include <sys/ttycom.h>
61 #include <sys/uio.h>
62
63 #include <sys/event.h>
64 #include <sys/file.h>
65 #include <sys/poll.h>
66 #include <sys/proc.h>
67
68 #include <sys/socket.h>
69
70 #include <net/if.h>
71 #define BPF_INTERNAL
72 #include <net/bpf.h>
73 #include <net/bpf_buffer.h>
74 #ifdef BPF_JITTER
75 #include <net/bpf_jitter.h>
76 #endif
77 #include <net/bpf_zerocopy.h>
78 #include <net/bpfdesc.h>
79 #include <net/vnet.h>
80
81 #include <netinet/in.h>
82 #include <netinet/if_ether.h>
83 #include <sys/kernel.h>
84 #include <sys/sysctl.h>
85
86 #include <net80211/ieee80211_freebsd.h>
87
88 #include <security/mac/mac_framework.h>
89
90 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
91
92 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
93
94 #define PRINET 26 /* interruptible */
95
96 #define SIZEOF_BPF_HDR(type) \
97 (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
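/*
 * Editorial note: SIZEOF_BPF_HDR() yields the size of a header structure
 * up to and including its bh_hdrlen member, deliberately excluding any
 * trailing struct padding.  For example, for the struct bpf_hdr32 defined
 * below it evaluates to 18 (8 + 4 + 4 + 2), whereas sizeof(struct
 * bpf_hdr32) rounds up to 20.
 */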
98
99 #ifdef COMPAT_FREEBSD32
100 #include <sys/mount.h>
101 #include <compat/freebsd32/freebsd32.h>
102 #define BPF_ALIGNMENT32 sizeof(int32_t)
103 #define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
104
105 #ifndef BURN_BRIDGES
106 /*
107 * 32-bit version of structure prepended to each packet. We use this header
108 * instead of the standard one for 32-bit streams. We mark the a stream as
109 * 32-bit the first time we see a 32-bit compat ioctl request.
110 */
111 struct bpf_hdr32 {
112 struct timeval32 bh_tstamp; /* time stamp */
113 uint32_t bh_caplen; /* length of captured portion */
114 uint32_t bh_datalen; /* original length of packet */
115 uint16_t bh_hdrlen; /* length of bpf header (this struct
116 plus alignment padding) */
117 };
118 #endif
119
120 struct bpf_program32 {
121 u_int bf_len;
122 uint32_t bf_insns;
123 };
124
125 struct bpf_dltlist32 {
126 u_int bfl_len;
127 u_int bfl_list;
128 };
129
130 #define BIOCSETF32 _IOW('B', 103, struct bpf_program32)
131 #define BIOCSRTIMEOUT32 _IOW('B', 109, struct timeval32)
132 #define BIOCGRTIMEOUT32 _IOR('B', 110, struct timeval32)
133 #define BIOCGDLTLIST32 _IOWR('B', 121, struct bpf_dltlist32)
134 #define BIOCSETWF32 _IOW('B', 123, struct bpf_program32)
135 #define BIOCSETFNR32 _IOW('B', 130, struct bpf_program32)
136 #endif
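/*
 * Editorial note: in the 32-bit compat structures above, the pointer-sized
 * members (bf_insns, bfl_list) are declared as plain 32-bit integers,
 * since a 32-bit process passes 32-bit user addresses through these
 * ioctls; the handlers widen them back into kernel pointers (see the
 * PTRIN() and uintptr_t casts in bpfioctl() and bpf_setf() below).
 */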
137
138 /*
139 * bpf_iflist is a list of BPF interface structures, each corresponding to a
140 * specific DLT. The same network interface might have several BPF interface
141 * structures registered by different layers in the stack (e.g., 802.11
142 * frames, Ethernet frames, etc.).
143 */
144 static LIST_HEAD(, bpf_if) bpf_iflist;
145 static struct mtx bpf_mtx; /* bpf global lock */
146 static int bpf_bpfd_cnt;
147
148 static void bpf_attachd(struct bpf_d *, struct bpf_if *);
149 static void bpf_detachd(struct bpf_d *);
150 static void bpf_detachd_locked(struct bpf_d *);
151 static void bpf_freed(struct bpf_d *);
152 static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
153 struct sockaddr *, int *, struct bpf_insn *);
154 static int bpf_setif(struct bpf_d *, struct ifreq *);
155 static void bpf_timed_out(void *);
156 static __inline void
157 bpf_wakeup(struct bpf_d *);
158 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
159 void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
160 struct bintime *);
161 static void reset_d(struct bpf_d *);
162 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
163 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
164 static int bpf_setdlt(struct bpf_d *, u_int);
165 static void filt_bpfdetach(struct knote *);
166 static int filt_bpfread(struct knote *, long);
167 static void bpf_drvinit(void *);
168 static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
169
170 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
171 int bpf_maxinsns = BPF_MAXINSNS;
172 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
173 &bpf_maxinsns, 0, "Maximum bpf program instructions");
174 static int bpf_zerocopy_enable = 0;
175 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
176 &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
177 static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
178 bpf_stats_sysctl, "bpf statistics portal");
179
180 static VNET_DEFINE(int, bpf_optimize_writers) = 0;
181 #define V_bpf_optimize_writers VNET(bpf_optimize_writers)
182 SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
183 CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
184 "Do not send packets until BPF program is set");
185
186 static d_open_t bpfopen;
187 static d_read_t bpfread;
188 static d_write_t bpfwrite;
189 static d_ioctl_t bpfioctl;
190 static d_poll_t bpfpoll;
191 static d_kqfilter_t bpfkqfilter;
192
193 static struct cdevsw bpf_cdevsw = {
194 .d_version = D_VERSION,
195 .d_open = bpfopen,
196 .d_read = bpfread,
197 .d_write = bpfwrite,
198 .d_ioctl = bpfioctl,
199 .d_poll = bpfpoll,
200 .d_name = "bpf",
201 .d_kqfilter = bpfkqfilter,
202 };
203
204 static struct filterops bpfread_filtops = {
205 .f_isfd = 1,
206 .f_detach = filt_bpfdetach,
207 .f_event = filt_bpfread,
208 };
209
210 eventhandler_tag bpf_ifdetach_cookie = NULL;
211
212 /*
213 * LOCKING MODEL USED BY BPF:
214 * Locks:
215 * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal,
216 * some global counters and every bpf_if reference.
217 * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters.
218 * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields
219 * used by bpf_mtap code.
220 *
221 * Lock order:
222 *
223 * Global lock, interface lock, descriptor lock
224 *
225 * We have to acquire the interface lock before the descriptor main lock due to
226 * the BPF_MTAP[2] working model. In many places (like bpf_detachd()) we start
227 * with a BPF descriptor (and we need to at least rlock it to get a reliable
228 * interface pointer). This gives us a potential LOR. As a result, we use the
229 * global lock to protect against bpf_if changes in every such place.
230 *
231 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
232 * 3) descriptor main wlock.
233 * Reading bd_bif can be protected by any of these locks, typically global lock.
234 *
235 * Changing read/write BPF filter is protected by the same three locks,
236 * the same applies for reading.
237 *
238 * Sleeping while holding the global lock is not allowed, since bpfdetach() uses it.
239 */
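/*
 * Editorial sketch (not part of the source): the acquisition order above,
 * as it appears in paths such as bpf_detachd()/bpf_attachd():
 *
 *	BPF_LOCK();		(1: global mutex)
 *	BPFIF_WLOCK(bp);	(2: interface rwlock)
 *	BPFD_LOCK(d);		(3: descriptor mutex)
 *	...
 *	BPFD_UNLOCK(d);
 *	BPFIF_WUNLOCK(bp);
 *	BPF_UNLOCK();
 */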
240
241 /*
242 * Wrapper functions for various buffering methods. If the set of buffer
243 * modes expands, we will probably want to introduce a switch data structure
244 * similar to protosw, etc.
245 */
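/*
 * Editorial sketch (not part of the source): one plausible shape for such
 * a dispatch structure; the name bpf_bufops and its members are purely
 * hypothetical:
 *
 *	struct bpf_bufops {
 *		void	(*bop_append_bytes)(struct bpf_d *, caddr_t, u_int,
 *			    void *, u_int);
 *		void	(*bop_append_mbuf)(struct bpf_d *, caddr_t, u_int,
 *			    void *, u_int);
 *		int	(*bop_canfreebuf)(struct bpf_d *);
 *		int	(*bop_canwritebuf)(struct bpf_d *);
 *		void	(*bop_free)(struct bpf_d *);
 *	};
 *
 * Each buffer mode would then supply one bpf_bufops instead of being
 * switched on in every wrapper below.
 */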
246 static void
247 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
248 u_int len)
249 {
250
251 BPFD_LOCK_ASSERT(d);
252
253 switch (d->bd_bufmode) {
254 case BPF_BUFMODE_BUFFER:
255 return (bpf_buffer_append_bytes(d, buf, offset, src, len));
256
257 case BPF_BUFMODE_ZBUF:
258 d->bd_zcopy++;
259 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
260
261 default:
262 panic("bpf_buf_append_bytes");
263 }
264 }
265
266 static void
267 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
268 u_int len)
269 {
270
271 BPFD_LOCK_ASSERT(d);
272
273 switch (d->bd_bufmode) {
274 case BPF_BUFMODE_BUFFER:
275 return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
276
277 case BPF_BUFMODE_ZBUF:
278 d->bd_zcopy++;
279 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
280
281 default:
282 panic("bpf_buf_append_mbuf");
283 }
284 }
285
286 /*
287 * This function gets called when the free buffer is re-assigned.
288 */
289 static void
290 bpf_buf_reclaimed(struct bpf_d *d)
291 {
292
293 BPFD_LOCK_ASSERT(d);
294
295 switch (d->bd_bufmode) {
296 case BPF_BUFMODE_BUFFER:
297 return;
298
299 case BPF_BUFMODE_ZBUF:
300 bpf_zerocopy_buf_reclaimed(d);
301 return;
302
303 default:
304 panic("bpf_buf_reclaimed");
305 }
306 }
307
308 /*
309 * If the buffer mechanism has a way to decide that a held buffer can be made
310 * free, then it is exposed via the bpf_canfreebuf() interface. (1) is
311 * returned if the buffer can be discarded, (0) is returned if it cannot.
312 */
313 static int
314 bpf_canfreebuf(struct bpf_d *d)
315 {
316
317 BPFD_LOCK_ASSERT(d);
318
319 switch (d->bd_bufmode) {
320 case BPF_BUFMODE_ZBUF:
321 return (bpf_zerocopy_canfreebuf(d));
322 }
323 return (0);
324 }
325
326 /*
327 * Allow the buffer model to indicate that the current store buffer is
328 * immutable, regardless of the appearance of space. Return (1) if the
329 * buffer is writable, and (0) if not.
330 */
331 static int
332 bpf_canwritebuf(struct bpf_d *d)
333 {
334 BPFD_LOCK_ASSERT(d);
335
336 switch (d->bd_bufmode) {
337 case BPF_BUFMODE_ZBUF:
338 return (bpf_zerocopy_canwritebuf(d));
339 }
340 return (1);
341 }
342
343 /*
344 * Notify buffer model that an attempt to write to the store buffer has
345 * resulted in a dropped packet, in which case the buffer may be considered
346 * full.
347 */
348 static void
349 bpf_buffull(struct bpf_d *d)
350 {
351
352 BPFD_LOCK_ASSERT(d);
353
354 switch (d->bd_bufmode) {
355 case BPF_BUFMODE_ZBUF:
356 bpf_zerocopy_buffull(d);
357 break;
358 }
359 }
360
361 /*
362 * Notify the buffer model that a buffer has moved into the hold position.
363 */
364 void
365 bpf_bufheld(struct bpf_d *d)
366 {
367
368 BPFD_LOCK_ASSERT(d);
369
370 switch (d->bd_bufmode) {
371 case BPF_BUFMODE_ZBUF:
372 bpf_zerocopy_bufheld(d);
373 break;
374 }
375 }
376
377 static void
378 bpf_free(struct bpf_d *d)
379 {
380
381 switch (d->bd_bufmode) {
382 case BPF_BUFMODE_BUFFER:
383 return (bpf_buffer_free(d));
384
385 case BPF_BUFMODE_ZBUF:
386 return (bpf_zerocopy_free(d));
387
388 default:
389 panic("bpf_buf_free");
390 }
391 }
392
393 static int
394 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
395 {
396
397 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
398 return (EOPNOTSUPP);
399 return (bpf_buffer_uiomove(d, buf, len, uio));
400 }
401
402 static int
403 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
404 {
405
406 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
407 return (EOPNOTSUPP);
408 return (bpf_buffer_ioctl_sblen(d, i));
409 }
410
411 static int
412 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
413 {
414
415 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
416 return (EOPNOTSUPP);
417 return (bpf_zerocopy_ioctl_getzmax(td, d, i));
418 }
419
420 static int
421 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
422 {
423
424 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
425 return (EOPNOTSUPP);
426 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
427 }
428
429 static int
430 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
431 {
432
433 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
434 return (EOPNOTSUPP);
435 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
436 }
437
438 /*
439 * General BPF functions.
440 */
441 static int
442 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
443 struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
444 {
445 const struct ieee80211_bpf_params *p;
446 struct ether_header *eh;
447 struct mbuf *m;
448 int error;
449 int len;
450 int hlen;
451 int slen;
452
453 /*
454 * Build a sockaddr based on the data link layer type.
455 * We do this at this level because the ethernet header
456 * is copied directly into the data field of the sockaddr.
457 * In the case of SLIP, there is no header and the packet
458 * is forwarded as is.
459 * Also, we are careful to leave room at the front of the mbuf
460 * for the link level header.
461 */
462 switch (linktype) {
463
464 case DLT_SLIP:
465 sockp->sa_family = AF_INET;
466 hlen = 0;
467 break;
468
469 case DLT_EN10MB:
470 sockp->sa_family = AF_UNSPEC;
471 /* XXX Would MAXLINKHDR be better? */
472 hlen = ETHER_HDR_LEN;
473 break;
474
475 case DLT_FDDI:
476 sockp->sa_family = AF_IMPLINK;
477 hlen = 0;
478 break;
479
480 case DLT_RAW:
481 sockp->sa_family = AF_UNSPEC;
482 hlen = 0;
483 break;
484
485 case DLT_NULL:
486 /*
487 * null interface types require a 4 byte pseudo header which
488 * corresponds to the address family of the packet.
489 */
490 sockp->sa_family = AF_UNSPEC;
491 hlen = 4;
492 break;
493
494 case DLT_ATM_RFC1483:
495 /*
496 * en atm driver requires 4-byte atm pseudo header.
497 * though it isn't standard, vpi:vci needs to be
498 * specified anyway.
499 */
500 sockp->sa_family = AF_UNSPEC;
501 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
502 break;
503
504 case DLT_PPP:
505 sockp->sa_family = AF_UNSPEC;
506 hlen = 4; /* This should match PPP_HDRLEN */
507 break;
508
509 case DLT_IEEE802_11: /* IEEE 802.11 wireless */
510 sockp->sa_family = AF_IEEE80211;
511 hlen = 0;
512 break;
513
514 case DLT_IEEE802_11_RADIO: /* IEEE 802.11 wireless w/ phy params */
515 sockp->sa_family = AF_IEEE80211;
516 sockp->sa_len = 12; /* XXX != 0 */
517 hlen = sizeof(struct ieee80211_bpf_params);
518 break;
519
520 default:
521 return (EIO);
522 }
523
524 len = uio->uio_resid;
525
526 if (len - hlen > ifp->if_mtu)
527 return (EMSGSIZE);
528
529 if ((unsigned)len > MJUM16BYTES)
530 return (EIO);
531
532 if (len <= MHLEN)
533 MGETHDR(m, M_WAIT, MT_DATA);
534 else if (len <= MCLBYTES)
535 m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
536 else
537 m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
538 #if (MJUMPAGESIZE > MCLBYTES)
539 len <= MJUMPAGESIZE ? MJUMPAGESIZE :
540 #endif
541 (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
542 m->m_pkthdr.len = m->m_len = len;
543 m->m_pkthdr.rcvif = NULL;
544 *mp = m;
545
546 if (m->m_len < hlen) {
547 error = EPERM;
548 goto bad;
549 }
550
551 error = uiomove(mtod(m, u_char *), len, uio);
552 if (error)
553 goto bad;
554
555 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
556 if (slen == 0) {
557 error = EPERM;
558 goto bad;
559 }
560
561 /* Check for multicast destination */
562 switch (linktype) {
563 case DLT_EN10MB:
564 eh = mtod(m, struct ether_header *);
565 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
566 if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
567 ETHER_ADDR_LEN) == 0)
568 m->m_flags |= M_BCAST;
569 else
570 m->m_flags |= M_MCAST;
571 }
572 break;
573 }
574
575 /*
576 * Make room for link header, and copy it to sockaddr
577 */
578 if (hlen != 0) {
579 if (sockp->sa_family == AF_IEEE80211) {
580 /*
581 * Collect true length from the parameter header
582 * NB: sockp is known to be zero'd so if we do a
583 * short copy unspecified parameters will be
584 * zero.
585 * NB: packet may not be aligned after stripping
586 * bpf params
587 * XXX check ibp_vers
588 */
589 p = mtod(m, const struct ieee80211_bpf_params *);
590 hlen = p->ibp_len;
591 if (hlen > sizeof(sockp->sa_data)) {
592 error = EINVAL;
593 goto bad;
594 }
595 }
596 bcopy(m->m_data, sockp->sa_data, hlen);
597 }
598 *hdrlen = hlen;
599
600 return (0);
601 bad:
602 m_freem(m);
603 return (error);
604 }
605
606 /*
607 * Attach file to the bpf interface, i.e. make d listen on bp.
608 */
609 static void
610 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
611 {
612 int op_w;
613
614 BPF_LOCK_ASSERT();
615
616 /*
617 * Save sysctl value to protect from sysctl change
618 * between reads
619 */
620 op_w = V_bpf_optimize_writers;
621
622 if (d->bd_bif != NULL)
623 bpf_detachd_locked(d);
624 /*
625 * Point d at bp, and add d to the interface's list.
626 * Since there are many applications using BPF for
627 * sending raw packets only (dhcpd and cdpd are good examples),
628 * we can delay adding d to the list of active listeners until
629 * some filter is configured.
630 */
631
632 BPFIF_WLOCK(bp);
633 BPFD_LOCK(d);
634
635 d->bd_bif = bp;
636
637 if (op_w != 0) {
638 /* Add to writers-only list */
639 LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
640 /*
641 * We decrement bd_writer on every filter set operation.
642 * The first BIOCSETF is done by pcap_open_live() to set up the
643 * snap length; after that the application usually sets its own filter.
644 */
645 d->bd_writer = 2;
646 } else
647 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
648
649 BPFD_UNLOCK(d);
650 BPFIF_WUNLOCK(bp);
651
652 bpf_bpfd_cnt++;
653
654 CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
655 __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
656
657 if (op_w == 0)
658 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
659 }
660
661 /*
662 * Add d to the list of active bp filters.
663 * Requires bpf_attachd() to be called first.
664 */
665 static void
666 bpf_upgraded(struct bpf_d *d)
667 {
668 struct bpf_if *bp;
669
670 BPF_LOCK_ASSERT();
671
672 bp = d->bd_bif;
673
674 /*
675 * Filter can be set several times without specifying interface.
676 * Mark d as reader and exit.
677 */
678 if (bp == NULL) {
679 BPFD_LOCK(d);
680 d->bd_writer = 0;
681 BPFD_UNLOCK(d);
682 return;
683 }
684
685 BPFIF_WLOCK(bp);
686 BPFD_LOCK(d);
687
688 /* Remove from writers-only list */
689 LIST_REMOVE(d, bd_next);
690 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
691 /* Mark d as reader */
692 d->bd_writer = 0;
693
694 BPFD_UNLOCK(d);
695 BPFIF_WUNLOCK(bp);
696
697 CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);
698
699 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
700 }
701
702 /*
703 * Detach a file from its interface.
704 */
705 static void
706 bpf_detachd(struct bpf_d *d)
707 {
708 BPF_LOCK();
709 bpf_detachd_locked(d);
710 BPF_UNLOCK();
711 }
712
713 static void
714 bpf_detachd_locked(struct bpf_d *d)
715 {
716 int error;
717 struct bpf_if *bp;
718 struct ifnet *ifp;
719
720 CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
721
722 BPF_LOCK_ASSERT();
723
724 /* Check if descriptor is attached */
725 if ((bp = d->bd_bif) == NULL)
726 return;
727
728 BPFIF_WLOCK(bp);
729 BPFD_LOCK(d);
730
731 /* Save bd_writer value */
732 error = d->bd_writer;
733
734 /*
735 * Remove d from the interface's descriptor list.
736 */
737 LIST_REMOVE(d, bd_next);
738
739 ifp = bp->bif_ifp;
740 d->bd_bif = NULL;
741 BPFD_UNLOCK(d);
742 BPFIF_WUNLOCK(bp);
743
744 bpf_bpfd_cnt--;
745
746 /* Call event handler iff d is attached */
747 if (error == 0)
748 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
749
750 /*
751 * Check if this descriptor had requested promiscuous mode.
752 * If so, turn it off.
753 */
754 if (d->bd_promisc) {
755 d->bd_promisc = 0;
756 CURVNET_SET(ifp->if_vnet);
757 error = ifpromisc(ifp, 0);
758 CURVNET_RESTORE();
759 if (error != 0 && error != ENXIO) {
760 /*
761 * ENXIO can happen if a pccard is unplugged.
762 * Something is really wrong if we were able to put
763 * the driver into promiscuous mode, but can't
764 * take it out.
765 */
766 if_printf(bp->bif_ifp,
767 "bpf_detach: ifpromisc failed (%d)\n", error);
768 }
769 }
770 }
771
772 /*
773 * Close the descriptor by detaching it from its interface,
774 * deallocating its buffers, and marking it free.
775 */
776 static void
777 bpf_dtor(void *data)
778 {
779 struct bpf_d *d = data;
780
781 BPFD_LOCK(d);
782 if (d->bd_state == BPF_WAITING)
783 callout_stop(&d->bd_callout);
784 d->bd_state = BPF_IDLE;
785 BPFD_UNLOCK(d);
786 funsetown(&d->bd_sigio);
787 bpf_detachd(d);
788 #ifdef MAC
789 mac_bpfdesc_destroy(d);
790 #endif /* MAC */
791 seldrain(&d->bd_sel);
792 knlist_destroy(&d->bd_sel.si_note);
793 callout_drain(&d->bd_callout);
794 bpf_freed(d);
795 free(d, M_BPF);
796 }
797
798 /*
799 * Open ethernet device. Returns ENXIO for illegal minor device number,
800 * EBUSY if file is open by another process.
801 */
802 /* ARGSUSED */
803 static int
804 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
805 {
806 struct bpf_d *d;
807 int error, size;
808
809 d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
810 error = devfs_set_cdevpriv(d, bpf_dtor);
811 if (error != 0) {
812 free(d, M_BPF);
813 return (error);
814 }
815
816 /*
817 * For historical reasons, perform a one-time initialization call to
818 * the buffer routines, even though we're not yet committed to a
819 * particular buffer method.
820 */
821 bpf_buffer_init(d);
822 d->bd_hbuf_in_use = 0;
823 d->bd_bufmode = BPF_BUFMODE_BUFFER;
824 d->bd_sig = SIGIO;
825 d->bd_direction = BPF_D_INOUT;
826 BPF_PID_REFRESH(d, td);
827 #ifdef MAC
828 mac_bpfdesc_init(d);
829 mac_bpfdesc_create(td->td_ucred, d);
830 #endif
831 mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
832 callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
833 knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
834
835 /* Allocate default buffers */
836 size = d->bd_bufsize;
837 bpf_buffer_ioctl_sblen(d, &size);
838
839 return (0);
840 }
841
842 /*
843 * bpfread - read next chunk of packets from buffers
844 */
845 static int
846 bpfread(struct cdev *dev, struct uio *uio, int ioflag)
847 {
848 struct bpf_d *d;
849 int error;
850 int non_block;
851 int timed_out;
852
853 error = devfs_get_cdevpriv((void **)&d);
854 if (error != 0)
855 return (error);
856
857 /*
858 * Restrict the application to using a buffer the same size
859 * as the kernel buffers.
860 */
861 if (uio->uio_resid != d->bd_bufsize)
862 return (EINVAL);
863
864 non_block = ((ioflag & O_NONBLOCK) != 0);
865
866 BPFD_LOCK(d);
867 BPF_PID_REFRESH_CUR(d);
868 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
869 BPFD_UNLOCK(d);
870 return (EOPNOTSUPP);
871 }
872 if (d->bd_state == BPF_WAITING)
873 callout_stop(&d->bd_callout);
874 timed_out = (d->bd_state == BPF_TIMED_OUT);
875 d->bd_state = BPF_IDLE;
876 while (d->bd_hbuf_in_use) {
877 error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
878 PRINET|PCATCH, "bd_hbuf", 0);
879 if (error != 0) {
880 BPFD_UNLOCK(d);
881 return (error);
882 }
883 }
884 /*
885 * If the hold buffer is empty, then do a timed sleep, which
886 * ends when the timeout expires or when enough packets
887 * have arrived to fill the store buffer.
888 */
889 while (d->bd_hbuf == NULL) {
890 if (d->bd_slen != 0) {
891 /*
892 * One or more packets either arrived since the previous
893 * read or arrived while we were asleep.
894 */
895 if (d->bd_immediate || non_block || timed_out) {
896 /*
897 * Rotate the buffers and return what's here
898 * if we are in immediate mode, non-blocking
899 * flag is set, or this descriptor timed out.
900 */
901 ROTATE_BUFFERS(d);
902 break;
903 }
904 }
905
906 /*
907 * No data is available, check to see if the bpf device
908 * is still pointed at a real interface. If not, return
909 * ENXIO so that the userland process knows to rebind
910 * it before using it again.
911 */
912 if (d->bd_bif == NULL) {
913 BPFD_UNLOCK(d);
914 return (ENXIO);
915 }
916
917 if (non_block) {
918 BPFD_UNLOCK(d);
919 return (EWOULDBLOCK);
920 }
921 error = msleep(d, &d->bd_lock, PRINET|PCATCH,
922 "bpf", d->bd_rtout);
923 if (error == EINTR || error == ERESTART) {
924 BPFD_UNLOCK(d);
925 return (error);
926 }
927 if (error == EWOULDBLOCK) {
928 /*
929 * On a timeout, return what's in the buffer,
930 * which may be nothing. If there is something
931 * in the store buffer, we can rotate the buffers.
932 */
933 if (d->bd_hbuf)
934 /*
935 * We filled up the buffer in between
936 * getting the timeout and arriving
937 * here, so we don't need to rotate.
938 */
939 break;
940
941 if (d->bd_slen == 0) {
942 BPFD_UNLOCK(d);
943 return (0);
944 }
945 ROTATE_BUFFERS(d);
946 break;
947 }
948 }
949 /*
950 * At this point, we know we have something in the hold slot.
951 */
952 d->bd_hbuf_in_use = 1;
953 BPFD_UNLOCK(d);
954
955 /*
956 * Move data from hold buffer into user space.
957 * We know the entire buffer is transferred since
958 * we checked above that the read buffer is bpf_bufsize bytes.
959 *
960 * We do not have to worry about simultaneous reads because
961 * we waited for sole access to the hold buffer above.
962 */
963 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
964
965 BPFD_LOCK(d);
966 KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
967 d->bd_fbuf = d->bd_hbuf;
968 d->bd_hbuf = NULL;
969 d->bd_hlen = 0;
970 bpf_buf_reclaimed(d);
971 d->bd_hbuf_in_use = 0;
972 wakeup(&d->bd_hbuf_in_use);
973 BPFD_UNLOCK(d);
974
975 return (error);
976 }
977
978 /*
979 * If there are processes sleeping on this descriptor, wake them up.
980 */
981 static __inline void
982 bpf_wakeup(struct bpf_d *d)
983 {
984
985 BPFD_LOCK_ASSERT(d);
986 if (d->bd_state == BPF_WAITING) {
987 callout_stop(&d->bd_callout);
988 d->bd_state = BPF_IDLE;
989 }
990 wakeup(d);
991 if (d->bd_async && d->bd_sig && d->bd_sigio)
992 pgsigio(&d->bd_sigio, d->bd_sig, 0);
993
994 selwakeuppri(&d->bd_sel, PRINET);
995 KNOTE_LOCKED(&d->bd_sel.si_note, 0);
996 }
997
998 static void
999 bpf_timed_out(void *arg)
1000 {
1001 struct bpf_d *d = (struct bpf_d *)arg;
1002
1003 BPFD_LOCK_ASSERT(d);
1004
1005 if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
1006 return;
1007 if (d->bd_state == BPF_WAITING) {
1008 d->bd_state = BPF_TIMED_OUT;
1009 if (d->bd_slen != 0)
1010 bpf_wakeup(d);
1011 }
1012 }
1013
1014 static int
1015 bpf_ready(struct bpf_d *d)
1016 {
1017
1018 BPFD_LOCK_ASSERT(d);
1019
1020 if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1021 return (1);
1022 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1023 d->bd_slen != 0)
1024 return (1);
1025 return (0);
1026 }
1027
1028 static int
1029 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
1030 {
1031 struct bpf_d *d;
1032 struct ifnet *ifp;
1033 struct mbuf *m, *mc;
1034 struct sockaddr dst;
1035 int error, hlen;
1036
1037 error = devfs_get_cdevpriv((void **)&d);
1038 if (error != 0)
1039 return (error);
1040
1041 BPF_PID_REFRESH_CUR(d);
1042 d->bd_wcount++;
1043 /* XXX: locking required */
1044 if (d->bd_bif == NULL) {
1045 d->bd_wdcount++;
1046 return (ENXIO);
1047 }
1048
1049 ifp = d->bd_bif->bif_ifp;
1050
1051 if ((ifp->if_flags & IFF_UP) == 0) {
1052 d->bd_wdcount++;
1053 return (ENETDOWN);
1054 }
1055
1056 if (uio->uio_resid == 0) {
1057 d->bd_wdcount++;
1058 return (0);
1059 }
1060
1061 bzero(&dst, sizeof(dst));
1062 m = NULL;
1063 hlen = 0;
1064 /* XXX: bpf_movein() can sleep */
1065 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
1066 &m, &dst, &hlen, d->bd_wfilter);
1067 if (error) {
1068 d->bd_wdcount++;
1069 return (error);
1070 }
1071 d->bd_wfcount++;
1072 if (d->bd_hdrcmplt)
1073 dst.sa_family = pseudo_AF_HDRCMPLT;
1074
1075 if (d->bd_feedback) {
1076 mc = m_dup(m, M_DONTWAIT);
1077 if (mc != NULL)
1078 mc->m_pkthdr.rcvif = ifp;
1079 /* Set M_PROMISC for outgoing packets to be discarded. */
1080 if (d->bd_direction == BPF_D_INOUT)
1081 m->m_flags |= M_PROMISC;
1082 } else
1083 mc = NULL;
1084
1085 m->m_pkthdr.len -= hlen;
1086 m->m_len -= hlen;
1087 m->m_data += hlen; /* XXX */
1088
1089 CURVNET_SET(ifp->if_vnet);
1090 #ifdef MAC
1091 BPFD_LOCK(d);
1092 mac_bpfdesc_create_mbuf(d, m);
1093 if (mc != NULL)
1094 mac_bpfdesc_create_mbuf(d, mc);
1095 BPFD_UNLOCK(d);
1096 #endif
1097
1098 error = (*ifp->if_output)(ifp, m, &dst, NULL);
1099 if (error)
1100 d->bd_wdcount++;
1101
1102 if (mc != NULL) {
1103 if (error == 0)
1104 (*ifp->if_input)(ifp, mc);
1105 else
1106 m_freem(mc);
1107 }
1108 CURVNET_RESTORE();
1109
1110 return (error);
1111 }
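/*
 * Editorial sketch (not part of the source): a minimal userland write
 * through the path above, assuming the descriptor is already bound to an
 * Ethernet interface and that "frame"/"frame_len" (hypothetical) hold a
 * complete Ethernet frame:
 *
 *	u_int one = 1;
 *	ioctl(fd, BIOCSHDRCMPLT, &one);	(keep the supplied source MAC)
 *	write(fd, frame, frame_len);
 *
 * bpf_movein() copies the 14-byte Ethernet header into dst.sa_data and
 * strips it from the mbuf; the interface's if_output() routine then
 * rebuilds the frame from the sockaddr.
 */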
1112
1113 /*
1114 * Reset a descriptor by flushing its packet buffer and clearing the receive
1115 * and drop counts. This is doable for kernel-only buffers, but with
1116 * zero-copy buffers, we can't write to (or rotate) buffers that are
1117 * currently owned by userspace. It would be nice if we could encapsulate
1118 * this logic in the buffer code rather than here.
1119 */
1120 static void
1121 reset_d(struct bpf_d *d)
1122 {
1123
1124 BPFD_LOCK_ASSERT(d);
1125
1126 while (d->bd_hbuf_in_use)
1127 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1128 "bd_hbuf", 0);
1129 if ((d->bd_hbuf != NULL) &&
1130 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1131 /* Free the hold buffer. */
1132 d->bd_fbuf = d->bd_hbuf;
1133 d->bd_hbuf = NULL;
1134 d->bd_hlen = 0;
1135 bpf_buf_reclaimed(d);
1136 }
1137 if (bpf_canwritebuf(d))
1138 d->bd_slen = 0;
1139 d->bd_rcount = 0;
1140 d->bd_dcount = 0;
1141 d->bd_fcount = 0;
1142 d->bd_wcount = 0;
1143 d->bd_wfcount = 0;
1144 d->bd_wdcount = 0;
1145 d->bd_zcopy = 0;
1146 }
1147
1148 /*
1149 * FIONREAD Check for read packet available.
1150 * SIOCGIFADDR Get interface address - convenient hook to driver.
1151 * BIOCGBLEN Get buffer len [for read()].
1152 * BIOCSETF Set read filter.
1153 * BIOCSETFNR Set read filter without resetting descriptor.
1154 * BIOCSETWF Set write filter.
1155 * BIOCFLUSH Flush read packet buffer.
1156 * BIOCPROMISC Put interface into promiscuous mode.
1157 * BIOCGDLT Get link layer type.
1158 * BIOCGETIF Get interface name.
1159 * BIOCSETIF Set interface.
1160 * BIOCSRTIMEOUT Set read timeout.
1161 * BIOCGRTIMEOUT Get read timeout.
1162 * BIOCGSTATS Get packet stats.
1163 * BIOCIMMEDIATE Set immediate mode.
1164 * BIOCVERSION Get filter language version.
1165 * BIOCGHDRCMPLT Get "header already complete" flag
1166 * BIOCSHDRCMPLT Set "header already complete" flag
1167 * BIOCGDIRECTION Get packet direction flag
1168 * BIOCSDIRECTION Set packet direction flag
1169 * BIOCGTSTAMP Get time stamp format and resolution.
1170 * BIOCSTSTAMP Set time stamp format and resolution.
1171 * BIOCLOCK Set "locked" flag
1172 * BIOCFEEDBACK Set packet feedback mode.
1173 * BIOCSETZBUF Set current zero-copy buffer locations.
1174 * BIOCGETZMAX Get maximum zero-copy buffer size.
1175 * BIOCROTZBUF Force rotation of zero-copy buffer
1176 * BIOCSETBUFMODE Set buffer mode.
1177 * BIOCGETBUFMODE Get current buffer mode.
1178 */
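/*
 * Editorial sketch (not part of the source): typical userland use of a
 * few of these ioctls for a simple capture.  Error handling is omitted
 * and "em0" is an arbitrary example interface:
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr = { 0 };
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	(bind to the interface)
 *	u_int v = 1;
 *	ioctl(fd, BIOCIMMEDIATE, &v);	(deliver packets as they arrive)
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);	(read() must use exactly this size)
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);	(one or more aligned records)
 */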
1179 /* ARGSUSED */
1180 static int
1181 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1182 struct thread *td)
1183 {
1184 struct bpf_d *d;
1185 int error;
1186
1187 error = devfs_get_cdevpriv((void **)&d);
1188 if (error != 0)
1189 return (error);
1190
1191 /*
1192 * Refresh PID associated with this descriptor.
1193 */
1194 BPFD_LOCK(d);
1195 BPF_PID_REFRESH(d, td);
1196 if (d->bd_state == BPF_WAITING)
1197 callout_stop(&d->bd_callout);
1198 d->bd_state = BPF_IDLE;
1199 BPFD_UNLOCK(d);
1200
1201 if (d->bd_locked == 1) {
1202 switch (cmd) {
1203 case BIOCGBLEN:
1204 case BIOCFLUSH:
1205 case BIOCGDLT:
1206 case BIOCGDLTLIST:
1207 #ifdef COMPAT_FREEBSD32
1208 case BIOCGDLTLIST32:
1209 #endif
1210 case BIOCGETIF:
1211 case BIOCGRTIMEOUT:
1212 #ifdef COMPAT_FREEBSD32
1213 case BIOCGRTIMEOUT32:
1214 #endif
1215 case BIOCGSTATS:
1216 case BIOCVERSION:
1217 case BIOCGRSIG:
1218 case BIOCGHDRCMPLT:
1219 case BIOCSTSTAMP:
1220 case BIOCFEEDBACK:
1221 case FIONREAD:
1222 case BIOCLOCK:
1223 case BIOCSRTIMEOUT:
1224 #ifdef COMPAT_FREEBSD32
1225 case BIOCSRTIMEOUT32:
1226 #endif
1227 case BIOCIMMEDIATE:
1228 case TIOCGPGRP:
1229 case BIOCROTZBUF:
1230 break;
1231 default:
1232 return (EPERM);
1233 }
1234 }
1235 #ifdef COMPAT_FREEBSD32
1236 /*
1237 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1238 * that it will get 32-bit packet headers.
1239 */
1240 switch (cmd) {
1241 case BIOCSETF32:
1242 case BIOCSETFNR32:
1243 case BIOCSETWF32:
1244 case BIOCGDLTLIST32:
1245 case BIOCGRTIMEOUT32:
1246 case BIOCSRTIMEOUT32:
1247 BPFD_LOCK(d);
1248 d->bd_compat32 = 1;
1249 BPFD_UNLOCK(d);
1250 }
1251 #endif
1252
1253 CURVNET_SET(TD_TO_VNET(td));
1254 switch (cmd) {
1255
1256 default:
1257 error = EINVAL;
1258 break;
1259
1260 /*
1261 * Check for read packet available.
1262 */
1263 case FIONREAD:
1264 {
1265 int n;
1266
1267 BPFD_LOCK(d);
1268 n = d->bd_slen;
1269 while (d->bd_hbuf_in_use)
1270 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1271 PRINET, "bd_hbuf", 0);
1272 if (d->bd_hbuf)
1273 n += d->bd_hlen;
1274 BPFD_UNLOCK(d);
1275
1276 *(int *)addr = n;
1277 break;
1278 }
1279
1280 case SIOCGIFADDR:
1281 {
1282 struct ifnet *ifp;
1283
1284 if (d->bd_bif == NULL)
1285 error = EINVAL;
1286 else {
1287 ifp = d->bd_bif->bif_ifp;
1288 error = (*ifp->if_ioctl)(ifp, cmd, addr);
1289 }
1290 break;
1291 }
1292
1293 /*
1294 * Get buffer len [for read()].
1295 */
1296 case BIOCGBLEN:
1297 BPFD_LOCK(d);
1298 *(u_int *)addr = d->bd_bufsize;
1299 BPFD_UNLOCK(d);
1300 break;
1301
1302 /*
1303 * Set buffer length.
1304 */
1305 case BIOCSBLEN:
1306 error = bpf_ioctl_sblen(d, (u_int *)addr);
1307 break;
1308
1309 /*
1310 * Set link layer read filter.
1311 */
1312 case BIOCSETF:
1313 case BIOCSETFNR:
1314 case BIOCSETWF:
1315 #ifdef COMPAT_FREEBSD32
1316 case BIOCSETF32:
1317 case BIOCSETFNR32:
1318 case BIOCSETWF32:
1319 #endif
1320 error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1321 break;
1322
1323 /*
1324 * Flush read packet buffer.
1325 */
1326 case BIOCFLUSH:
1327 BPFD_LOCK(d);
1328 reset_d(d);
1329 BPFD_UNLOCK(d);
1330 break;
1331
1332 /*
1333 * Put interface into promiscuous mode.
1334 */
1335 case BIOCPROMISC:
1336 if (d->bd_bif == NULL) {
1337 /*
1338 * No interface attached yet.
1339 */
1340 error = EINVAL;
1341 break;
1342 }
1343 if (d->bd_promisc == 0) {
1344 error = ifpromisc(d->bd_bif->bif_ifp, 1);
1345 if (error == 0)
1346 d->bd_promisc = 1;
1347 }
1348 break;
1349
1350 /*
1351 * Get current data link type.
1352 */
1353 case BIOCGDLT:
1354 BPF_LOCK();
1355 if (d->bd_bif == NULL)
1356 error = EINVAL;
1357 else
1358 *(u_int *)addr = d->bd_bif->bif_dlt;
1359 BPF_UNLOCK();
1360 break;
1361
1362 /*
1363 * Get a list of supported data link types.
1364 */
1365 #ifdef COMPAT_FREEBSD32
1366 case BIOCGDLTLIST32:
1367 {
1368 struct bpf_dltlist32 *list32;
1369 struct bpf_dltlist dltlist;
1370
1371 list32 = (struct bpf_dltlist32 *)addr;
1372 dltlist.bfl_len = list32->bfl_len;
1373 dltlist.bfl_list = PTRIN(list32->bfl_list);
1374 BPF_LOCK();
1375 if (d->bd_bif == NULL)
1376 error = EINVAL;
1377 else {
1378 error = bpf_getdltlist(d, &dltlist);
1379 if (error == 0)
1380 list32->bfl_len = dltlist.bfl_len;
1381 }
1382 BPF_UNLOCK();
1383 break;
1384 }
1385 #endif
1386
1387 case BIOCGDLTLIST:
1388 BPF_LOCK();
1389 if (d->bd_bif == NULL)
1390 error = EINVAL;
1391 else
1392 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1393 BPF_UNLOCK();
1394 break;
1395
1396 /*
1397 * Set data link type.
1398 */
1399 case BIOCSDLT:
1400 BPF_LOCK();
1401 if (d->bd_bif == NULL)
1402 error = EINVAL;
1403 else
1404 error = bpf_setdlt(d, *(u_int *)addr);
1405 BPF_UNLOCK();
1406 break;
1407
1408 /*
1409 * Get interface name.
1410 */
1411 case BIOCGETIF:
1412 BPF_LOCK();
1413 if (d->bd_bif == NULL)
1414 error = EINVAL;
1415 else {
1416 struct ifnet *const ifp = d->bd_bif->bif_ifp;
1417 struct ifreq *const ifr = (struct ifreq *)addr;
1418
1419 strlcpy(ifr->ifr_name, ifp->if_xname,
1420 sizeof(ifr->ifr_name));
1421 }
1422 BPF_UNLOCK();
1423 break;
1424
1425 /*
1426 * Set interface.
1427 */
1428 case BIOCSETIF:
1429 BPF_LOCK();
1430 error = bpf_setif(d, (struct ifreq *)addr);
1431 BPF_UNLOCK();
1432 break;
1433
1434 /*
1435 * Set read timeout.
1436 */
1437 case BIOCSRTIMEOUT:
1438 #ifdef COMPAT_FREEBSD32
1439 case BIOCSRTIMEOUT32:
1440 #endif
1441 {
1442 struct timeval *tv = (struct timeval *)addr;
1443 #ifdef COMPAT_FREEBSD32
1444 struct timeval32 *tv32;
1445 struct timeval tv64;
1446
1447 if (cmd == BIOCSRTIMEOUT32) {
1448 tv32 = (struct timeval32 *)addr;
1449 tv = &tv64;
1450 tv->tv_sec = tv32->tv_sec;
1451 tv->tv_usec = tv32->tv_usec;
1452 } else
1453 #endif
1454 tv = (struct timeval *)addr;
1455
1456 /*
1457 * Subtract 1 tick from tvtohz() since this isn't
1458 * a one-shot timer.
1459 */
1460 if ((error = itimerfix(tv)) == 0)
1461 d->bd_rtout = tvtohz(tv) - 1;
1462 break;
1463 }
1464
1465 /*
1466 * Get read timeout.
1467 */
1468 case BIOCGRTIMEOUT:
1469 #ifdef COMPAT_FREEBSD32
1470 case BIOCGRTIMEOUT32:
1471 #endif
1472 {
1473 struct timeval *tv;
1474 #ifdef COMPAT_FREEBSD32
1475 struct timeval32 *tv32;
1476 struct timeval tv64;
1477
1478 if (cmd == BIOCGRTIMEOUT32)
1479 tv = &tv64;
1480 else
1481 #endif
1482 tv = (struct timeval *)addr;
1483
1484 tv->tv_sec = d->bd_rtout / hz;
1485 tv->tv_usec = (d->bd_rtout % hz) * tick;
1486 #ifdef COMPAT_FREEBSD32
1487 if (cmd == BIOCGRTIMEOUT32) {
1488 tv32 = (struct timeval32 *)addr;
1489 tv32->tv_sec = tv->tv_sec;
1490 tv32->tv_usec = tv->tv_usec;
1491 }
1492 #endif
1493
1494 break;
1495 }
1496
1497 /*
1498 * Get packet stats.
1499 */
1500 case BIOCGSTATS:
1501 {
1502 struct bpf_stat *bs = (struct bpf_stat *)addr;
1503
1504 /* XXXCSJP overflow */
1505 bs->bs_recv = d->bd_rcount;
1506 bs->bs_drop = d->bd_dcount;
1507 break;
1508 }
1509
1510 /*
1511 * Set immediate mode.
1512 */
1513 case BIOCIMMEDIATE:
1514 BPFD_LOCK(d);
1515 d->bd_immediate = *(u_int *)addr;
1516 BPFD_UNLOCK(d);
1517 break;
1518
1519 case BIOCVERSION:
1520 {
1521 struct bpf_version *bv = (struct bpf_version *)addr;
1522
1523 bv->bv_major = BPF_MAJOR_VERSION;
1524 bv->bv_minor = BPF_MINOR_VERSION;
1525 break;
1526 }
1527
1528 /*
1529 * Get "header already complete" flag
1530 */
1531 case BIOCGHDRCMPLT:
1532 BPFD_LOCK(d);
1533 *(u_int *)addr = d->bd_hdrcmplt;
1534 BPFD_UNLOCK(d);
1535 break;
1536
1537 /*
1538 * Set "header already complete" flag
1539 */
1540 case BIOCSHDRCMPLT:
1541 BPFD_LOCK(d);
1542 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1543 BPFD_UNLOCK(d);
1544 break;
1545
1546 /*
1547 * Get packet direction flag
1548 */
1549 case BIOCGDIRECTION:
1550 BPFD_LOCK(d);
1551 *(u_int *)addr = d->bd_direction;
1552 BPFD_UNLOCK(d);
1553 break;
1554
1555 /*
1556 * Set packet direction flag
1557 */
1558 case BIOCSDIRECTION:
1559 {
1560 u_int direction;
1561
1562 direction = *(u_int *)addr;
1563 switch (direction) {
1564 case BPF_D_IN:
1565 case BPF_D_INOUT:
1566 case BPF_D_OUT:
1567 BPFD_LOCK(d);
1568 d->bd_direction = direction;
1569 BPFD_UNLOCK(d);
1570 break;
1571 default:
1572 error = EINVAL;
1573 }
1574 }
1575 break;
1576
1577 /*
1578 * Get packet timestamp format and resolution.
1579 */
1580 case BIOCGTSTAMP:
1581 BPFD_LOCK(d);
1582 *(u_int *)addr = d->bd_tstamp;
1583 BPFD_UNLOCK(d);
1584 break;
1585
1586 /*
1587 * Set packet timestamp format and resolution.
1588 */
1589 case BIOCSTSTAMP:
1590 {
1591 u_int func;
1592
1593 func = *(u_int *)addr;
1594 if (BPF_T_VALID(func))
1595 d->bd_tstamp = func;
1596 else
1597 error = EINVAL;
1598 }
1599 break;
1600
1601 case BIOCFEEDBACK:
1602 BPFD_LOCK(d);
1603 d->bd_feedback = *(u_int *)addr;
1604 BPFD_UNLOCK(d);
1605 break;
1606
1607 case BIOCLOCK:
1608 BPFD_LOCK(d);
1609 d->bd_locked = 1;
1610 BPFD_UNLOCK(d);
1611 break;
1612
1613 case FIONBIO: /* Non-blocking I/O */
1614 break;
1615
1616 case FIOASYNC: /* Send signal on receive packets */
1617 BPFD_LOCK(d);
1618 d->bd_async = *(int *)addr;
1619 BPFD_UNLOCK(d);
1620 break;
1621
1622 case FIOSETOWN:
1623 /*
1624 * XXX: Add some sort of locking here?
1625 * fsetown() can sleep.
1626 */
1627 error = fsetown(*(int *)addr, &d->bd_sigio);
1628 break;
1629
1630 case FIOGETOWN:
1631 BPFD_LOCK(d);
1632 *(int *)addr = fgetown(&d->bd_sigio);
1633 BPFD_UNLOCK(d);
1634 break;
1635
1636 /* This is deprecated, FIOSETOWN should be used instead. */
1637 case TIOCSPGRP:
1638 error = fsetown(-(*(int *)addr), &d->bd_sigio);
1639 break;
1640
1641 /* This is deprecated, FIOGETOWN should be used instead. */
1642 case TIOCGPGRP:
1643 *(int *)addr = -fgetown(&d->bd_sigio);
1644 break;
1645
1646 case BIOCSRSIG: /* Set receive signal */
1647 {
1648 u_int sig;
1649
1650 sig = *(u_int *)addr;
1651
1652 if (sig >= NSIG)
1653 error = EINVAL;
1654 else {
1655 BPFD_LOCK(d);
1656 d->bd_sig = sig;
1657 BPFD_UNLOCK(d);
1658 }
1659 break;
1660 }
1661 case BIOCGRSIG:
1662 BPFD_LOCK(d);
1663 *(u_int *)addr = d->bd_sig;
1664 BPFD_UNLOCK(d);
1665 break;
1666
1667 case BIOCGETBUFMODE:
1668 BPFD_LOCK(d);
1669 *(u_int *)addr = d->bd_bufmode;
1670 BPFD_UNLOCK(d);
1671 break;
1672
1673 case BIOCSETBUFMODE:
1674 /*
1675 * Allow the buffering mode to be changed as long as we
1676 * haven't yet committed to a particular mode. Our
1677 * definition of commitment, for now, is whether or not a
1678 * buffer has been allocated or an interface attached, since
1679 * that's the point where things get tricky.
1680 */
1681 switch (*(u_int *)addr) {
1682 case BPF_BUFMODE_BUFFER:
1683 break;
1684
1685 case BPF_BUFMODE_ZBUF:
1686 if (bpf_zerocopy_enable)
1687 break;
1688 /* FALLTHROUGH */
1689
1690 default:
1691 CURVNET_RESTORE();
1692 return (EINVAL);
1693 }
1694
1695 BPFD_LOCK(d);
1696 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1697 d->bd_fbuf != NULL || d->bd_bif != NULL) {
1698 BPFD_UNLOCK(d);
1699 CURVNET_RESTORE();
1700 return (EBUSY);
1701 }
1702 d->bd_bufmode = *(u_int *)addr;
1703 BPFD_UNLOCK(d);
1704 break;
1705
1706 case BIOCGETZMAX:
1707 error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1708 break;
1709
1710 case BIOCSETZBUF:
1711 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1712 break;
1713
1714 case BIOCROTZBUF:
1715 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1716 break;
1717 }
1718 CURVNET_RESTORE();
1719 return (error);
1720 }
1721
1722 /*
1723 * Set d's packet filter program to fp. If this file already has a filter,
1724 * free it and replace it. Returns EINVAL for bogus requests.
1725 *
1726 * Note we need global lock here to serialize bpf_setf() and bpf_setif() calls
1727 * since reading d->bd_bif can't be protected by d or interface lock due to
1728 * lock order.
1729 *
1730 * Additionally, we have to acquire the interface write lock because bpf_mtap()
1731 * uses the interface read lock to read all filters.
1732 *
1733 */
1734 static int
1735 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1736 {
1737 #ifdef COMPAT_FREEBSD32
1738 struct bpf_program fp_swab;
1739 struct bpf_program32 *fp32;
1740 #endif
1741 struct bpf_insn *fcode, *old;
1742 #ifdef BPF_JITTER
1743 bpf_jit_filter *jfunc, *ofunc;
1744 #endif
1745 size_t size;
1746 u_int flen;
1747 int need_upgrade;
1748
1749 #ifdef COMPAT_FREEBSD32
1750 switch (cmd) {
1751 case BIOCSETF32:
1752 case BIOCSETWF32:
1753 case BIOCSETFNR32:
1754 fp32 = (struct bpf_program32 *)fp;
1755 fp_swab.bf_len = fp32->bf_len;
1756 fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1757 fp = &fp_swab;
1758 switch (cmd) {
1759 case BIOCSETF32:
1760 cmd = BIOCSETF;
1761 break;
1762 case BIOCSETWF32:
1763 cmd = BIOCSETWF;
1764 break;
1765 }
1766 break;
1767 }
1768 #endif
1769
1770 fcode = NULL;
1771 #ifdef BPF_JITTER
1772 jfunc = ofunc = NULL;
1773 #endif
1774 need_upgrade = 0;
1775
1776 /*
1777 * Check the new filter's validity before acquiring any locks.
1778 * Allocate memory for new filter, if needed.
1779 */
1780 flen = fp->bf_len;
1781 if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1782 return (EINVAL);
1783 size = flen * sizeof(*fp->bf_insns);
1784 if (size > 0) {
1785 /* We're setting up new filter. Copy and check actual data. */
1786 fcode = malloc(size, M_BPF, M_WAITOK);
1787 if (copyin(fp->bf_insns, fcode, size) != 0 ||
1788 !bpf_validate(fcode, flen)) {
1789 free(fcode, M_BPF);
1790 return (EINVAL);
1791 }
1792 #ifdef BPF_JITTER
1793 /* Filter is copied inside fcode and is perfectly valid. */
1794 jfunc = bpf_jitter(fcode, flen);
1795 #endif
1796 }
1797
1798 BPF_LOCK();
1799
1800 /*
1801 * Set up new filter.
1802 * Protect filter change by interface lock.
1803 * Additionally, we are protected by global lock here.
1804 */
1805 if (d->bd_bif != NULL)
1806 BPFIF_WLOCK(d->bd_bif);
1807 BPFD_LOCK(d);
1808 if (cmd == BIOCSETWF) {
1809 old = d->bd_wfilter;
1810 d->bd_wfilter = fcode;
1811 } else {
1812 old = d->bd_rfilter;
1813 d->bd_rfilter = fcode;
1814 #ifdef BPF_JITTER
1815 ofunc = d->bd_bfilter;
1816 d->bd_bfilter = jfunc;
1817 #endif
1818 if (cmd == BIOCSETF)
1819 reset_d(d);
1820
1821 if (fcode != NULL) {
1822 /*
1823 * Do not require an upgrade on the first BIOCSETF
1824 * (used by pcap_open_live() to set the snaplen).
1825 */
1826 if (d->bd_writer != 0 && --d->bd_writer == 0)
1827 need_upgrade = 1;
1828 CTR4(KTR_NET, "%s: filter function set by pid %d, "
1829 "bd_writer counter %d, need_upgrade %d",
1830 __func__, d->bd_pid, d->bd_writer, need_upgrade);
1831 }
1832 }
1833 BPFD_UNLOCK(d);
1834 if (d->bd_bif != NULL)
1835 BPFIF_WUNLOCK(d->bd_bif);
1836 if (old != NULL)
1837 free(old, M_BPF);
1838 #ifdef BPF_JITTER
1839 if (ofunc != NULL)
1840 bpf_destroy_jit_filter(ofunc);
1841 #endif
1842
1843 /* Move d to active readers list. */
1844 if (need_upgrade)
1845 bpf_upgraded(d);
1846
1847 BPF_UNLOCK();
1848 return (0);
1849 }
1850
1851 /*
1852 * Detach a file from its current interface (if attached at all) and attach
1853 * to the interface indicated by the name stored in ifr.
1854 * Return an errno or 0.
1855 */
1856 static int
1857 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1858 {
1859 struct bpf_if *bp;
1860 struct ifnet *theywant;
1861
1862 BPF_LOCK_ASSERT();
1863
1864 theywant = ifunit(ifr->ifr_name);
1865 if (theywant == NULL || theywant->if_bpf == NULL)
1866 return (ENXIO);
1867
1868 bp = theywant->if_bpf;
1869
1870 /* Check if interface is not being detached from BPF */
1871 BPFIF_RLOCK(bp);
1872 if (bp->flags & BPFIF_FLAG_DYING) {
1873 BPFIF_RUNLOCK(bp);
1874 return (ENXIO);
1875 }
1876 BPFIF_RUNLOCK(bp);
1877
1878 /*
1879 * Behavior here depends on the buffering model. If we're using
1880 * kernel memory buffers, then we can allocate them here. If we're
1881 * using zero-copy, then the user process must have registered
1882 * buffers by the time we get here. If not, return an error.
1883 */
1884 switch (d->bd_bufmode) {
1885 case BPF_BUFMODE_BUFFER:
1886 case BPF_BUFMODE_ZBUF:
1887 if (d->bd_sbuf == NULL)
1888 return (EINVAL);
1889 break;
1890
1891 default:
1892 panic("bpf_setif: bufmode %d", d->bd_bufmode);
1893 }
1894 if (bp != d->bd_bif)
1895 bpf_attachd(d, bp);
1896 BPFD_LOCK(d);
1897 reset_d(d);
1898 BPFD_UNLOCK(d);
1899 return (0);
1900 }
1901
1902 /*
1903 * Support for select() and poll() system calls
1904 *
1905 * Return true iff the specific operation will not block indefinitely.
1906 * Otherwise, return false but make a note that a selwakeup() must be done.
1907 */
1908 static int
1909 bpfpoll(struct cdev *dev, int events, struct thread *td)
1910 {
1911 struct bpf_d *d;
1912 int revents;
1913
1914 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
1915 return (events &
1916 (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
1917
1918 /*
1919 * Refresh PID associated with this descriptor.
1920 */
1921 revents = events & (POLLOUT | POLLWRNORM);
1922 BPFD_LOCK(d);
1923 BPF_PID_REFRESH(d, td);
1924 if (events & (POLLIN | POLLRDNORM)) {
1925 if (bpf_ready(d))
1926 revents |= events & (POLLIN | POLLRDNORM);
1927 else {
1928 selrecord(td, &d->bd_sel);
1929 /* Start the read timeout if necessary. */
1930 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1931 callout_reset(&d->bd_callout, d->bd_rtout,
1932 bpf_timed_out, d);
1933 d->bd_state = BPF_WAITING;
1934 }
1935 }
1936 }
1937 BPFD_UNLOCK(d);
1938 return (revents);
1939 }
1940
1941 /*
1942 * Support for kevent() system call. Register EVFILT_READ filters and
1943 * reject all others.
1944 */
1945 int
1946 bpfkqfilter(struct cdev *dev, struct knote *kn)
1947 {
1948 struct bpf_d *d;
1949
1950 if (devfs_get_cdevpriv((void **)&d) != 0 ||
1951 kn->kn_filter != EVFILT_READ)
1952 return (1);
1953
1954 /*
1955 * Refresh PID associated with this descriptor.
1956 */
1957 BPFD_LOCK(d);
1958 BPF_PID_REFRESH_CUR(d);
1959 kn->kn_fop = &bpfread_filtops;
1960 kn->kn_hook = d;
1961 knlist_add(&d->bd_sel.si_note, kn, 1);
1962 BPFD_UNLOCK(d);
1963
1964 return (0);
1965 }
1966
1967 static void
1968 filt_bpfdetach(struct knote *kn)
1969 {
1970 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1971
1972 knlist_remove(&d->bd_sel.si_note, kn, 0);
1973 }
1974
1975 static int
1976 filt_bpfread(struct knote *kn, long hint)
1977 {
1978 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1979 int ready;
1980
1981 BPFD_LOCK_ASSERT(d);
1982 ready = bpf_ready(d);
1983 if (ready) {
1984 kn->kn_data = d->bd_slen;
1985 while (d->bd_hbuf_in_use)
1986 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1987 PRINET, "bd_hbuf", 0);
1988 if (d->bd_hbuf)
1989 kn->kn_data += d->bd_hlen;
1990 } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1991 callout_reset(&d->bd_callout, d->bd_rtout,
1992 bpf_timed_out, d);
1993 d->bd_state = BPF_WAITING;
1994 }
1995
1996 return (ready);
1997 }
1998
1999 #define BPF_TSTAMP_NONE 0
2000 #define BPF_TSTAMP_FAST 1
2001 #define BPF_TSTAMP_NORMAL 2
2002 #define BPF_TSTAMP_EXTERN 3
2003
2004 static int
2005 bpf_ts_quality(int tstype)
2006 {
2007
2008 if (tstype == BPF_T_NONE)
2009 return (BPF_TSTAMP_NONE);
2010 if ((tstype & BPF_T_FAST) != 0)
2011 return (BPF_TSTAMP_FAST);
2012
2013 return (BPF_TSTAMP_NORMAL);
2014 }
2015
2016 static int
2017 bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
2018 {
2019 struct m_tag *tag;
2020 int quality;
2021
2022 quality = bpf_ts_quality(tstype);
2023 if (quality == BPF_TSTAMP_NONE)
2024 return (quality);
2025
2026 if (m != NULL) {
2027 tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
2028 if (tag != NULL) {
2029 *bt = *(struct bintime *)(tag + 1);
2030 return (BPF_TSTAMP_EXTERN);
2031 }
2032 }
2033 if (quality == BPF_TSTAMP_NORMAL)
2034 binuptime(bt);
2035 else
2036 getbinuptime(bt);
2037
2038 return (quality);
2039 }
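/*
 * Editorial note: the tap routines below take timestamps lazily.  A
 * timestamp is fetched only once some filter has matched, and is only
 * re-fetched when a later descriptor in the walk requires higher quality
 * than "gottime" already holds: BPF_TSTAMP_FAST uses getbinuptime(),
 * BPF_TSTAMP_NORMAL uses binuptime(), and BPF_TSTAMP_EXTERN reuses a
 * timestamp a driver attached to the mbuf as an MTAG_BPF_TIMESTAMP tag.
 */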
2040
2041 /*
2042 * Incoming linkage from device drivers. Process the packet pkt, of length
2043 * pktlen, which is stored in a contiguous buffer. The packet is parsed
2044 * by each process' filter, and if accepted, stashed into the corresponding
2045 * buffer.
2046 */
2047 void
2048 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2049 {
2050 struct bintime bt;
2051 struct bpf_d *d;
2052 #ifdef BPF_JITTER
2053 bpf_jit_filter *bf;
2054 #endif
2055 u_int slen;
2056 int gottime;
2057
2058 gottime = BPF_TSTAMP_NONE;
2059
2060 BPFIF_RLOCK(bp);
2061
2062 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2063 /*
2064 * We are not using any locks for d here because:
2065 * 1) any filter change is protected by interface
2066 * write lock
2067 * 2) destroying/detaching d is protected by interface
2068 * write lock, too
2069 */
2070
2071 /* XXX: Do not protect counter for the sake of performance. */
2072 ++d->bd_rcount;
2073 /*
2074 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
2075 * way for the caller to indicate to us whether this packet
2076 * is inbound or outbound. In the bpf_mtap() routines, we use
2077 * the interface pointers on the mbuf to figure it out.
2078 */
2079 #ifdef BPF_JITTER
2080 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2081 if (bf != NULL)
2082 slen = (*(bf->func))(pkt, pktlen, pktlen);
2083 else
2084 #endif
2085 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2086 if (slen != 0) {
2087 /*
2088 * Filter matches. Acquire the descriptor lock.
2089 */
2090 BPFD_LOCK(d);
2091
2092 d->bd_fcount++;
2093 if (gottime < bpf_ts_quality(d->bd_tstamp))
2094 gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
2095 #ifdef MAC
2096 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2097 #endif
2098 catchpacket(d, pkt, pktlen, slen,
2099 bpf_append_bytes, &bt);
2100 BPFD_UNLOCK(d);
2101 }
2102 }
2103 BPFIF_RUNLOCK(bp);
2104 }
2105
2106 #define BPF_CHECK_DIRECTION(d, r, i) \
2107 (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
2108 ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
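/*
 * Editorial note: (r) is the mbuf's receive interface and (i) is the
 * tap's own interface, so (r) == (i) identifies an inbound packet.  The
 * macro evaluates to true when the packet must be skipped for the
 * descriptor's direction setting (BPF_D_IN, BPF_D_OUT or BPF_D_INOUT).
 */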
2109
2110 /*
2111 * Incoming linkage from device drivers, when packet is in an mbuf chain.
2112 * Locking model is explained in bpf_tap().
2113 */
2114 void
2115 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2116 {
2117 struct bintime bt;
2118 struct bpf_d *d;
2119 #ifdef BPF_JITTER
2120 bpf_jit_filter *bf;
2121 #endif
2122 u_int pktlen, slen;
2123 int gottime;
2124
2125 /* Skip outgoing duplicate packets. */
2126 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2127 m->m_flags &= ~M_PROMISC;
2128 return;
2129 }
2130
2131 pktlen = m_length(m, NULL);
2132 gottime = BPF_TSTAMP_NONE;
2133
2134 BPFIF_RLOCK(bp);
2135
2136 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2137 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2138 continue;
2139 ++d->bd_rcount;
2140 #ifdef BPF_JITTER
2141 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2142 /* XXX We cannot handle multiple mbufs. */
2143 if (bf != NULL && m->m_next == NULL)
2144 slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2145 else
2146 #endif
2147 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2148 if (slen != 0) {
2149 BPFD_LOCK(d);
2150
2151 d->bd_fcount++;
2152 if (gottime < bpf_ts_quality(d->bd_tstamp))
2153 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2154 #ifdef MAC
2155 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2156 #endif
2157 catchpacket(d, (u_char *)m, pktlen, slen,
2158 bpf_append_mbuf, &bt);
2159 BPFD_UNLOCK(d);
2160 }
2161 }
2162 BPFIF_RUNLOCK(bp);
2163 }
2164
2165 /*
2166 * Incoming linkage from device drivers, when packet is in
2167 * an mbuf chain and to be prepended by a contiguous header.
2168 */
2169 void
2170 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2171 {
2172 struct bintime bt;
2173 struct mbuf mb;
2174 struct bpf_d *d;
2175 u_int pktlen, slen;
2176 int gottime;
2177
2178 /* Skip outgoing duplicate packets. */
2179 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2180 m->m_flags &= ~M_PROMISC;
2181 return;
2182 }
2183
2184 pktlen = m_length(m, NULL);
2185 /*
2186 * Craft on-stack mbuf suitable for passing to bpf_filter.
2187 * Note that we cut corners here; we only set up what's
2188 * absolutely needed--this mbuf should never go anywhere else.
2189 */
2190 mb.m_next = m;
2191 mb.m_data = data;
2192 mb.m_len = dlen;
2193 pktlen += dlen;
2194
2195 gottime = BPF_TSTAMP_NONE;
2196
2197 BPFIF_RLOCK(bp);
2198
2199 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2200 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2201 continue;
2202 ++d->bd_rcount;
2203 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2204 if (slen != 0) {
2205 BPFD_LOCK(d);
2206
2207 d->bd_fcount++;
2208 if (gottime < bpf_ts_quality(d->bd_tstamp))
2209 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2210 #ifdef MAC
2211 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2212 #endif
2213 catchpacket(d, (u_char *)&mb, pktlen, slen,
2214 bpf_append_mbuf, &bt);
2215 BPFD_UNLOCK(d);
2216 }
2217 }
2218 BPFIF_RUNLOCK(bp);
2219 }
2220
2221 #undef BPF_CHECK_DIRECTION
2222
2223 #undef BPF_TSTAMP_NONE
2224 #undef BPF_TSTAMP_FAST
2225 #undef BPF_TSTAMP_NORMAL
2226 #undef BPF_TSTAMP_EXTERN
2227
2228 static int
2229 bpf_hdrlen(struct bpf_d *d)
2230 {
2231 int hdrlen;
2232
2233 hdrlen = d->bd_bif->bif_hdrlen;
2234 #ifndef BURN_BRIDGES
2235 if (d->bd_tstamp == BPF_T_NONE ||
2236 BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2237 #ifdef COMPAT_FREEBSD32
2238 if (d->bd_compat32)
2239 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
2240 else
2241 #endif
2242 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
2243 else
2244 #endif
2245 hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
2246 #ifdef COMPAT_FREEBSD32
2247 if (d->bd_compat32)
2248 hdrlen = BPF_WORDALIGN32(hdrlen);
2249 else
2250 #endif
2251 hdrlen = BPF_WORDALIGN(hdrlen);
2252
2253 return (hdrlen - d->bd_bif->bif_hdrlen);
2254 }
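
/*
 * Worked example (hedged, LP64 sizes assumed): for DLT_EN10MB with
 * the classic microsecond header, bif_hdrlen is 14 and
 * SIZEOF_BPF_HDR(struct bpf_hdr) is 26 (a 16-byte timeval, two
 * 4-byte lengths, and a 2-byte bh_hdrlen).  BPF_WORDALIGN(14 + 26)
 * is 40, already a multiple of BPF_ALIGNMENT (8), so bpf_hdrlen()
 * returns 40 - 14 = 26 and the packet payload lands long-word
 * aligned in the store buffer.
 */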
2255
2256 static void
2257 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
2258 {
2259 struct bintime bt2;
2260 struct timeval tsm;
2261 struct timespec tsn;
2262
2263 if ((tstype & BPF_T_MONOTONIC) == 0) {
2264 bt2 = *bt;
2265 bintime_add(&bt2, &boottimebin);
2266 bt = &bt2;
2267 }
2268 switch (BPF_T_FORMAT(tstype)) {
2269 case BPF_T_MICROTIME:
2270 bintime2timeval(bt, &tsm);
2271 ts->bt_sec = tsm.tv_sec;
2272 ts->bt_frac = tsm.tv_usec;
2273 break;
2274 case BPF_T_NANOTIME:
2275 bintime2timespec(bt, &tsn);
2276 ts->bt_sec = tsn.tv_sec;
2277 ts->bt_frac = tsn.tv_nsec;
2278 break;
2279 case BPF_T_BINTIME:
2280 ts->bt_sec = bt->sec;
2281 ts->bt_frac = bt->frac;
2282 break;
2283 }
2284 }
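
/*
 * Example (hedged userland sketch): requesting the nanosecond,
 * monotonic format handled above.  BIOCSTSTAMP stores its argument
 * in bd_tstamp; fd is assumed to be an open bpf descriptor.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>

static int
use_nano_monotonic(int fd)
{
	u_int fmt = BPF_T_NANOTIME | BPF_T_MONOTONIC;

	return (ioctl(fd, BIOCSTSTAMP, &fmt));
}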
2285
2286 /*
2287 * Move the packet data from interface memory (pkt) into the
2288 * store buffer. "cpfn" is the routine called to do the actual data
2289 * transfer. bpf_append_bytes is passed in to copy contiguous chunks,
2290 * while bpf_append_mbuf is passed in to copy mbuf chains. In the
2291 * latter case, pkt is really an mbuf.
2292 */
2293 static void
2294 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2295 void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2296 struct bintime *bt)
2297 {
2298 struct bpf_xhdr hdr;
2299 #ifndef BURN_BRIDGES
2300 struct bpf_hdr hdr_old;
2301 #ifdef COMPAT_FREEBSD32
2302 struct bpf_hdr32 hdr32_old;
2303 #endif
2304 #endif
2305 int caplen, curlen, hdrlen, totlen;
2306 int do_wakeup = 0;
2307 int do_timestamp;
2308 int tstype;
2309
2310 BPFD_LOCK_ASSERT(d);
2311
2312 /*
2313 * Detect whether user space has released a buffer back to us, and if
2314 * so, move it from being a hold buffer to a free buffer. This may
2315 * not be the best place to do it (for example, we might only want to
2316 * run this check if we need the space), but for now it's a reliable
2317 * spot to do it.
2318 */
2319 if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2320 while (d->bd_hbuf_in_use)
2321 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2322 PRINET, "bd_hbuf", 0);
2323 d->bd_fbuf = d->bd_hbuf;
2324 d->bd_hbuf = NULL;
2325 d->bd_hlen = 0;
2326 bpf_buf_reclaimed(d);
2327 }
2328
2329 /*
2330 * Figure out how many bytes to move. If the packet is
2331 * greater than or equal to the snapshot length, transfer that
2332 * much. Otherwise, transfer the whole packet (unless
2333 * we hit the buffer size limit).
2334 */
2335 hdrlen = bpf_hdrlen(d);
2336 totlen = hdrlen + min(snaplen, pktlen);
2337 if (totlen > d->bd_bufsize)
2338 totlen = d->bd_bufsize;
2339
2340 /*
2341 * Round up the end of the previous packet to the next longword.
2342 *
2343 * Drop the packet if there's no room and no hope of room.
2344 * If the packet would overflow the storage buffer or the storage
2345 * buffer is considered immutable by the buffer model, try to rotate
2346 * the buffer and wake up pending processes.
2347 */
2348 #ifdef COMPAT_FREEBSD32
2349 if (d->bd_compat32)
2350 curlen = BPF_WORDALIGN32(d->bd_slen);
2351 else
2352 #endif
2353 curlen = BPF_WORDALIGN(d->bd_slen);
2354 if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2355 if (d->bd_fbuf == NULL) {
2356 /*
2357 * There's no room in the store buffer, and no
2358 * prospect of room, so drop the packet. Notify the
2359 * buffer model.
2360 */
2361 bpf_buffull(d);
2362 ++d->bd_dcount;
2363 return;
2364 }
2365 while (d->bd_hbuf_in_use)
2366 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2367 PRINET, "bd_hbuf", 0);
2368 ROTATE_BUFFERS(d);
2369 do_wakeup = 1;
2370 curlen = 0;
2371 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2372 /*
2373 * Immediate mode is set, or the read timeout has already
2374 * expired during a select call. A packet arrived, so the
2375 * reader should be woken up.
2376 */
2377 do_wakeup = 1;
2378 caplen = totlen - hdrlen;
2379 tstype = d->bd_tstamp;
2380 do_timestamp = tstype != BPF_T_NONE;
2381 #ifndef BURN_BRIDGES
2382 if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
2383 struct bpf_ts ts;
2384 if (do_timestamp)
2385 bpf_bintime2ts(bt, &ts, tstype);
2386 #ifdef COMPAT_FREEBSD32
2387 if (d->bd_compat32) {
2388 bzero(&hdr32_old, sizeof(hdr32_old));
2389 if (do_timestamp) {
2390 hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
2391 hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
2392 }
2393 hdr32_old.bh_datalen = pktlen;
2394 hdr32_old.bh_hdrlen = hdrlen;
2395 hdr32_old.bh_caplen = caplen;
2396 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2397 sizeof(hdr32_old));
2398 goto copy;
2399 }
2400 #endif
2401 bzero(&hdr_old, sizeof(hdr_old));
2402 if (do_timestamp) {
2403 hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
2404 hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
2405 }
2406 hdr_old.bh_datalen = pktlen;
2407 hdr_old.bh_hdrlen = hdrlen;
2408 hdr_old.bh_caplen = caplen;
2409 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2410 sizeof(hdr_old));
2411 goto copy;
2412 }
2413 #endif
2414
2415 /*
2416 * Append the bpf header. Note that we append the actual header size,
2417 * but advance by the length of the header plus padding.
2418 */
2419 bzero(&hdr, sizeof(hdr));
2420 if (do_timestamp)
2421 bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
2422 hdr.bh_datalen = pktlen;
2423 hdr.bh_hdrlen = hdrlen;
2424 hdr.bh_caplen = caplen;
2425 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2426
2427 /*
2428 * Copy the packet data into the store buffer and update its length.
2429 */
2430 #ifndef BURN_BRIDGES
2431 copy:
2432 #endif
2433 (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2434 d->bd_slen = curlen + totlen;
2435
2436 if (do_wakeup)
2437 bpf_wakeup(d);
2438 }
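
/*
 * Example (hedged userland sketch): walking a store buffer filled by
 * catchpacket().  Each record is a header, the captured bytes, and
 * padding out to the next long-word boundary; the classic struct
 * bpf_hdr layout is assumed and handle_pkt() is a hypothetical
 * consumer.
 */
#include <sys/types.h>
#include <net/bpf.h>
#include <unistd.h>

static void
handle_pkt(const u_char *pkt, u_int caplen, u_int datalen)
{
	/* hypothetical consumer of one captured packet */
}

static void
drain_bpf(int fd, char *buf, size_t bufsize)
{
	struct bpf_hdr *bh;
	ssize_t n;
	char *p;

	if ((n = read(fd, buf, bufsize)) <= 0)
		return;
	for (p = buf; p < buf + n;
	    p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen)) {
		bh = (struct bpf_hdr *)p;
		handle_pkt((u_char *)p + bh->bh_hdrlen, bh->bh_caplen,
		    bh->bh_datalen);
	}
}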
2439
2440 /*
2441 * Free buffers currently in use by a descriptor.
2442 * Called on close.
2443 */
2444 static void
2445 bpf_freed(struct bpf_d *d)
2446 {
2447
2448 /*
2449 * We don't need to lock out interrupts since this descriptor has
2450 * been detached from its interface and has not yet been marked
2451 * free.
2452 */
2453 bpf_free(d);
2454 if (d->bd_rfilter != NULL) {
2455 free((caddr_t)d->bd_rfilter, M_BPF);
2456 #ifdef BPF_JITTER
2457 if (d->bd_bfilter != NULL)
2458 bpf_destroy_jit_filter(d->bd_bfilter);
2459 #endif
2460 }
2461 if (d->bd_wfilter != NULL)
2462 free((caddr_t)d->bd_wfilter, M_BPF);
2463 mtx_destroy(&d->bd_lock);
2464 }
2465
2466 /*
2467 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
2468 * fixed size of the link header (variable length headers not yet supported).
2469 */
2470 void
2471 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2472 {
2473
2474 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2475 }
2476
2477 /*
2478 * Attach an interface to bpf. ifp is a pointer to the structure
2479 * defining the interface to be attached, dlt is the link layer type,
2480 * and hdrlen is the fixed size of the link header (variable length
2481 * headers are not yet supported).
2482 */
2483 void
2484 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2485 {
2486 struct bpf_if *bp;
2487
2488 bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2489 if (bp == NULL)
2490 panic("bpfattach");
2491
2492 LIST_INIT(&bp->bif_dlist);
2493 LIST_INIT(&bp->bif_wlist);
2494 bp->bif_ifp = ifp;
2495 bp->bif_dlt = dlt;
2496 rw_init(&bp->bif_lock, "bpf interface lock");
2497 KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2498 *driverp = bp;
2499
2500 BPF_LOCK();
2501 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2502 BPF_UNLOCK();
2503
2504 bp->bif_hdrlen = hdrlen;
2505
2506 if (bootverbose)
2507 if_printf(ifp, "bpf attached\n");
2508 }
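
/*
 * Example (hedged kernel-side sketch): how a driver would attach.
 * For Ethernet drivers, ether_ifattach() makes this exact call
 * internally, so they never call bpfattach() directly; a driver with
 * private framing would pass its own DLT and header length.
 */
static void
foo_attach_bpf(struct ifnet *ifp)
{

	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
}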
2509
2510 /*
2511 * Detach bpf from an interface. This involves detaching each descriptor
2512 * associated with the interface. Notify each descriptor as it's detached
2513 * so that any sleepers wake up and get ENXIO.
2514 */
2515 void
2516 bpfdetach(struct ifnet *ifp)
2517 {
2518 struct bpf_if *bp;
2519 struct bpf_d *d;
2520 #ifdef INVARIANTS
2521 int ndetached;
2522
2523 ndetached = 0;
2524 #endif
2525
2526 BPF_LOCK();
2527 /* Find all bpf_if structs that reference ifp and detach them. */
2528 do {
2529 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2530 if (ifp == bp->bif_ifp)
2531 break;
2532 }
2533 if (bp != NULL)
2534 LIST_REMOVE(bp, bif_next);
2535
2536 if (bp != NULL) {
2537 #ifdef INVARIANTS
2538 ndetached++;
2539 #endif
2540 while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2541 bpf_detachd_locked(d);
2542 BPFD_LOCK(d);
2543 bpf_wakeup(d);
2544 BPFD_UNLOCK(d);
2545 }
2546 /* Free writer-only descriptors */
2547 while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2548 bpf_detachd_locked(d);
2549 BPFD_LOCK(d);
2550 bpf_wakeup(d);
2551 BPFD_UNLOCK(d);
2552 }
2553
2554 /*
2555 * Delay freeing bp until the interface is detached
2556 * and all routes through this interface are removed.
2557 * Mark bp as detached to restrict new consumers.
2558 */
2559 BPFIF_WLOCK(bp);
2560 bp->flags |= BPFIF_FLAG_DYING;
2561 BPFIF_WUNLOCK(bp);
2562 }
2563 } while (bp != NULL);
2564 BPF_UNLOCK();
2565
2566 #ifdef INVARIANTS
2567 if (ndetached == 0)
2568 printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2569 #endif
2570 }
2571
2572 /*
2573 * Interface departure handler.
2574 * Note that a departure event does not guarantee the interface is going down.
2575 */
2576 static void
2577 bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2578 {
2579 struct bpf_if *bp;
2580
2581 BPF_LOCK();
2582 if ((bp = ifp->if_bpf) == NULL) {
2583 BPF_UNLOCK();
2584 return;
2585 }
2586
2587 /* Check if bpfdetach() was called previously */
2588 if ((bp->flags & BPFIF_FLAG_DYING) == 0) {
2589 BPF_UNLOCK();
2590 return;
2591 }
2592
2593 CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
2594 __func__, bp, ifp);
2595
2596 ifp->if_bpf = NULL;
2597 BPF_UNLOCK();
2598
2599 rw_destroy(&bp->bif_lock);
2600 free(bp, M_BPF);
2601 }
2602
2603 /*
2604 * Get the list of available data link types for the interface.
2605 */
2606 static int
2607 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2608 {
2609 int n, error;
2610 struct ifnet *ifp;
2611 struct bpf_if *bp;
2612
2613 BPF_LOCK_ASSERT();
2614
2615 ifp = d->bd_bif->bif_ifp;
2616 n = 0;
2617 error = 0;
2618 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2619 if (bp->bif_ifp != ifp)
2620 continue;
2621 if (bfl->bfl_list != NULL) {
2622 if (n >= bfl->bfl_len)
2623 return (ENOMEM);
2624 error = copyout(&bp->bif_dlt,
2625 bfl->bfl_list + n, sizeof(u_int));
2626 }
2627 n++;
2628 }
2629 bfl->bfl_len = n;
2630 return (error);
2631 }
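
/*
 * Example (hedged userland sketch): the two-call pattern the handler
 * above supports.  A first BIOCGDLTLIST with bfl_list == NULL only
 * reports the count; the second call copies out the types.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdlib.h>
#include <string.h>

static u_int *
get_dlt_list(int fd, u_int *countp)
{
	struct bpf_dltlist bfl;

	memset(&bfl, 0, sizeof(bfl));		/* size query only */
	if (ioctl(fd, BIOCGDLTLIST, &bfl) < 0)
		return (NULL);
	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
	if (bfl.bfl_list == NULL ||
	    ioctl(fd, BIOCGDLTLIST, &bfl) < 0) {
		free(bfl.bfl_list);
		return (NULL);
	}
	*countp = bfl.bfl_len;
	return (bfl.bfl_list);
}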
2632
2633 /*
2634 * Set the data link type of a BPF instance.
2635 */
2636 static int
2637 bpf_setdlt(struct bpf_d *d, u_int dlt)
2638 {
2639 int error, opromisc;
2640 struct ifnet *ifp;
2641 struct bpf_if *bp;
2642
2643 BPF_LOCK_ASSERT();
2644
2645 if (d->bd_bif->bif_dlt == dlt)
2646 return (0);
2647 ifp = d->bd_bif->bif_ifp;
2648
2649 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2650 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2651 break;
2652 }
2653
2654 if (bp != NULL) {
2655 opromisc = d->bd_promisc;
2656 bpf_attachd(d, bp);
2657 BPFD_LOCK(d);
2658 reset_d(d);
2659 BPFD_UNLOCK(d);
2660 if (opromisc) {
2661 error = ifpromisc(bp->bif_ifp, 1);
2662 if (error)
2663 if_printf(bp->bif_ifp,
2664 "bpf_setdlt: ifpromisc failed (%d)\n",
2665 error);
2666 else
2667 d->bd_promisc = 1;
2668 }
2669 }
2670 return (bp == NULL ? EINVAL : 0);
2671 }
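
/*
 * Example (hedged userland sketch): switching an open descriptor to
 * another data link type with BIOCSDLT, which lands in bpf_setdlt()
 * above and returns EINVAL when the interface has no bpf_if for the
 * requested type.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>

static int
set_dlt(int fd, u_int dlt)
{

	return (ioctl(fd, BIOCSDLT, &dlt));
}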
2672
2673 static void
2674 bpf_drvinit(void *unused)
2675 {
2676 struct cdev *dev;
2677
2678 mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2679 LIST_INIT(&bpf_iflist);
2680
2681 dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2682 /* For compatibility */
2683 make_dev_alias(dev, "bpf0");
2684
2685 /* Register interface departure handler */
2686 bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2687 ifnet_departure_event, bpf_ifdetach, NULL,
2688 EVENTHANDLER_PRI_ANY);
2689 }
2690
2691 /*
2692 * Zero out the various packet counters associated with all of the bpf
2693 * descriptors. At some point, we will probably want to get a bit more
2694 * granular and allow the user to specify descriptors to be zeroed.
2695 */
2696 static void
2697 bpf_zero_counters(void)
2698 {
2699 struct bpf_if *bp;
2700 struct bpf_d *bd;
2701
2702 BPF_LOCK();
2703 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2704 BPFIF_RLOCK(bp);
2705 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2706 BPFD_LOCK(bd);
2707 bd->bd_rcount = 0;
2708 bd->bd_dcount = 0;
2709 bd->bd_fcount = 0;
2710 bd->bd_wcount = 0;
2711 bd->bd_wfcount = 0;
2712 bd->bd_zcopy = 0;
2713 BPFD_UNLOCK(bd);
2714 }
2715 BPFIF_RUNLOCK(bp);
2716 }
2717 BPF_UNLOCK();
2718 }
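
/*
 * Example (hedged userland sketch): bpf_zero_counters() is reached
 * from the stats sysctl below when a fully zeroed struct xbpf_d is
 * written; the "net.bpf.stats" OID name is an assumption based on
 * the `netstat -B' handler that follows.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <string.h>

static int
zero_bpf_stats(void)
{
	struct xbpf_d zero;

	memset(&zero, 0, sizeof(zero));
	return (sysctlbyname("net.bpf.stats", NULL, NULL, &zero,
	    sizeof(zero)));
}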
2719
2720 /*
2721 * Fill in filter statistics for a descriptor.
2722 */
2723 static void
2724 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2725 {
2726
2727 bzero(d, sizeof(*d));
2728 BPFD_LOCK_ASSERT(bd);
2729 d->bd_structsize = sizeof(*d);
2730 /* XXX: reading should be protected by global lock */
2731 d->bd_immediate = bd->bd_immediate;
2732 d->bd_promisc = bd->bd_promisc;
2733 d->bd_hdrcmplt = bd->bd_hdrcmplt;
2734 d->bd_direction = bd->bd_direction;
2735 d->bd_feedback = bd->bd_feedback;
2736 d->bd_async = bd->bd_async;
2737 d->bd_rcount = bd->bd_rcount;
2738 d->bd_dcount = bd->bd_dcount;
2739 d->bd_fcount = bd->bd_fcount;
2740 d->bd_sig = bd->bd_sig;
2741 d->bd_slen = bd->bd_slen;
2742 d->bd_hlen = bd->bd_hlen;
2743 d->bd_bufsize = bd->bd_bufsize;
2744 d->bd_pid = bd->bd_pid;
2745 strlcpy(d->bd_ifname,
2746 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2747 d->bd_locked = bd->bd_locked;
2748 d->bd_wcount = bd->bd_wcount;
2749 d->bd_wdcount = bd->bd_wdcount;
2750 d->bd_wfcount = bd->bd_wfcount;
2751 d->bd_zcopy = bd->bd_zcopy;
2752 d->bd_bufmode = bd->bd_bufmode;
2753 }
2754
2755 /*
2756 * Handle `netstat -B' stats request
2757 */
2758 static int
2759 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2760 {
2761 struct xbpf_d *xbdbuf, *xbd, zerostats;
2762 int index, error;
2763 struct bpf_if *bp;
2764 struct bpf_d *bd;
2765
2766 /*
2767 * XXX This is not technically correct. It is possible for
2768 * non-privileged users to open bpf devices. It would make sense
2769 * if the users who opened the devices were able to retrieve
2770 * the statistics for them, too.
2771 */
2772 error = priv_check(req->td, PRIV_NET_BPF);
2773 if (error)
2774 return (error);
2775 /*
2776 * Check to see if the user is requesting that the counters be
2777 * zeroed out. Explicitly check that the supplied data is zeroed,
2778 * as we aren't allowing the user to set the counters currently.
2779 */
2780 if (req->newptr != NULL) {
2781 if (req->newlen != sizeof(zerostats))
2782 return (EINVAL);
2783 bzero(&zerostats, sizeof(zerostats));
2784 xbd = req->newptr;
2785 if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
2786 return (EINVAL);
2787 bpf_zero_counters();
2788 return (0);
2789 }
2790 if (req->oldptr == NULL)
2791 return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2792 if (bpf_bpfd_cnt == 0)
2793 return (SYSCTL_OUT(req, 0, 0));
2794 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2795 BPF_LOCK();
2796 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2797 BPF_UNLOCK();
2798 free(xbdbuf, M_BPF);
2799 return (ENOMEM);
2800 }
2801 index = 0;
2802 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2803 BPFIF_RLOCK(bp);
2804 /* Send writers-only first */
2805 LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2806 xbd = &xbdbuf[index++];
2807 BPFD_LOCK(bd);
2808 bpfstats_fill_xbpf(xbd, bd);
2809 BPFD_UNLOCK(bd);
2810 }
2811 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2812 xbd = &xbdbuf[index++];
2813 BPFD_LOCK(bd);
2814 bpfstats_fill_xbpf(xbd, bd);
2815 BPFD_UNLOCK(bd);
2816 }
2817 BPFIF_RUNLOCK(bp);
2818 }
2819 BPF_UNLOCK();
2820 error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2821 free(xbdbuf, M_BPF);
2822 return (error);
2823 }
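
/*
 * Example (hedged userland sketch): reading the per-descriptor stats
 * exported above, as `netstat -B' does.  The "net.bpf.stats" OID
 * name is assumed from the SYSCTL glue elsewhere in this file.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void
print_bpf_stats(void)
{
	struct xbpf_d *xbd;
	size_t i, len;

	/* The first call sizes the buffer, the second fills it. */
	if (sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0) < 0)
		return;
	if ((xbd = malloc(len)) == NULL)
		return;
	if (sysctlbyname("net.bpf.stats", xbd, &len, NULL, 0) == 0)
		for (i = 0; i < len / sizeof(*xbd); i++)
			printf("%s: recv %ju drop %ju\n", xbd[i].bd_ifname,
			    (uintmax_t)xbd[i].bd_rcount,
			    (uintmax_t)xbd[i].bd_dcount);
	free(xbd);
}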
2824
2825 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
2826
2827 #else /* !DEV_BPF && !NETGRAPH_BPF */
2828 /*
2829 * NOP stubs to allow bpf-using drivers to load and function.
2830 *
2831 * A 'better' implementation would allow the core bpf functionality
2832 * to be loaded at runtime.
2833 */
2834 static struct bpf_if bp_null;
2835
2836 void
2837 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2838 {
2839 }
2840
2841 void
2842 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2843 {
2844 }
2845
2846 void
2847 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2848 {
2849 }
2850
2851 void
2852 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2853 {
2854
2855 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2856 }
2857
2858 void
2859 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2860 {
2861
2862 *driverp = &bp_null;
2863 }
2864
2865 void
2866 bpfdetach(struct ifnet *ifp)
2867 {
2868 }
2869
2870 u_int
2871 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
2872 {
2873 return (-1); /* "no filter" behaviour */
2874 }
2875
2876 int
2877 bpf_validate(const struct bpf_insn *f, int len)
2878 {
2879 return (0); /* false */
2880 }
2881
2882 #endif /* !DEV_BPF && !NETGRAPH_BPF */