FreeBSD/Linux Kernel Cross Reference
sys/net/bpf.c
1 /*-
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39
40 #include "opt_bpf.h"
41 #include "opt_compat.h"
42 #include "opt_netgraph.h"
43
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/lock.h>
47 #include <sys/rwlock.h>
48 #include <sys/systm.h>
49 #include <sys/conf.h>
50 #include <sys/fcntl.h>
51 #include <sys/jail.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/time.h>
55 #include <sys/priv.h>
56 #include <sys/proc.h>
57 #include <sys/signalvar.h>
58 #include <sys/filio.h>
59 #include <sys/sockio.h>
60 #include <sys/ttycom.h>
61 #include <sys/uio.h>
62
63 #include <sys/event.h>
64 #include <sys/file.h>
65 #include <sys/poll.h>
66 #include <sys/proc.h>
67
68 #include <sys/socket.h>
69
70 #include <net/if.h>
71 #define BPF_INTERNAL
72 #include <net/bpf.h>
73 #include <net/bpf_buffer.h>
74 #ifdef BPF_JITTER
75 #include <net/bpf_jitter.h>
76 #endif
77 #include <net/bpf_zerocopy.h>
78 #include <net/bpfdesc.h>
79 #include <net/vnet.h>
80
81 #include <netinet/in.h>
82 #include <netinet/if_ether.h>
83 #include <sys/kernel.h>
84 #include <sys/sysctl.h>
85
86 #include <net80211/ieee80211_freebsd.h>
87
88 #include <security/mac/mac_framework.h>
89
90 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
91
92 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
93
94 #define PRINET 26 /* interruptible */
95
96 #ifdef COMPAT_FREEBSD32
97 #include <sys/mount.h>
98 #include <compat/freebsd32/freebsd32.h>
99 #define BPF_ALIGNMENT32 sizeof(int32_t)
100 #define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
101
102 /*
103 * 32-bit version of structure prepended to each packet. We use this header
104 * instead of the standard one for 32-bit streams. We mark the a stream as
105 * 32-bit the first time we see a 32-bit compat ioctl request.
106 */
107 struct bpf_hdr32 {
108 struct timeval32 bh_tstamp; /* time stamp */
109 uint32_t bh_caplen; /* length of captured portion */
110 uint32_t bh_datalen; /* original length of packet */
111 uint16_t bh_hdrlen; /* length of bpf header (this struct
112 plus alignment padding) */
113 };
114
115 struct bpf_program32 {
116 u_int bf_len;
117 uint32_t bf_insns;
118 };
119
120 struct bpf_dltlist32 {
121 u_int bfl_len;
122 u_int bfl_list;
123 };
124
125 #define BIOCSETF32 _IOW('B', 103, struct bpf_program32)
126 #define BIOCSRTIMEOUT32 _IOW('B',109, struct timeval32)
127 #define BIOCGRTIMEOUT32 _IOR('B',110, struct timeval32)
128 #define BIOCGDLTLIST32 _IOWR('B',121, struct bpf_dltlist32)
129 #define BIOCSETWF32 _IOW('B',123, struct bpf_program32)
130 #define BIOCSETFNR32 _IOW('B',130, struct bpf_program32)
131 #endif
132
133 /*
134 * bpf_iflist is a list of BPF interface structures, each corresponding to a
135 * specific DLT. The same network interface might have several BPF interface
136 * structures registered by different layers in the stack (i.e., 802.11
137 * frames, ethernet frames, etc).
138 */
139 static LIST_HEAD(, bpf_if) bpf_iflist;
140 static struct mtx bpf_mtx; /* bpf global lock */
141 static int bpf_bpfd_cnt;
142
143 static void bpf_attachd(struct bpf_d *, struct bpf_if *);
144 static void bpf_detachd(struct bpf_d *);
145 static void bpf_detachd_locked(struct bpf_d *);
146 static void bpf_freed(struct bpf_d *);
147 static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
148 struct sockaddr *, int *, struct bpf_insn *);
149 static int bpf_setif(struct bpf_d *, struct ifreq *);
150 static void bpf_timed_out(void *);
151 static __inline void
152 bpf_wakeup(struct bpf_d *);
153 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
154 void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
155 struct timeval *);
156 static void reset_d(struct bpf_d *);
157 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
158 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
159 static int bpf_setdlt(struct bpf_d *, u_int);
160 static void filt_bpfdetach(struct knote *);
161 static int filt_bpfread(struct knote *, long);
162 static void bpf_drvinit(void *);
163 static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
164
165 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
166 int bpf_maxinsns = BPF_MAXINSNS;
167 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
168 &bpf_maxinsns, 0, "Maximum bpf program instructions");
169 static int bpf_zerocopy_enable = 0;
170 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
171 &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
172 SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
173 bpf_stats_sysctl, "bpf statistics portal");
174
175 static VNET_DEFINE(int, bpf_optimize_writers) = 0;
176 #define V_bpf_optimize_writers VNET(bpf_optimize_writers)
177 SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
178 CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
179 "Do not send packets until BPF program is set");
180
181 static d_open_t bpfopen;
182 static d_read_t bpfread;
183 static d_write_t bpfwrite;
184 static d_ioctl_t bpfioctl;
185 static d_poll_t bpfpoll;
186 static d_kqfilter_t bpfkqfilter;
187
188 static struct cdevsw bpf_cdevsw = {
189 .d_version = D_VERSION,
190 .d_open = bpfopen,
191 .d_read = bpfread,
192 .d_write = bpfwrite,
193 .d_ioctl = bpfioctl,
194 .d_poll = bpfpoll,
195 .d_name = "bpf",
196 .d_kqfilter = bpfkqfilter,
197 };
198
199 static struct filterops bpfread_filtops =
200 { 1, NULL, filt_bpfdetach, filt_bpfread };
201
202 eventhandler_tag bpf_ifdetach_cookie = NULL;
203
204 /*
205 * LOCKING MODEL USED BY BPF:
206 * Locks:
207 * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal,
208 * some global counters and every bpf_if reference.
209 * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters.
210 * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields
211 * used by bpf_mtap code.
212 *
213 * Lock order:
214 *
215 * Global lock, interface lock, descriptor lock
216 *
217 * We have to acquire interface lock before descriptor main lock due to BPF_MTAP[2]
218 * working model. In many places (like bpf_detachd) we start with BPF descriptor
219 * (and we need to at least rlock it to get reliable interface pointer). This
220 * gives us potential LOR. As a result, we use global lock to protect from bpf_if
221 * change in every such place.
222 *
223 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
224 * 3) descriptor main wlock.
225 * Reading bd_bif can be protected by any of these locks, typically global lock.
226 *
227 * Changing read/write BPF filter is protected by the same three locks,
228 * the same applies for reading.
229 *
230 * Sleeping in global lock is not allowed due to bpfdetach() using it.
231 */
232
233 /*
234 * Wrapper functions for various buffering methods. If the set of buffer
235 * modes expands, we will probably want to introduce a switch data structure
236 * similar to protosw, et.
237 */
238 static void
239 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
240 u_int len)
241 {
242
243 BPFD_LOCK_ASSERT(d);
244
245 switch (d->bd_bufmode) {
246 case BPF_BUFMODE_BUFFER:
247 return (bpf_buffer_append_bytes(d, buf, offset, src, len));
248
249 case BPF_BUFMODE_ZBUF:
250 d->bd_zcopy++;
251 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
252
253 default:
254 panic("bpf_buf_append_bytes");
255 }
256 }
257
258 static void
259 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
260 u_int len)
261 {
262
263 BPFD_LOCK_ASSERT(d);
264
265 switch (d->bd_bufmode) {
266 case BPF_BUFMODE_BUFFER:
267 return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
268
269 case BPF_BUFMODE_ZBUF:
270 d->bd_zcopy++;
271 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
272
273 default:
274 panic("bpf_buf_append_mbuf");
275 }
276 }
277
278 /*
279 * This function gets called when the free buffer is re-assigned.
280 */
281 static void
282 bpf_buf_reclaimed(struct bpf_d *d)
283 {
284
285 BPFD_LOCK_ASSERT(d);
286
287 switch (d->bd_bufmode) {
288 case BPF_BUFMODE_BUFFER:
289 return;
290
291 case BPF_BUFMODE_ZBUF:
292 bpf_zerocopy_buf_reclaimed(d);
293 return;
294
295 default:
296 panic("bpf_buf_reclaimed");
297 }
298 }
299
300 /*
301 * If the buffer mechanism has a way to decide that a held buffer can be made
302 * free, then it is exposed via the bpf_canfreebuf() interface. (1) is
303 * returned if the buffer can be discarded, (0) is returned if it cannot.
304 */
305 static int
306 bpf_canfreebuf(struct bpf_d *d)
307 {
308
309 BPFD_LOCK_ASSERT(d);
310
311 switch (d->bd_bufmode) {
312 case BPF_BUFMODE_ZBUF:
313 return (bpf_zerocopy_canfreebuf(d));
314 }
315 return (0);
316 }
317
318 /*
319 * Allow the buffer model to indicate that the current store buffer is
320 * immutable, regardless of the appearance of space. Return (1) if the
321 * buffer is writable, and (0) if not.
322 */
323 static int
324 bpf_canwritebuf(struct bpf_d *d)
325 {
326 BPFD_LOCK_ASSERT(d);
327
328 switch (d->bd_bufmode) {
329 case BPF_BUFMODE_ZBUF:
330 return (bpf_zerocopy_canwritebuf(d));
331 }
332 return (1);
333 }
334
335 /*
336 * Notify buffer model that an attempt to write to the store buffer has
337 * resulted in a dropped packet, in which case the buffer may be considered
338 * full.
339 */
340 static void
341 bpf_buffull(struct bpf_d *d)
342 {
343
344 BPFD_LOCK_ASSERT(d);
345
346 switch (d->bd_bufmode) {
347 case BPF_BUFMODE_ZBUF:
348 bpf_zerocopy_buffull(d);
349 break;
350 }
351 }
352
353 /*
354 * Notify the buffer model that a buffer has moved into the hold position.
355 */
356 void
357 bpf_bufheld(struct bpf_d *d)
358 {
359
360 BPFD_LOCK_ASSERT(d);
361
362 switch (d->bd_bufmode) {
363 case BPF_BUFMODE_ZBUF:
364 bpf_zerocopy_bufheld(d);
365 break;
366 }
367 }
368
369 static void
370 bpf_free(struct bpf_d *d)
371 {
372
373 switch (d->bd_bufmode) {
374 case BPF_BUFMODE_BUFFER:
375 return (bpf_buffer_free(d));
376
377 case BPF_BUFMODE_ZBUF:
378 return (bpf_zerocopy_free(d));
379
380 default:
381 panic("bpf_buf_free");
382 }
383 }
384
385 static int
386 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
387 {
388
389 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
390 return (EOPNOTSUPP);
391 return (bpf_buffer_uiomove(d, buf, len, uio));
392 }
393
394 static int
395 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
396 {
397
398 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
399 return (EOPNOTSUPP);
400 return (bpf_buffer_ioctl_sblen(d, i));
401 }
402
403 static int
404 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
405 {
406
407 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
408 return (EOPNOTSUPP);
409 return (bpf_zerocopy_ioctl_getzmax(td, d, i));
410 }
411
412 static int
413 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
414 {
415
416 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
417 return (EOPNOTSUPP);
418 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
419 }
420
421 static int
422 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
423 {
424
425 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
426 return (EOPNOTSUPP);
427 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
428 }
429
430 /*
431 * General BPF functions.
432 */
433 static int
434 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
435 struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
436 {
437 const struct ieee80211_bpf_params *p;
438 struct ether_header *eh;
439 struct mbuf *m;
440 int error;
441 int len;
442 int hlen;
443 int slen;
444
445 /*
446 * Build a sockaddr based on the data link layer type.
447 * We do this at this level because the ethernet header
448 * is copied directly into the data field of the sockaddr.
449 * In the case of SLIP, there is no header and the packet
450 * is forwarded as is.
451 * Also, we are careful to leave room at the front of the mbuf
452 * for the link level header.
453 */
454 switch (linktype) {
455
456 case DLT_SLIP:
457 sockp->sa_family = AF_INET;
458 hlen = 0;
459 break;
460
461 case DLT_EN10MB:
462 sockp->sa_family = AF_UNSPEC;
463 /* XXX Would MAXLINKHDR be better? */
464 hlen = ETHER_HDR_LEN;
465 break;
466
467 case DLT_FDDI:
468 sockp->sa_family = AF_IMPLINK;
469 hlen = 0;
470 break;
471
472 case DLT_RAW:
473 sockp->sa_family = AF_UNSPEC;
474 hlen = 0;
475 break;
476
477 case DLT_NULL:
478 /*
479 * null interface types require a 4 byte pseudo header which
480 * corresponds to the address family of the packet.
481 */
482 sockp->sa_family = AF_UNSPEC;
483 hlen = 4;
484 break;
485
486 case DLT_ATM_RFC1483:
487 /*
488 * en atm driver requires 4-byte atm pseudo header.
489 * though it isn't standard, vpi:vci needs to be
490 * specified anyway.
491 */
492 sockp->sa_family = AF_UNSPEC;
493 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
494 break;
495
496 case DLT_PPP:
497 sockp->sa_family = AF_UNSPEC;
498 hlen = 4; /* This should match PPP_HDRLEN */
499 break;
500
501 case DLT_IEEE802_11: /* IEEE 802.11 wireless */
502 sockp->sa_family = AF_IEEE80211;
503 hlen = 0;
504 break;
505
506 case DLT_IEEE802_11_RADIO: /* IEEE 802.11 wireless w/ phy params */
507 sockp->sa_family = AF_IEEE80211;
508 sockp->sa_len = 12; /* XXX != 0 */
509 hlen = sizeof(struct ieee80211_bpf_params);
510 break;
511
512 default:
513 return (EIO);
514 }
515
516 len = uio->uio_resid;
517
518 if (len - hlen > ifp->if_mtu)
519 return (EMSGSIZE);
520
521 if ((unsigned)len > MJUM16BYTES)
522 return (EIO);
523
524 if (len <= MHLEN)
525 MGETHDR(m, M_WAIT, MT_DATA);
526 else if (len <= MCLBYTES)
527 m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
528 else
529 m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
530 #if (MJUMPAGESIZE > MCLBYTES)
531 len <= MJUMPAGESIZE ? MJUMPAGESIZE :
532 #endif
533 (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
534 m->m_pkthdr.len = m->m_len = len;
535 m->m_pkthdr.rcvif = NULL;
536 *mp = m;
537
538 if (m->m_len < hlen) {
539 error = EPERM;
540 goto bad;
541 }
542
543 error = uiomove(mtod(m, u_char *), len, uio);
544 if (error)
545 goto bad;
546
547 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
548 if (slen == 0) {
549 error = EPERM;
550 goto bad;
551 }
552
553 /* Check for multicast destination */
554 switch (linktype) {
555 case DLT_EN10MB:
556 eh = mtod(m, struct ether_header *);
557 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
558 if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
559 ETHER_ADDR_LEN) == 0)
560 m->m_flags |= M_BCAST;
561 else
562 m->m_flags |= M_MCAST;
563 }
564 break;
565 }
566
567 /*
568 * Make room for link header, and copy it to sockaddr
569 */
570 if (hlen != 0) {
571 if (sockp->sa_family == AF_IEEE80211) {
572 /*
573 * Collect true length from the parameter header
574 * NB: sockp is known to be zero'd so if we do a
575 * short copy unspecified parameters will be
576 * zero.
577 * NB: packet may not be aligned after stripping
578 * bpf params
579 * XXX check ibp_vers
580 */
581 p = mtod(m, const struct ieee80211_bpf_params *);
582 hlen = p->ibp_len;
583 if (hlen > sizeof(sockp->sa_data)) {
584 error = EINVAL;
585 goto bad;
586 }
587 }
588 bcopy(m->m_data, sockp->sa_data, hlen);
589 }
590 *hdrlen = hlen;
591
592 return (0);
593 bad:
594 m_freem(m);
595 return (error);
596 }
597
598 /*
599 * Attach file to the bpf interface, i.e. make d listen on bp.
600 */
601 static void
602 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
603 {
604 int op_w;
605
606 BPF_LOCK_ASSERT();
607
608 /*
609 * Save sysctl value to protect from sysctl change
610 * between reads
611 */
612 op_w = V_bpf_optimize_writers;
613
614 if (d->bd_bif != NULL)
615 bpf_detachd_locked(d);
616 /*
617 * Point d at bp, and add d to the interface's list.
618 * Since there are many applicaiotns using BPF for
619 * sending raw packets only (dhcpd, cdpd are good examples)
620 * we can delay adding d to the list of active listeners until
621 * some filter is configured.
622 */
623
624 BPFIF_WLOCK(bp);
625 BPFD_LOCK(d);
626
627 d->bd_bif = bp;
628
629 if (op_w != 0) {
630 /* Add to writers-only list */
631 LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
632 /*
633 * We decrement bd_writer on every filter set operation.
634 * First BIOCSETF is done by pcap_open_live() to set up
635 * snap length. After that appliation usually sets its own filter
636 */
637 d->bd_writer = 2;
638 } else
639 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
640
641 BPFD_UNLOCK(d);
642 BPFIF_WUNLOCK(bp);
643
644 bpf_bpfd_cnt++;
645
646 CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
647 __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
648
649 if (op_w == 0)
650 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
651 }
652
653 /*
654 * Add d to the list of active bp filters.
655 * Reuqires bpf_attachd() to be called before
656 */
657 static void
658 bpf_upgraded(struct bpf_d *d)
659 {
660 struct bpf_if *bp;
661
662 BPF_LOCK_ASSERT();
663
664 bp = d->bd_bif;
665
666 /*
667 * Filter can be set several times without specifying interface.
668 * Mark d as reader and exit.
669 */
670 if (bp == NULL) {
671 BPFD_LOCK(d);
672 d->bd_writer = 0;
673 BPFD_UNLOCK(d);
674 return;
675 }
676
677 BPFIF_WLOCK(bp);
678 BPFD_LOCK(d);
679
680 /* Remove from writers-only list */
681 LIST_REMOVE(d, bd_next);
682 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
683 /* Mark d as reader */
684 d->bd_writer = 0;
685
686 BPFD_UNLOCK(d);
687 BPFIF_WUNLOCK(bp);
688
689 CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);
690
691 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
692 }
693
694 /*
695 * Detach a file from its interface.
696 */
697 static void
698 bpf_detachd(struct bpf_d *d)
699 {
700 BPF_LOCK();
701 bpf_detachd_locked(d);
702 BPF_UNLOCK();
703 }
704
705 static void
706 bpf_detachd_locked(struct bpf_d *d)
707 {
708 int error;
709 struct bpf_if *bp;
710 struct ifnet *ifp;
711
712 CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
713
714 BPF_LOCK_ASSERT();
715
716 /* Check if descriptor is attached */
717 if ((bp = d->bd_bif) == NULL)
718 return;
719
720 BPFIF_WLOCK(bp);
721 BPFD_LOCK(d);
722
723 /* Save bd_writer value */
724 error = d->bd_writer;
725
726 /*
727 * Remove d from the interface's descriptor list.
728 */
729 LIST_REMOVE(d, bd_next);
730
731 ifp = bp->bif_ifp;
732 d->bd_bif = NULL;
733 BPFD_UNLOCK(d);
734 BPFIF_WUNLOCK(bp);
735
736 bpf_bpfd_cnt--;
737
738 /* Call event handler iff d is attached */
739 if (error == 0)
740 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
741
742 /*
743 * Check if this descriptor had requested promiscuous mode.
744 * If so, turn it off.
745 */
746 if (d->bd_promisc) {
747 d->bd_promisc = 0;
748 CURVNET_SET(ifp->if_vnet);
749 error = ifpromisc(ifp, 0);
750 CURVNET_RESTORE();
751 if (error != 0 && error != ENXIO) {
752 /*
753 * ENXIO can happen if a pccard is unplugged
754 * Something is really wrong if we were able to put
755 * the driver into promiscuous mode, but can't
756 * take it out.
757 */
758 if_printf(bp->bif_ifp,
759 "bpf_detach: ifpromisc failed (%d)\n", error);
760 }
761 }
762 }
763
764 /*
765 * Close the descriptor by detaching it from its interface,
766 * deallocating its buffers, and marking it free.
767 */
768 static void
769 bpf_dtor(void *data)
770 {
771 struct bpf_d *d = data;
772
773 BPFD_LOCK(d);
774 if (d->bd_state == BPF_WAITING)
775 callout_stop(&d->bd_callout);
776 d->bd_state = BPF_IDLE;
777 BPFD_UNLOCK(d);
778 funsetown(&d->bd_sigio);
779 bpf_detachd(d);
780 #ifdef MAC
781 mac_bpfdesc_destroy(d);
782 #endif /* MAC */
783 seldrain(&d->bd_sel);
784 knlist_destroy(&d->bd_sel.si_note);
785 callout_drain(&d->bd_callout);
786 bpf_freed(d);
787 free(d, M_BPF);
788 }
789
790 /*
791 * Open ethernet device. Returns ENXIO for illegal minor device number,
792 * EBUSY if file is open by another process.
793 */
794 /* ARGSUSED */
795 static int
796 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
797 {
798 struct bpf_d *d;
799 int error, size;
800
801 d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
802 error = devfs_set_cdevpriv(d, bpf_dtor);
803 if (error != 0) {
804 free(d, M_BPF);
805 return (error);
806 }
807
808 /*
809 * For historical reasons, perform a one-time initialization call to
810 * the buffer routines, even though we're not yet committed to a
811 * particular buffer method.
812 */
813 bpf_buffer_init(d);
814 d->bd_bufmode = BPF_BUFMODE_BUFFER;
815 d->bd_sig = SIGIO;
816 d->bd_direction = BPF_D_INOUT;
817 BPF_PID_REFRESH(d, td);
818 #ifdef MAC
819 mac_bpfdesc_init(d);
820 mac_bpfdesc_create(td->td_ucred, d);
821 #endif
822 mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
823 callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
824 knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
825
826 /* Allocate default buffers */
827 size = d->bd_bufsize;
828 bpf_buffer_ioctl_sblen(d, &size);
829
830 return (0);
831 }
832
833 /*
834 * bpfread - read next chunk of packets from buffers
835 */
836 static int
837 bpfread(struct cdev *dev, struct uio *uio, int ioflag)
838 {
839 struct bpf_d *d;
840 int error;
841 int non_block;
842 int timed_out;
843
844 error = devfs_get_cdevpriv((void **)&d);
845 if (error != 0)
846 return (error);
847
848 /*
849 * Restrict application to use a buffer the same size as
850 * as kernel buffers.
851 */
852 if (uio->uio_resid != d->bd_bufsize)
853 return (EINVAL);
854
855 non_block = ((ioflag & O_NONBLOCK) != 0);
856
857 BPFD_LOCK(d);
858 BPF_PID_REFRESH_CUR(d);
859 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
860 BPFD_UNLOCK(d);
861 return (EOPNOTSUPP);
862 }
863 if (d->bd_state == BPF_WAITING)
864 callout_stop(&d->bd_callout);
865 timed_out = (d->bd_state == BPF_TIMED_OUT);
866 d->bd_state = BPF_IDLE;
867 /*
868 * If the hold buffer is empty, then do a timed sleep, which
869 * ends when the timeout expires or when enough packets
870 * have arrived to fill the store buffer.
871 */
872 while (d->bd_hbuf == NULL) {
873 if (d->bd_slen != 0) {
874 /*
875 * A packet(s) either arrived since the previous
876 * read or arrived while we were asleep.
877 */
878 if (d->bd_immediate || non_block || timed_out) {
879 /*
880 * Rotate the buffers and return what's here
881 * if we are in immediate mode, non-blocking
882 * flag is set, or this descriptor timed out.
883 */
884 ROTATE_BUFFERS(d);
885 break;
886 }
887 }
888
889 /*
890 * No data is available, check to see if the bpf device
891 * is still pointed at a real interface. If not, return
892 * ENXIO so that the userland process knows to rebind
893 * it before using it again.
894 */
895 if (d->bd_bif == NULL) {
896 BPFD_UNLOCK(d);
897 return (ENXIO);
898 }
899
900 if (non_block) {
901 BPFD_UNLOCK(d);
902 return (EWOULDBLOCK);
903 }
904 error = msleep(d, &d->bd_lock, PRINET|PCATCH,
905 "bpf", d->bd_rtout);
906 if (error == EINTR || error == ERESTART) {
907 BPFD_UNLOCK(d);
908 return (error);
909 }
910 if (error == EWOULDBLOCK) {
911 /*
912 * On a timeout, return what's in the buffer,
913 * which may be nothing. If there is something
914 * in the store buffer, we can rotate the buffers.
915 */
916 if (d->bd_hbuf)
917 /*
918 * We filled up the buffer in between
919 * getting the timeout and arriving
920 * here, so we don't need to rotate.
921 */
922 break;
923
924 if (d->bd_slen == 0) {
925 BPFD_UNLOCK(d);
926 return (0);
927 }
928 ROTATE_BUFFERS(d);
929 break;
930 }
931 }
932 /*
933 * At this point, we know we have something in the hold slot.
934 */
935 BPFD_UNLOCK(d);
936
937 /*
938 * Move data from hold buffer into user space.
939 * We know the entire buffer is transferred since
940 * we checked above that the read buffer is bpf_bufsize bytes.
941 *
942 * XXXRW: More synchronization needed here: what if a second thread
943 * issues a read on the same fd at the same time? Don't want this
944 * getting invalidated.
945 */
946 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
947
948 BPFD_LOCK(d);
949 d->bd_fbuf = d->bd_hbuf;
950 d->bd_hbuf = NULL;
951 d->bd_hlen = 0;
952 bpf_buf_reclaimed(d);
953 BPFD_UNLOCK(d);
954
955 return (error);
956 }
957
958 /*
959 * If there are processes sleeping on this descriptor, wake them up.
960 */
961 static __inline void
962 bpf_wakeup(struct bpf_d *d)
963 {
964
965 BPFD_LOCK_ASSERT(d);
966 if (d->bd_state == BPF_WAITING) {
967 callout_stop(&d->bd_callout);
968 d->bd_state = BPF_IDLE;
969 }
970 wakeup(d);
971 if (d->bd_async && d->bd_sig && d->bd_sigio)
972 pgsigio(&d->bd_sigio, d->bd_sig, 0);
973
974 selwakeuppri(&d->bd_sel, PRINET);
975 KNOTE_LOCKED(&d->bd_sel.si_note, 0);
976 }
977
978 static void
979 bpf_timed_out(void *arg)
980 {
981 struct bpf_d *d = (struct bpf_d *)arg;
982
983 BPFD_LOCK_ASSERT(d);
984
985 if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
986 return;
987 if (d->bd_state == BPF_WAITING) {
988 d->bd_state = BPF_TIMED_OUT;
989 if (d->bd_slen != 0)
990 bpf_wakeup(d);
991 }
992 }
993
994 static int
995 bpf_ready(struct bpf_d *d)
996 {
997
998 BPFD_LOCK_ASSERT(d);
999
1000 if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1001 return (1);
1002 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1003 d->bd_slen != 0)
1004 return (1);
1005 return (0);
1006 }
1007
1008 static int
1009 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
1010 {
1011 struct bpf_d *d;
1012 struct ifnet *ifp;
1013 struct mbuf *m, *mc;
1014 struct sockaddr dst;
1015 int error, hlen;
1016
1017 error = devfs_get_cdevpriv((void **)&d);
1018 if (error != 0)
1019 return (error);
1020
1021 BPF_PID_REFRESH_CUR(d);
1022 d->bd_wcount++;
1023 /* XXX: locking required */
1024 if (d->bd_bif == NULL) {
1025 d->bd_wdcount++;
1026 return (ENXIO);
1027 }
1028
1029 ifp = d->bd_bif->bif_ifp;
1030
1031 if ((ifp->if_flags & IFF_UP) == 0) {
1032 d->bd_wdcount++;
1033 return (ENETDOWN);
1034 }
1035
1036 if (uio->uio_resid == 0) {
1037 d->bd_wdcount++;
1038 return (0);
1039 }
1040
1041 bzero(&dst, sizeof(dst));
1042 m = NULL;
1043 hlen = 0;
1044 /* XXX: bpf_movein() can sleep */
1045 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
1046 &m, &dst, &hlen, d->bd_wfilter);
1047 if (error) {
1048 d->bd_wdcount++;
1049 return (error);
1050 }
1051 d->bd_wfcount++;
1052 if (d->bd_hdrcmplt)
1053 dst.sa_family = pseudo_AF_HDRCMPLT;
1054
1055 if (d->bd_feedback) {
1056 mc = m_dup(m, M_DONTWAIT);
1057 if (mc != NULL)
1058 mc->m_pkthdr.rcvif = ifp;
1059 /* Set M_PROMISC for outgoing packets to be discarded. */
1060 if (d->bd_direction == BPF_D_INOUT)
1061 m->m_flags |= M_PROMISC;
1062 } else
1063 mc = NULL;
1064
1065 m->m_pkthdr.len -= hlen;
1066 m->m_len -= hlen;
1067 m->m_data += hlen; /* XXX */
1068
1069 CURVNET_SET(ifp->if_vnet);
1070 #ifdef MAC
1071 BPFD_LOCK(d);
1072 mac_bpfdesc_create_mbuf(d, m);
1073 if (mc != NULL)
1074 mac_bpfdesc_create_mbuf(d, mc);
1075 BPFD_UNLOCK(d);
1076 #endif
1077
1078 error = (*ifp->if_output)(ifp, m, &dst, NULL);
1079 if (error)
1080 d->bd_wdcount++;
1081
1082 if (mc != NULL) {
1083 if (error == 0)
1084 (*ifp->if_input)(ifp, mc);
1085 else
1086 m_freem(mc);
1087 }
1088 CURVNET_RESTORE();
1089
1090 return (error);
1091 }
1092
1093 /*
1094 * Reset a descriptor by flushing its packet buffer and clearing the receive
1095 * and drop counts. This is doable for kernel-only buffers, but with
1096 * zero-copy buffers, we can't write to (or rotate) buffers that are
1097 * currently owned by userspace. It would be nice if we could encapsulate
1098 * this logic in the buffer code rather than here.
1099 */
1100 static void
1101 reset_d(struct bpf_d *d)
1102 {
1103
1104 BPFD_LOCK_ASSERT(d);
1105
1106 if ((d->bd_hbuf != NULL) &&
1107 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1108 /* Free the hold buffer. */
1109 d->bd_fbuf = d->bd_hbuf;
1110 d->bd_hbuf = NULL;
1111 d->bd_hlen = 0;
1112 bpf_buf_reclaimed(d);
1113 }
1114 if (bpf_canwritebuf(d))
1115 d->bd_slen = 0;
1116 d->bd_rcount = 0;
1117 d->bd_dcount = 0;
1118 d->bd_fcount = 0;
1119 d->bd_wcount = 0;
1120 d->bd_wfcount = 0;
1121 d->bd_wdcount = 0;
1122 d->bd_zcopy = 0;
1123 }
1124
1125 /*
1126 * FIONREAD Check for read packet available.
1127 * SIOCGIFADDR Get interface address - convenient hook to driver.
1128 * BIOCGBLEN Get buffer len [for read()].
1129 * BIOCSETF Set read filter.
1130 * BIOCSETFNR Set read filter without resetting descriptor.
1131 * BIOCSETWF Set write filter.
1132 * BIOCFLUSH Flush read packet buffer.
1133 * BIOCPROMISC Put interface into promiscuous mode.
1134 * BIOCGDLT Get link layer type.
1135 * BIOCGETIF Get interface name.
1136 * BIOCSETIF Set interface.
1137 * BIOCSRTIMEOUT Set read timeout.
1138 * BIOCGRTIMEOUT Get read timeout.
1139 * BIOCGSTATS Get packet stats.
1140 * BIOCIMMEDIATE Set immediate mode.
1141 * BIOCVERSION Get filter language version.
1142 * BIOCGHDRCMPLT Get "header already complete" flag
1143 * BIOCSHDRCMPLT Set "header already complete" flag
1144 * BIOCGDIRECTION Get packet direction flag
1145 * BIOCSDIRECTION Set packet direction flag
1146 * BIOCLOCK Set "locked" flag
1147 * BIOCFEEDBACK Set packet feedback mode.
1148 * BIOCSETZBUF Set current zero-copy buffer locations.
1149 * BIOCGETZMAX Get maximum zero-copy buffer size.
1150 * BIOCROTZBUF Force rotation of zero-copy buffer
1151 * BIOCSETBUFMODE Set buffer mode.
1152 * BIOCGETBUFMODE Get current buffer mode.
1153 */
1154 /* ARGSUSED */
1155 static int
1156 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1157 struct thread *td)
1158 {
1159 struct bpf_d *d;
1160 int error;
1161
1162 error = devfs_get_cdevpriv((void **)&d);
1163 if (error != 0)
1164 return (error);
1165
1166 /*
1167 * Refresh PID associated with this descriptor.
1168 */
1169 BPFD_LOCK(d);
1170 BPF_PID_REFRESH(d, td);
1171 if (d->bd_state == BPF_WAITING)
1172 callout_stop(&d->bd_callout);
1173 d->bd_state = BPF_IDLE;
1174 BPFD_UNLOCK(d);
1175
1176 if (d->bd_locked == 1) {
1177 switch (cmd) {
1178 case BIOCGBLEN:
1179 case BIOCFLUSH:
1180 case BIOCGDLT:
1181 case BIOCGDLTLIST:
1182 #ifdef COMPAT_FREEBSD32
1183 case BIOCGDLTLIST32:
1184 #endif
1185 case BIOCGETIF:
1186 case BIOCGRTIMEOUT:
1187 #ifdef COMPAT_FREEBSD32
1188 case BIOCGRTIMEOUT32:
1189 #endif
1190 case BIOCGSTATS:
1191 case BIOCVERSION:
1192 case BIOCGRSIG:
1193 case BIOCGHDRCMPLT:
1194 case BIOCFEEDBACK:
1195 case FIONREAD:
1196 case BIOCLOCK:
1197 case BIOCSRTIMEOUT:
1198 #ifdef COMPAT_FREEBSD32
1199 case BIOCSRTIMEOUT32:
1200 #endif
1201 case BIOCIMMEDIATE:
1202 case TIOCGPGRP:
1203 case BIOCROTZBUF:
1204 break;
1205 default:
1206 return (EPERM);
1207 }
1208 }
1209 #ifdef COMPAT_FREEBSD32
1210 /*
1211 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1212 * that it will get 32-bit packet headers.
1213 */
1214 switch (cmd) {
1215 case BIOCSETF32:
1216 case BIOCSETFNR32:
1217 case BIOCSETWF32:
1218 case BIOCGDLTLIST32:
1219 case BIOCGRTIMEOUT32:
1220 case BIOCSRTIMEOUT32:
1221 BPFD_LOCK(d);
1222 d->bd_compat32 = 1;
1223 BPFD_UNLOCK(d);
1224 }
1225 #endif
1226
1227 CURVNET_SET(TD_TO_VNET(td));
1228 switch (cmd) {
1229
1230 default:
1231 error = EINVAL;
1232 break;
1233
1234 /*
1235 * Check for read packet available.
1236 */
1237 case FIONREAD:
1238 {
1239 int n;
1240
1241 BPFD_LOCK(d);
1242 n = d->bd_slen;
1243 if (d->bd_hbuf)
1244 n += d->bd_hlen;
1245 BPFD_UNLOCK(d);
1246
1247 *(int *)addr = n;
1248 break;
1249 }
1250
1251 case SIOCGIFADDR:
1252 {
1253 struct ifnet *ifp;
1254
1255 if (d->bd_bif == NULL)
1256 error = EINVAL;
1257 else {
1258 ifp = d->bd_bif->bif_ifp;
1259 error = (*ifp->if_ioctl)(ifp, cmd, addr);
1260 }
1261 break;
1262 }
1263
1264 /*
1265 * Get buffer len [for read()].
1266 */
1267 case BIOCGBLEN:
1268 BPFD_LOCK(d);
1269 *(u_int *)addr = d->bd_bufsize;
1270 BPFD_UNLOCK(d);
1271 break;
1272
1273 /*
1274 * Set buffer length.
1275 */
1276 case BIOCSBLEN:
1277 error = bpf_ioctl_sblen(d, (u_int *)addr);
1278 break;
1279
1280 /*
1281 * Set link layer read filter.
1282 */
1283 case BIOCSETF:
1284 case BIOCSETFNR:
1285 case BIOCSETWF:
1286 #ifdef COMPAT_FREEBSD32
1287 case BIOCSETF32:
1288 case BIOCSETFNR32:
1289 case BIOCSETWF32:
1290 #endif
1291 error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1292 break;
1293
1294 /*
1295 * Flush read packet buffer.
1296 */
1297 case BIOCFLUSH:
1298 BPFD_LOCK(d);
1299 reset_d(d);
1300 BPFD_UNLOCK(d);
1301 break;
1302
1303 /*
1304 * Put interface into promiscuous mode.
1305 */
1306 case BIOCPROMISC:
1307 if (d->bd_bif == NULL) {
1308 /*
1309 * No interface attached yet.
1310 */
1311 error = EINVAL;
1312 break;
1313 }
1314 if (d->bd_promisc == 0) {
1315 error = ifpromisc(d->bd_bif->bif_ifp, 1);
1316 if (error == 0)
1317 d->bd_promisc = 1;
1318 }
1319 break;
1320
1321 /*
1322 * Get current data link type.
1323 */
1324 case BIOCGDLT:
1325 BPF_LOCK();
1326 if (d->bd_bif == NULL)
1327 error = EINVAL;
1328 else
1329 *(u_int *)addr = d->bd_bif->bif_dlt;
1330 BPF_UNLOCK();
1331 break;
1332
1333 /*
1334 * Get a list of supported data link types.
1335 */
1336 #ifdef COMPAT_FREEBSD32
1337 case BIOCGDLTLIST32:
1338 {
1339 struct bpf_dltlist32 *list32;
1340 struct bpf_dltlist dltlist;
1341
1342 list32 = (struct bpf_dltlist32 *)addr;
1343 dltlist.bfl_len = list32->bfl_len;
1344 dltlist.bfl_list = PTRIN(list32->bfl_list);
1345 BPF_LOCK();
1346 if (d->bd_bif == NULL)
1347 error = EINVAL;
1348 else {
1349 error = bpf_getdltlist(d, &dltlist);
1350 if (error == 0)
1351 list32->bfl_len = dltlist.bfl_len;
1352 }
1353 BPF_UNLOCK();
1354 break;
1355 }
1356 #endif
1357
1358 case BIOCGDLTLIST:
1359 BPF_LOCK();
1360 if (d->bd_bif == NULL)
1361 error = EINVAL;
1362 else
1363 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1364 BPF_UNLOCK();
1365 break;
1366
1367 /*
1368 * Set data link type.
1369 */
1370 case BIOCSDLT:
1371 BPF_LOCK();
1372 if (d->bd_bif == NULL)
1373 error = EINVAL;
1374 else
1375 error = bpf_setdlt(d, *(u_int *)addr);
1376 BPF_UNLOCK();
1377 break;
1378
1379 /*
1380 * Get interface name.
1381 */
1382 case BIOCGETIF:
1383 BPF_LOCK();
1384 if (d->bd_bif == NULL)
1385 error = EINVAL;
1386 else {
1387 struct ifnet *const ifp = d->bd_bif->bif_ifp;
1388 struct ifreq *const ifr = (struct ifreq *)addr;
1389
1390 strlcpy(ifr->ifr_name, ifp->if_xname,
1391 sizeof(ifr->ifr_name));
1392 }
1393 BPF_UNLOCK();
1394 break;
1395
1396 /*
1397 * Set interface.
1398 */
1399 case BIOCSETIF:
1400 BPF_LOCK();
1401 error = bpf_setif(d, (struct ifreq *)addr);
1402 BPF_UNLOCK();
1403 break;
1404
1405 /*
1406 * Set read timeout.
1407 */
1408 case BIOCSRTIMEOUT:
1409 #ifdef COMPAT_FREEBSD32
1410 case BIOCSRTIMEOUT32:
1411 #endif
1412 {
1413 struct timeval *tv = (struct timeval *)addr;
1414 #ifdef COMPAT_FREEBSD32
1415 struct timeval32 *tv32;
1416 struct timeval tv64;
1417
1418 if (cmd == BIOCSRTIMEOUT32) {
1419 tv32 = (struct timeval32 *)addr;
1420 tv = &tv64;
1421 tv->tv_sec = tv32->tv_sec;
1422 tv->tv_usec = tv32->tv_usec;
1423 } else
1424 #endif
1425 tv = (struct timeval *)addr;
1426
1427 /*
1428 * Subtract 1 tick from tvtohz() since this isn't
1429 * a one-shot timer.
1430 */
1431 if ((error = itimerfix(tv)) == 0)
1432 d->bd_rtout = tvtohz(tv) - 1;
1433 break;
1434 }
1435
1436 /*
1437 * Get read timeout.
1438 */
1439 case BIOCGRTIMEOUT:
1440 #ifdef COMPAT_FREEBSD32
1441 case BIOCGRTIMEOUT32:
1442 #endif
1443 {
1444 struct timeval *tv;
1445 #ifdef COMPAT_FREEBSD32
1446 struct timeval32 *tv32;
1447 struct timeval tv64;
1448
1449 if (cmd == BIOCGRTIMEOUT32)
1450 tv = &tv64;
1451 else
1452 #endif
1453 tv = (struct timeval *)addr;
1454
1455 tv->tv_sec = d->bd_rtout / hz;
1456 tv->tv_usec = (d->bd_rtout % hz) * tick;
1457 #ifdef COMPAT_FREEBSD32
1458 if (cmd == BIOCGRTIMEOUT32) {
1459 tv32 = (struct timeval32 *)addr;
1460 tv32->tv_sec = tv->tv_sec;
1461 tv32->tv_usec = tv->tv_usec;
1462 }
1463 #endif
1464
1465 break;
1466 }
1467
1468 /*
1469 * Get packet stats.
1470 */
1471 case BIOCGSTATS:
1472 {
1473 struct bpf_stat *bs = (struct bpf_stat *)addr;
1474
1475 /* XXXCSJP overflow */
1476 bs->bs_recv = d->bd_rcount;
1477 bs->bs_drop = d->bd_dcount;
1478 break;
1479 }
1480
1481 /*
1482 * Set immediate mode.
1483 */
1484 case BIOCIMMEDIATE:
1485 BPFD_LOCK(d);
1486 d->bd_immediate = *(u_int *)addr;
1487 BPFD_UNLOCK(d);
1488 break;
1489
1490 case BIOCVERSION:
1491 {
1492 struct bpf_version *bv = (struct bpf_version *)addr;
1493
1494 bv->bv_major = BPF_MAJOR_VERSION;
1495 bv->bv_minor = BPF_MINOR_VERSION;
1496 break;
1497 }
1498
1499 /*
1500 * Get "header already complete" flag
1501 */
1502 case BIOCGHDRCMPLT:
1503 BPFD_LOCK(d);
1504 *(u_int *)addr = d->bd_hdrcmplt;
1505 BPFD_UNLOCK(d);
1506 break;
1507
1508 /*
1509 * Set "header already complete" flag
1510 */
1511 case BIOCSHDRCMPLT:
1512 BPFD_LOCK(d);
1513 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1514 BPFD_UNLOCK(d);
1515 break;
1516
1517 /*
1518 * Get packet direction flag
1519 */
1520 case BIOCGDIRECTION:
1521 BPFD_LOCK(d);
1522 *(u_int *)addr = d->bd_direction;
1523 BPFD_UNLOCK(d);
1524 break;
1525
1526 /*
1527 * Set packet direction flag
1528 */
1529 case BIOCSDIRECTION:
1530 {
1531 u_int direction;
1532
1533 direction = *(u_int *)addr;
1534 switch (direction) {
1535 case BPF_D_IN:
1536 case BPF_D_INOUT:
1537 case BPF_D_OUT:
1538 BPFD_LOCK(d);
1539 d->bd_direction = direction;
1540 BPFD_UNLOCK(d);
1541 break;
1542 default:
1543 error = EINVAL;
1544 }
1545 }
1546 break;
1547
1548 case BIOCFEEDBACK:
1549 BPFD_LOCK(d);
1550 d->bd_feedback = *(u_int *)addr;
1551 BPFD_UNLOCK(d);
1552 break;
1553
1554 case BIOCLOCK:
1555 BPFD_LOCK(d);
1556 d->bd_locked = 1;
1557 BPFD_UNLOCK(d);
1558 break;
1559
1560 case FIONBIO: /* Non-blocking I/O */
1561 break;
1562
1563 case FIOASYNC: /* Send signal on receive packets */
1564 BPFD_LOCK(d);
1565 d->bd_async = *(int *)addr;
1566 BPFD_UNLOCK(d);
1567 break;
1568
1569 case FIOSETOWN:
1570 /*
1571 * XXX: Add some sort of locking here?
1572 * fsetown() can sleep.
1573 */
1574 error = fsetown(*(int *)addr, &d->bd_sigio);
1575 break;
1576
1577 case FIOGETOWN:
1578 BPFD_LOCK(d);
1579 *(int *)addr = fgetown(&d->bd_sigio);
1580 BPFD_UNLOCK(d);
1581 break;
1582
1583 /* This is deprecated, FIOSETOWN should be used instead. */
1584 case TIOCSPGRP:
1585 error = fsetown(-(*(int *)addr), &d->bd_sigio);
1586 break;
1587
1588 /* This is deprecated, FIOGETOWN should be used instead. */
1589 case TIOCGPGRP:
1590 *(int *)addr = -fgetown(&d->bd_sigio);
1591 break;
1592
1593 case BIOCSRSIG: /* Set receive signal */
1594 {
1595 u_int sig;
1596
1597 sig = *(u_int *)addr;
1598
1599 if (sig >= NSIG)
1600 error = EINVAL;
1601 else {
1602 BPFD_LOCK(d);
1603 d->bd_sig = sig;
1604 BPFD_UNLOCK(d);
1605 }
1606 break;
1607 }
1608 case BIOCGRSIG:
1609 BPFD_LOCK(d);
1610 *(u_int *)addr = d->bd_sig;
1611 BPFD_UNLOCK(d);
1612 break;
1613
1614 case BIOCGETBUFMODE:
1615 BPFD_LOCK(d);
1616 *(u_int *)addr = d->bd_bufmode;
1617 BPFD_UNLOCK(d);
1618 break;
1619
1620 case BIOCSETBUFMODE:
1621 /*
1622 * Allow the buffering mode to be changed as long as we
1623 * haven't yet committed to a particular mode. Our
1624 * definition of commitment, for now, is whether or not a
1625 * buffer has been allocated or an interface attached, since
1626 * that's the point where things get tricky.
1627 */
1628 switch (*(u_int *)addr) {
1629 case BPF_BUFMODE_BUFFER:
1630 break;
1631
1632 case BPF_BUFMODE_ZBUF:
1633 if (bpf_zerocopy_enable)
1634 break;
1635 /* FALLSTHROUGH */
1636
1637 default:
1638 CURVNET_RESTORE();
1639 return (EINVAL);
1640 }
1641
1642 BPFD_LOCK(d);
1643 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1644 d->bd_fbuf != NULL || d->bd_bif != NULL) {
1645 BPFD_UNLOCK(d);
1646 CURVNET_RESTORE();
1647 return (EBUSY);
1648 }
1649 d->bd_bufmode = *(u_int *)addr;
1650 BPFD_UNLOCK(d);
1651 break;
1652
1653 case BIOCGETZMAX:
1654 error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1655 break;
1656
1657 case BIOCSETZBUF:
1658 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1659 break;
1660
1661 case BIOCROTZBUF:
1662 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1663 break;
1664 }
1665 CURVNET_RESTORE();
1666 return (error);
1667 }
1668
1669 /*
1670 * Set d's packet filter program to fp. If this file already has a filter,
1671 * free it and replace it. Returns EINVAL for bogus requests.
1672 *
1673 * Note we need global lock here to serialize bpf_setf() and bpf_setif() calls
1674 * since reading d->bd_bif can't be protected by d or interface lock due to
1675 * lock order.
1676 *
1677 * Additionally, we have to acquire interface write lock due to bpf_mtap() uses
1678 * interface read lock to read all filers.
1679 *
1680 */
1681 static int
1682 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1683 {
1684 #ifdef COMPAT_FREEBSD32
1685 struct bpf_program fp_swab;
1686 struct bpf_program32 *fp32;
1687 #endif
1688 struct bpf_insn *fcode, *old;
1689 #ifdef BPF_JITTER
1690 bpf_jit_filter *jfunc, *ofunc;
1691 #endif
1692 size_t size;
1693 u_int flen;
1694 int need_upgrade;
1695
1696 #ifdef COMPAT_FREEBSD32
1697 switch (cmd) {
1698 case BIOCSETF32:
1699 case BIOCSETWF32:
1700 case BIOCSETFNR32:
1701 fp32 = (struct bpf_program32 *)fp;
1702 fp_swab.bf_len = fp32->bf_len;
1703 fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1704 fp = &fp_swab;
1705 switch (cmd) {
1706 case BIOCSETF32:
1707 cmd = BIOCSETF;
1708 break;
1709 case BIOCSETWF32:
1710 cmd = BIOCSETWF;
1711 break;
1712 }
1713 break;
1714 }
1715 #endif
1716
1717 fcode = NULL;
1718 #ifdef BPF_JITTER
1719 jfunc = ofunc = NULL;
1720 #endif
1721 need_upgrade = 0;
1722
1723 /*
1724 * Check new filter validness before acquiring any locks.
1725 * Allocate memory for new filter, if needed.
1726 */
1727 flen = fp->bf_len;
1728 if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1729 return (EINVAL);
1730 size = flen * sizeof(*fp->bf_insns);
1731 if (size > 0) {
1732 /* We're setting up new filter. Copy and check actual data. */
1733 fcode = malloc(size, M_BPF, M_WAITOK);
1734 if (copyin(fp->bf_insns, fcode, size) != 0 ||
1735 !bpf_validate(fcode, flen)) {
1736 free(fcode, M_BPF);
1737 return (EINVAL);
1738 }
1739 #ifdef BPF_JITTER
1740 /* Filter is copied inside fcode and is perfectly valid. */
1741 jfunc = bpf_jitter(fcode, flen);
1742 #endif
1743 }
1744
1745 BPF_LOCK();
1746
1747 /*
1748 * Set up new filter.
1749 * Protect filter change by interface lock.
1750 * Additionally, we are protected by global lock here.
1751 */
1752 if (d->bd_bif != NULL)
1753 BPFIF_WLOCK(d->bd_bif);
1754 BPFD_LOCK(d);
1755 if (cmd == BIOCSETWF) {
1756 old = d->bd_wfilter;
1757 d->bd_wfilter = fcode;
1758 } else {
1759 old = d->bd_rfilter;
1760 d->bd_rfilter = fcode;
1761 #ifdef BPF_JITTER
1762 ofunc = d->bd_bfilter;
1763 d->bd_bfilter = jfunc;
1764 #endif
1765 if (cmd == BIOCSETF)
1766 reset_d(d);
1767
1768 if (fcode != NULL) {
1769 /*
1770 * Do not require upgrade by first BIOCSETF
1771 * (used to set snaplen) by pcap_open_live().
1772 */
1773 if (d->bd_writer != 0 && --d->bd_writer == 0)
1774 need_upgrade = 1;
1775 CTR4(KTR_NET, "%s: filter function set by pid %d, "
1776 "bd_writer counter %d, need_upgrade %d",
1777 __func__, d->bd_pid, d->bd_writer, need_upgrade);
1778 }
1779 }
1780 BPFD_UNLOCK(d);
1781 if (d->bd_bif != NULL)
1782 BPFIF_WUNLOCK(d->bd_bif);
1783 if (old != NULL)
1784 free(old, M_BPF);
1785 #ifdef BPF_JITTER
1786 if (ofunc != NULL)
1787 bpf_destroy_jit_filter(ofunc);
1788 #endif
1789
1790 /* Move d to active readers list. */
1791 if (need_upgrade)
1792 bpf_upgraded(d);
1793
1794 BPF_UNLOCK();
1795 return (0);
1796 }
1797
1798 /*
1799 * Detach a file from its current interface (if attached at all) and attach
1800 * to the interface indicated by the name stored in ifr.
1801 * Return an errno or 0.
1802 */
1803 static int
1804 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1805 {
1806 struct bpf_if *bp;
1807 struct ifnet *theywant;
1808
1809 BPF_LOCK_ASSERT();
1810
1811 theywant = ifunit(ifr->ifr_name);
1812 if (theywant == NULL || theywant->if_bpf == NULL)
1813 return (ENXIO);
1814
1815 bp = theywant->if_bpf;
1816
1817 /* Check if interface is not being detached from BPF */
1818 BPFIF_RLOCK(bp);
1819 if (bp->flags & BPFIF_FLAG_DYING) {
1820 BPFIF_RUNLOCK(bp);
1821 return (ENXIO);
1822 }
1823 BPFIF_RUNLOCK(bp);
1824
1825 /*
1826 * Behavior here depends on the buffering model. If we're using
1827 * kernel memory buffers, then we can allocate them here. If we're
1828 * using zero-copy, then the user process must have registered
1829 * buffers by the time we get here. If not, return an error.
1830 */
1831 switch (d->bd_bufmode) {
1832 case BPF_BUFMODE_BUFFER:
1833 case BPF_BUFMODE_ZBUF:
1834 if (d->bd_sbuf == NULL)
1835 return (EINVAL);
1836 break;
1837
1838 default:
1839 panic("bpf_setif: bufmode %d", d->bd_bufmode);
1840 }
1841 if (bp != d->bd_bif)
1842 bpf_attachd(d, bp);
1843 BPFD_LOCK(d);
1844 reset_d(d);
1845 BPFD_UNLOCK(d);
1846 return (0);
1847 }
1848
1849 /*
1850 * Support for select() and poll() system calls
1851 *
1852 * Return true iff the specific operation will not block indefinitely.
1853 * Otherwise, return false but make a note that a selwakeup() must be done.
1854 */
1855 static int
1856 bpfpoll(struct cdev *dev, int events, struct thread *td)
1857 {
1858 struct bpf_d *d;
1859 int revents;
1860
1861 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
1862 return (events &
1863 (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
1864
1865 /*
1866 * Refresh PID associated with this descriptor.
1867 */
1868 revents = events & (POLLOUT | POLLWRNORM);
1869 BPFD_LOCK(d);
1870 BPF_PID_REFRESH(d, td);
1871 if (events & (POLLIN | POLLRDNORM)) {
1872 if (bpf_ready(d))
1873 revents |= events & (POLLIN | POLLRDNORM);
1874 else {
1875 selrecord(td, &d->bd_sel);
1876 /* Start the read timeout if necessary. */
1877 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1878 callout_reset(&d->bd_callout, d->bd_rtout,
1879 bpf_timed_out, d);
1880 d->bd_state = BPF_WAITING;
1881 }
1882 }
1883 }
1884 BPFD_UNLOCK(d);
1885 return (revents);
1886 }
1887
1888 /*
1889 * Support for kevent() system call. Register EVFILT_READ filters and
1890 * reject all others.
1891 */
1892 int
1893 bpfkqfilter(struct cdev *dev, struct knote *kn)
1894 {
1895 struct bpf_d *d;
1896
1897 if (devfs_get_cdevpriv((void **)&d) != 0 ||
1898 kn->kn_filter != EVFILT_READ)
1899 return (1);
1900
1901 /*
1902 * Refresh PID associated with this descriptor.
1903 */
1904 BPFD_LOCK(d);
1905 BPF_PID_REFRESH_CUR(d);
1906 kn->kn_fop = &bpfread_filtops;
1907 kn->kn_hook = d;
1908 knlist_add(&d->bd_sel.si_note, kn, 1);
1909 BPFD_UNLOCK(d);
1910
1911 return (0);
1912 }
1913
1914 static void
1915 filt_bpfdetach(struct knote *kn)
1916 {
1917 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1918
1919 knlist_remove(&d->bd_sel.si_note, kn, 0);
1920 }
1921
1922 static int
1923 filt_bpfread(struct knote *kn, long hint)
1924 {
1925 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1926 int ready;
1927
1928 BPFD_LOCK_ASSERT(d);
1929 ready = bpf_ready(d);
1930 if (ready) {
1931 kn->kn_data = d->bd_slen;
1932 if (d->bd_hbuf)
1933 kn->kn_data += d->bd_hlen;
1934 } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1935 callout_reset(&d->bd_callout, d->bd_rtout,
1936 bpf_timed_out, d);
1937 d->bd_state = BPF_WAITING;
1938 }
1939
1940 return (ready);
1941 }
1942
1943 /*
1944 * Incoming linkage from device drivers. Process the packet pkt, of length
1945 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1946 * by each process' filter, and if accepted, stashed into the corresponding
1947 * buffer.
1948 */
1949 void
1950 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
1951 {
1952 struct bpf_d *d;
1953 #ifdef BPF_JITTER
1954 bpf_jit_filter *bf;
1955 #endif
1956 u_int slen;
1957 int gottime;
1958 struct timeval tv;
1959
1960 gottime = 0;
1961
1962 BPFIF_RLOCK(bp);
1963
1964 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1965 /*
1966 * We are not using any locks for d here because:
1967 * 1) any filter change is protected by interface
1968 * write lock
1969 * 2) destroying/detaching d is protected by interface
1970 * write lock, too
1971 */
1972
1973 /* XXX: Do not protect counter for the sake of performance. */
1974 ++d->bd_rcount;
1975 /*
1976 * NB: We dont call BPF_CHECK_DIRECTION() here since there is no
1977 * way for the caller to indiciate to us whether this packet
1978 * is inbound or outbound. In the bpf_mtap() routines, we use
1979 * the interface pointers on the mbuf to figure it out.
1980 */
1981 #ifdef BPF_JITTER
1982 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
1983 if (bf != NULL)
1984 slen = (*(bf->func))(pkt, pktlen, pktlen);
1985 else
1986 #endif
1987 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
1988 if (slen != 0) {
1989 /*
1990 * Filter matches. Let's to acquire write lock.
1991 */
1992 BPFD_LOCK(d);
1993
1994 d->bd_fcount++;
1995 if (!gottime) {
1996 microtime(&tv);
1997 gottime = 1;
1998 }
1999 #ifdef MAC
2000 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2001 #endif
2002 catchpacket(d, pkt, pktlen, slen,
2003 bpf_append_bytes, &tv);
2004 BPFD_UNLOCK(d);
2005 }
2006 }
2007 BPFIF_RUNLOCK(bp);
2008 }
2009
2010 #define BPF_CHECK_DIRECTION(d, r, i) \
2011 (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
2012 ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
2013
2014 /*
2015 * Incoming linkage from device drivers, when packet is in an mbuf chain.
2016 * Locking model is explained in bpf_tap().
2017 */
2018 void
2019 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2020 {
2021 struct bpf_d *d;
2022 #ifdef BPF_JITTER
2023 bpf_jit_filter *bf;
2024 #endif
2025 u_int pktlen, slen;
2026 int gottime;
2027 struct timeval tv;
2028
2029 /* Skip outgoing duplicate packets. */
2030 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2031 m->m_flags &= ~M_PROMISC;
2032 return;
2033 }
2034
2035 gottime = 0;
2036
2037 pktlen = m_length(m, NULL);
2038
2039 BPFIF_RLOCK(bp);
2040
2041 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2042 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2043 continue;
2044 ++d->bd_rcount;
2045 #ifdef BPF_JITTER
2046 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2047 /* XXX We cannot handle multiple mbufs. */
2048 if (bf != NULL && m->m_next == NULL)
2049 slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2050 else
2051 #endif
2052 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2053 if (slen != 0) {
2054 BPFD_LOCK(d);
2055
2056 d->bd_fcount++;
2057 if (!gottime) {
2058 microtime(&tv);
2059 gottime = 1;
2060 }
2061 #ifdef MAC
2062 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2063 #endif
2064 catchpacket(d, (u_char *)m, pktlen, slen,
2065 bpf_append_mbuf, &tv);
2066 BPFD_UNLOCK(d);
2067 }
2068 }
2069 BPFIF_RUNLOCK(bp);
2070 }
2071
2072 /*
2073 * Incoming linkage from device drivers, when packet is in
2074 * an mbuf chain and to be prepended by a contiguous header.
2075 */
2076 void
2077 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2078 {
2079 struct mbuf mb;
2080 struct bpf_d *d;
2081 u_int pktlen, slen;
2082 int gottime;
2083 struct timeval tv;
2084
2085 /* Skip outgoing duplicate packets. */
2086 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2087 m->m_flags &= ~M_PROMISC;
2088 return;
2089 }
2090
2091 gottime = 0;
2092
2093 pktlen = m_length(m, NULL);
2094 /*
2095 * Craft on-stack mbuf suitable for passing to bpf_filter.
2096 * Note that we cut corners here; we only setup what's
2097 * absolutely needed--this mbuf should never go anywhere else.
2098 */
2099 mb.m_next = m;
2100 mb.m_data = data;
2101 mb.m_len = dlen;
2102 pktlen += dlen;
2103
2104
2105 BPFIF_RLOCK(bp);
2106
2107 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2108 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2109 continue;
2110 ++d->bd_rcount;
2111 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2112 if (slen != 0) {
2113 BPFD_LOCK(d);
2114
2115 d->bd_fcount++;
2116 if (!gottime) {
2117 microtime(&tv);
2118 gottime = 1;
2119 }
2120 #ifdef MAC
2121 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2122 #endif
2123 catchpacket(d, (u_char *)&mb, pktlen, slen,
2124 bpf_append_mbuf, &tv);
2125 BPFD_UNLOCK(d);
2126 }
2127 }
2128 BPFIF_RUNLOCK(bp);
2129 }
2130
2131 #undef BPF_CHECK_DIRECTION
2132
2133 /*
2134 * Move the packet data from interface memory (pkt) into the
2135 * store buffer. "cpfn" is the routine called to do the actual data
2136 * transfer. bcopy is passed in to copy contiguous chunks, while
2137 * bpf_append_mbuf is passed in to copy mbuf chains. In the latter case,
2138 * pkt is really an mbuf.
2139 */
2140 static void
2141 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2142 void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2143 struct timeval *tv)
2144 {
2145 struct bpf_hdr hdr;
2146 #ifdef COMPAT_FREEBSD32
2147 struct bpf_hdr32 hdr32;
2148 #endif
2149 int totlen, curlen;
2150 int hdrlen = d->bd_bif->bif_hdrlen;
2151 int do_wakeup = 0;
2152
2153 BPFD_LOCK_ASSERT(d);
2154
2155 /*
2156 * Detect whether user space has released a buffer back to us, and if
2157 * so, move it from being a hold buffer to a free buffer. This may
2158 * not be the best place to do it (for example, we might only want to
2159 * run this check if we need the space), but for now it's a reliable
2160 * spot to do it.
2161 */
2162 if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2163 d->bd_fbuf = d->bd_hbuf;
2164 d->bd_hbuf = NULL;
2165 d->bd_hlen = 0;
2166 bpf_buf_reclaimed(d);
2167 }
2168
2169 /*
2170 * Figure out how many bytes to move. If the packet is
2171 * greater or equal to the snapshot length, transfer that
2172 * much. Otherwise, transfer the whole packet (unless
2173 * we hit the buffer size limit).
2174 */
2175 totlen = hdrlen + min(snaplen, pktlen);
2176 if (totlen > d->bd_bufsize)
2177 totlen = d->bd_bufsize;
2178
2179 /*
2180 * Round up the end of the previous packet to the next longword.
2181 *
2182 * Drop the packet if there's no room and no hope of room
2183 * If the packet would overflow the storage buffer or the storage
2184 * buffer is considered immutable by the buffer model, try to rotate
2185 * the buffer and wakeup pending processes.
2186 */
2187 #ifdef COMPAT_FREEBSD32
2188 if (d->bd_compat32)
2189 curlen = BPF_WORDALIGN32(d->bd_slen);
2190 else
2191 #endif
2192 curlen = BPF_WORDALIGN(d->bd_slen);
2193 if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2194 if (d->bd_fbuf == NULL) {
2195 /*
2196 * There's no room in the store buffer, and no
2197 * prospect of room, so drop the packet. Notify the
2198 * buffer model.
2199 */
2200 bpf_buffull(d);
2201 ++d->bd_dcount;
2202 return;
2203 }
2204 ROTATE_BUFFERS(d);
2205 do_wakeup = 1;
2206 curlen = 0;
2207 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2208 /*
2209 * Immediate mode is set, or the read timeout has already
2210 * expired during a select call. A packet arrived, so the
2211 * reader should be woken up.
2212 */
2213 do_wakeup = 1;
2214 #ifdef COMPAT_FREEBSD32
2215 /*
2216 * If this is a 32-bit stream, then stick a 32-bit header at the
2217 * front and copy the data into the buffer.
2218 */
2219 if (d->bd_compat32) {
2220 bzero(&hdr32, sizeof(hdr32));
2221 hdr32.bh_tstamp.tv_sec = tv->tv_sec;
2222 hdr32.bh_tstamp.tv_usec = tv->tv_usec;
2223 hdr32.bh_datalen = pktlen;
2224 hdr32.bh_hdrlen = hdrlen;
2225 hdr.bh_caplen = hdr32.bh_caplen = totlen - hdrlen;
2226 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32, sizeof(hdr32));
2227 goto copy;
2228 }
2229 #endif
2230
2231 /*
2232 * Append the bpf header. Note we append the actual header size, but
2233 * move forward the length of the header plus padding.
2234 */
2235 bzero(&hdr, sizeof(hdr));
2236 hdr.bh_tstamp = *tv;
2237 hdr.bh_datalen = pktlen;
2238 hdr.bh_hdrlen = hdrlen;
2239 hdr.bh_caplen = totlen - hdrlen;
2240 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2241
2242 /*
2243 * Copy the packet data into the store buffer and update its length.
2244 */
2245 #ifdef COMPAT_FREEBSD32
2246 copy:
2247 #endif
2248 (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
2249 d->bd_slen = curlen + totlen;
2250
2251 if (do_wakeup)
2252 bpf_wakeup(d);
2253 }
2254
2255 /*
2256 * Free buffers currently in use by a descriptor.
2257 * Called on close.
2258 */
2259 static void
2260 bpf_freed(struct bpf_d *d)
2261 {
2262
2263 /*
2264 * We don't need to lock out interrupts since this descriptor has
2265 * been detached from its interface and it yet hasn't been marked
2266 * free.
2267 */
2268 bpf_free(d);
2269 if (d->bd_rfilter != NULL) {
2270 free((caddr_t)d->bd_rfilter, M_BPF);
2271 #ifdef BPF_JITTER
2272 if (d->bd_bfilter != NULL)
2273 bpf_destroy_jit_filter(d->bd_bfilter);
2274 #endif
2275 }
2276 if (d->bd_wfilter != NULL)
2277 free((caddr_t)d->bd_wfilter, M_BPF);
2278 mtx_destroy(&d->bd_lock);
2279 }
2280
2281 /*
2282 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
2283 * fixed size of the link header (variable length headers not yet supported).
2284 */
2285 void
2286 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2287 {
2288
2289 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2290 }
2291
2292 /*
2293 * Attach an interface to bpf. ifp is a pointer to the structure
2294 * defining the interface to be attached, dlt is the link layer type,
2295 * and hdrlen is the fixed size of the link header (variable length
2296 * headers are not yet supporrted).
2297 */
2298 void
2299 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2300 {
2301 struct bpf_if *bp;
2302
2303 bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2304 if (bp == NULL)
2305 panic("bpfattach");
2306
2307 LIST_INIT(&bp->bif_dlist);
2308 LIST_INIT(&bp->bif_wlist);
2309 bp->bif_ifp = ifp;
2310 bp->bif_dlt = dlt;
2311 rw_init(&bp->bif_lock, "bpf interface lock");
2312 KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2313 *driverp = bp;
2314
2315 BPF_LOCK();
2316 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2317 BPF_UNLOCK();
2318
2319 /*
2320 * Compute the length of the bpf header. This is not necessarily
2321 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
2322 * that the network layer header begins on a longword boundary (for
2323 * performance reasons and to alleviate alignment restrictions).
2324 */
2325 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
2326
2327 if (bootverbose)
2328 if_printf(ifp, "bpf attached\n");
2329 }
2330
2331 /*
2332 * Detach bpf from an interface. This involves detaching each descriptor
2333 * associated with the interface. Notify each descriptor as it's detached
2334 * so that any sleepers wake up and get ENXIO.
2335 */
2336 void
2337 bpfdetach(struct ifnet *ifp)
2338 {
2339 struct bpf_if *bp;
2340 struct bpf_d *d;
2341 #ifdef INVARIANTS
2342 int ndetached;
2343
2344 ndetached = 0;
2345 #endif
2346
2347 BPF_LOCK();
2348 /* Find all bpf_if struct's which reference ifp and detach them. */
2349 do {
2350 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2351 if (ifp == bp->bif_ifp)
2352 break;
2353 }
2354 if (bp != NULL)
2355 LIST_REMOVE(bp, bif_next);
2356
2357 if (bp != NULL) {
2358 #ifdef INVARIANTS
2359 ndetached++;
2360 #endif
2361 while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2362 bpf_detachd_locked(d);
2363 BPFD_LOCK(d);
2364 bpf_wakeup(d);
2365 BPFD_UNLOCK(d);
2366 }
2367 /* Free writer-only descriptors */
2368 while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2369 bpf_detachd_locked(d);
2370 BPFD_LOCK(d);
2371 bpf_wakeup(d);
2372 BPFD_UNLOCK(d);
2373 }
2374
2375 /*
2376 * Delay freing bp till interface is detached
2377 * and all routes through this interface are removed.
2378 * Mark bp as detached to restrict new consumers.
2379 */
2380 BPFIF_WLOCK(bp);
2381 bp->flags |= BPFIF_FLAG_DYING;
2382 BPFIF_WUNLOCK(bp);
2383 }
2384 } while (bp != NULL);
2385 BPF_UNLOCK();
2386
2387 #ifdef INVARIANTS
2388 if (ndetached == 0)
2389 printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2390 #endif
2391 }
2392
2393 /*
2394 * Interface departure handler.
2395 * Note departure event does not guarantee interface is going down.
2396 */
2397 static void
2398 bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2399 {
2400 struct bpf_if *bp;
2401
2402 BPF_LOCK();
2403 if ((bp = ifp->if_bpf) == NULL) {
2404 BPF_UNLOCK();
2405 return;
2406 }
2407
2408 /* Check if bpfdetach() was called previously */
2409 if ((bp->flags & BPFIF_FLAG_DYING) == 0) {
2410 BPF_UNLOCK();
2411 return;
2412 }
2413
2414 CTR3(KTR_NET, "%s: freing BPF instance %p for interface %p",
2415 __func__, bp, ifp);
2416
2417 ifp->if_bpf = NULL;
2418 BPF_UNLOCK();
2419
2420 rw_destroy(&bp->bif_lock);
2421 free(bp, M_BPF);
2422 }
2423
2424 /*
2425 * Get a list of available data link type of the interface.
2426 */
2427 static int
2428 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2429 {
2430 int n, error;
2431 struct ifnet *ifp;
2432 struct bpf_if *bp;
2433
2434 BPF_LOCK_ASSERT();
2435
2436 ifp = d->bd_bif->bif_ifp;
2437 n = 0;
2438 error = 0;
2439 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2440 if (bp->bif_ifp != ifp)
2441 continue;
2442 if (bfl->bfl_list != NULL) {
2443 if (n >= bfl->bfl_len)
2444 return (ENOMEM);
2445 error = copyout(&bp->bif_dlt,
2446 bfl->bfl_list + n, sizeof(u_int));
2447 }
2448 n++;
2449 }
2450 bfl->bfl_len = n;
2451 return (error);
2452 }
2453
2454 /*
2455 * Set the data link type of a BPF instance.
2456 */
2457 static int
2458 bpf_setdlt(struct bpf_d *d, u_int dlt)
2459 {
2460 int error, opromisc;
2461 struct ifnet *ifp;
2462 struct bpf_if *bp;
2463
2464 BPF_LOCK_ASSERT();
2465
2466 if (d->bd_bif->bif_dlt == dlt)
2467 return (0);
2468 ifp = d->bd_bif->bif_ifp;
2469
2470 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2471 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2472 break;
2473 }
2474
2475 if (bp != NULL) {
2476 opromisc = d->bd_promisc;
2477 bpf_attachd(d, bp);
2478 BPFD_LOCK(d);
2479 reset_d(d);
2480 BPFD_UNLOCK(d);
2481 if (opromisc) {
2482 error = ifpromisc(bp->bif_ifp, 1);
2483 if (error)
2484 if_printf(bp->bif_ifp,
2485 "bpf_setdlt: ifpromisc failed (%d)\n",
2486 error);
2487 else
2488 d->bd_promisc = 1;
2489 }
2490 }
2491 return (bp == NULL ? EINVAL : 0);
2492 }
2493
2494 static void
2495 bpf_drvinit(void *unused)
2496 {
2497 struct cdev *dev;
2498
2499 mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2500 LIST_INIT(&bpf_iflist);
2501
2502 dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2503 /* For compatibility */
2504 make_dev_alias(dev, "bpf0");
2505
2506 /* Register interface departure handler */
2507 bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2508 ifnet_departure_event, bpf_ifdetach, NULL,
2509 EVENTHANDLER_PRI_ANY);
2510 }
2511
2512 /*
2513 * Zero out the various packet counters associated with all of the bpf
2514 * descriptors. At some point, we will probably want to get a bit more
2515 * granular and allow the user to specify descriptors to be zeroed.
2516 */
2517 static void
2518 bpf_zero_counters(void)
2519 {
2520 struct bpf_if *bp;
2521 struct bpf_d *bd;
2522
2523 BPF_LOCK();
2524 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2525 BPFIF_RLOCK(bp);
2526 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2527 BPFD_LOCK(bd);
2528 bd->bd_rcount = 0;
2529 bd->bd_dcount = 0;
2530 bd->bd_fcount = 0;
2531 bd->bd_wcount = 0;
2532 bd->bd_wfcount = 0;
2533 bd->bd_zcopy = 0;
2534 BPFD_UNLOCK(bd);
2535 }
2536 BPFIF_RUNLOCK(bp);
2537 }
2538 BPF_UNLOCK();
2539 }
2540
2541 /*
2542 * Fill filter statistics
2543 */
2544 static void
2545 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2546 {
2547
2548 bzero(d, sizeof(*d));
2549 BPFD_LOCK_ASSERT(bd);
2550 d->bd_structsize = sizeof(*d);
2551 /* XXX: reading should be protected by global lock */
2552 d->bd_immediate = bd->bd_immediate;
2553 d->bd_promisc = bd->bd_promisc;
2554 d->bd_hdrcmplt = bd->bd_hdrcmplt;
2555 d->bd_direction = bd->bd_direction;
2556 d->bd_feedback = bd->bd_feedback;
2557 d->bd_async = bd->bd_async;
2558 d->bd_rcount = bd->bd_rcount;
2559 d->bd_dcount = bd->bd_dcount;
2560 d->bd_fcount = bd->bd_fcount;
2561 d->bd_sig = bd->bd_sig;
2562 d->bd_slen = bd->bd_slen;
2563 d->bd_hlen = bd->bd_hlen;
2564 d->bd_bufsize = bd->bd_bufsize;
2565 d->bd_pid = bd->bd_pid;
2566 strlcpy(d->bd_ifname,
2567 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2568 d->bd_locked = bd->bd_locked;
2569 d->bd_wcount = bd->bd_wcount;
2570 d->bd_wdcount = bd->bd_wdcount;
2571 d->bd_wfcount = bd->bd_wfcount;
2572 d->bd_zcopy = bd->bd_zcopy;
2573 d->bd_bufmode = bd->bd_bufmode;
2574 }
2575
2576 /*
2577 * Handle `netstat -B' stats request
2578 */
2579 static int
2580 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2581 {
2582 struct xbpf_d *xbdbuf, *xbd, zerostats;
2583 int index, error;
2584 struct bpf_if *bp;
2585 struct bpf_d *bd;
2586
2587 /*
2588 * XXX This is not technically correct. It is possible for non
2589 * privileged users to open bpf devices. It would make sense
2590 * if the users who opened the devices were able to retrieve
2591 * the statistics for them, too.
2592 */
2593 error = priv_check(req->td, PRIV_NET_BPF);
2594 if (error)
2595 return (error);
2596 /*
2597 * Check to see if the user is requesting that the counters be
2598 * zeroed out. Explicitly check that the supplied data is zeroed,
2599 * as we aren't allowing the user to set the counters currently.
2600 */
2601 if (req->newptr != NULL) {
2602 if (req->newlen != sizeof(zerostats))
2603 return (EINVAL);
2604 bzero(&zerostats, sizeof(zerostats));
2605 xbd = req->newptr;
2606 if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
2607 return (EINVAL);
2608 bpf_zero_counters();
2609 return (0);
2610 }
2611 if (req->oldptr == NULL)
2612 return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2613 if (bpf_bpfd_cnt == 0)
2614 return (SYSCTL_OUT(req, 0, 0));
2615 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2616 BPF_LOCK();
2617 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2618 BPF_UNLOCK();
2619 free(xbdbuf, M_BPF);
2620 return (ENOMEM);
2621 }
2622 index = 0;
2623 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2624 BPFIF_RLOCK(bp);
2625 /* Send writers-only first */
2626 LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2627 xbd = &xbdbuf[index++];
2628 BPFD_LOCK(bd);
2629 bpfstats_fill_xbpf(xbd, bd);
2630 BPFD_UNLOCK(bd);
2631 }
2632 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2633 xbd = &xbdbuf[index++];
2634 BPFD_LOCK(bd);
2635 bpfstats_fill_xbpf(xbd, bd);
2636 BPFD_UNLOCK(bd);
2637 }
2638 BPFIF_RUNLOCK(bp);
2639 }
2640 BPF_UNLOCK();
2641 error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2642 free(xbdbuf, M_BPF);
2643 return (error);
2644 }
2645
2646 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL);
2647
2648 #else /* !DEV_BPF && !NETGRAPH_BPF */
2649 /*
2650 * NOP stubs to allow bpf-using drivers to load and function.
2651 *
2652 * A 'better' implementation would allow the core bpf functionality
2653 * to be loaded at runtime.
2654 */
2655 static struct bpf_if bp_null;
2656
2657 void
2658 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2659 {
2660 }
2661
2662 void
2663 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2664 {
2665 }
2666
2667 void
2668 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2669 {
2670 }
2671
2672 void
2673 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2674 {
2675
2676 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2677 }
2678
2679 void
2680 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2681 {
2682
2683 *driverp = &bp_null;
2684 }
2685
2686 void
2687 bpfdetach(struct ifnet *ifp)
2688 {
2689 }
2690
2691 u_int
2692 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
2693 {
2694 return -1; /* "no filter" behaviour */
2695 }
2696
2697 int
2698 bpf_validate(const struct bpf_insn *f, int len)
2699 {
2700 return 0; /* false */
2701 }
2702
2703 #endif /* !DEV_BPF && !NETGRAPH_BPF */
Cache object: 8bc6c2d485c4b3ec8e5934c1bf56e0b8
|