FreeBSD/Linux Kernel Cross Reference
sys/net/bpf.c
1 /*-
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95
35 *
36 * $FreeBSD: releng/6.0/sys/net/bpf.c 150788 2005-10-01 18:43:16Z csjp $
37 */
38
39 #include "opt_bpf.h"
40 #include "opt_mac.h"
41 #include "opt_netgraph.h"
42
43 #include <sys/types.h>
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/conf.h>
47 #include <sys/fcntl.h>
48 #include <sys/mac.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/time.h>
52 #include <sys/proc.h>
53 #include <sys/signalvar.h>
54 #include <sys/filio.h>
55 #include <sys/sockio.h>
56 #include <sys/ttycom.h>
57 #include <sys/uio.h>
58
59 #include <sys/event.h>
60 #include <sys/file.h>
61 #include <sys/poll.h>
62 #include <sys/proc.h>
63
64 #include <sys/socket.h>
65
66 #include <net/if.h>
67 #include <net/bpf.h>
68 #include <net/bpfdesc.h>
69
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <sys/kernel.h>
73 #include <sys/sysctl.h>
74
75 static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
76
77 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
78
79 #define PRINET 26 /* interruptible */
80
81 /*
82 * bpf_iflist is a list of BPF interface structures, each corresponding to a
83 * specific DLT. The same network interface might have several BPF interface
84 * structures registered by different layers in the stack (e.g., 802.11
85 * frames, ethernet frames, etc.).
86 */
87 static LIST_HEAD(, bpf_if) bpf_iflist;
88 static struct mtx bpf_mtx; /* bpf global lock */
89 static int bpf_bpfd_cnt;
90
91 static int bpf_allocbufs(struct bpf_d *);
92 static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
93 static void bpf_detachd(struct bpf_d *d);
94 static void bpf_freed(struct bpf_d *);
95 static void bpf_mcopy(const void *, void *, size_t);
96 static int bpf_movein(struct uio *, int, int,
97 struct mbuf **, struct sockaddr *, struct bpf_insn *);
98 static int bpf_setif(struct bpf_d *, struct ifreq *);
99 static void bpf_timed_out(void *);
100 static __inline void
101 bpf_wakeup(struct bpf_d *);
102 static void catchpacket(struct bpf_d *, u_char *, u_int,
103 u_int, void (*)(const void *, void *, size_t));
104 static void reset_d(struct bpf_d *);
105 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
106 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
107 static int bpf_setdlt(struct bpf_d *, u_int);
108 static void filt_bpfdetach(struct knote *);
109 static int filt_bpfread(struct knote *, long);
110 static void bpf_drvinit(void *);
111 static void bpf_clone(void *, struct ucred *, char *, int, struct cdev **);
112 static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
113
114 /*
115 * The default read buffer size is patchable.
116 */
117 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
118 static int bpf_bufsize = 4096;
119 SYSCTL_INT(_net_bpf, OID_AUTO, bufsize, CTLFLAG_RW,
120     &bpf_bufsize, 0, "Default bpf buffer size");
121 static int bpf_maxbufsize = BPF_MAXBUFSIZE;
122 SYSCTL_INT(_net_bpf, OID_AUTO, maxbufsize, CTLFLAG_RW,
123     &bpf_maxbufsize, 0, "Maximum bpf buffer size");
124 static int bpf_maxinsns = BPF_MAXINSNS;
125 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
126 &bpf_maxinsns, 0, "Maximum bpf program instructions");
127 SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_RW,
128 bpf_stats_sysctl, "bpf statistics portal");
129
130 static d_open_t bpfopen;
131 static d_close_t bpfclose;
132 static d_read_t bpfread;
133 static d_write_t bpfwrite;
134 static d_ioctl_t bpfioctl;
135 static d_poll_t bpfpoll;
136 static d_kqfilter_t bpfkqfilter;
137
138 static struct cdevsw bpf_cdevsw = {
139 .d_version = D_VERSION,
140 .d_flags = D_NEEDGIANT,
141 .d_open = bpfopen,
142 .d_close = bpfclose,
143 .d_read = bpfread,
144 .d_write = bpfwrite,
145 .d_ioctl = bpfioctl,
146 .d_poll = bpfpoll,
147 .d_name = "bpf",
148 .d_kqfilter = bpfkqfilter,
149 };
150
151 static struct filterops bpfread_filtops =
152 { 1, NULL, filt_bpfdetach, filt_bpfread };
153
154 static int
155 bpf_movein(uio, linktype, mtu, mp, sockp, wfilter)
156 struct uio *uio;
157 int linktype;
158 int mtu;
159 struct mbuf **mp;
160 struct sockaddr *sockp;
161 struct bpf_insn *wfilter;
162 {
163 struct mbuf *m;
164 int error;
165 int len;
166 int hlen;
167 int slen;
168
169 /*
170 * Build a sockaddr based on the data link layer type.
171 * We do this at this level because the ethernet header
172 * is copied directly into the data field of the sockaddr.
173 * In the case of SLIP, there is no header and the packet
174 * is forwarded as is.
175 * Also, we are careful to leave room at the front of the mbuf
176 * for the link level header.
177 */
178 switch (linktype) {
179
180 case DLT_SLIP:
181 sockp->sa_family = AF_INET;
182 hlen = 0;
183 break;
184
185 case DLT_EN10MB:
186 sockp->sa_family = AF_UNSPEC;
187 /* XXX Would MAXLINKHDR be better? */
188 hlen = ETHER_HDR_LEN;
189 break;
190
191 case DLT_FDDI:
192 sockp->sa_family = AF_IMPLINK;
193 hlen = 0;
194 break;
195
196 case DLT_RAW:
197 sockp->sa_family = AF_UNSPEC;
198 hlen = 0;
199 break;
200
201 case DLT_NULL:
202 /*
203 * The null interface type requires a 4-byte pseudo header, which
204 * corresponds to the address family of the packet.
205 */
206 sockp->sa_family = AF_UNSPEC;
207 hlen = 4;
208 break;
209
210 case DLT_ATM_RFC1483:
211 /*
212 * The "en" ATM driver requires a 4-byte ATM pseudo header.
213 * Though it isn't standard, the vpi:vci pair needs to be
214 * specified anyway.
215 */
216 sockp->sa_family = AF_UNSPEC;
217 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
218 break;
219
220 case DLT_PPP:
221 sockp->sa_family = AF_UNSPEC;
222 hlen = 4; /* This should match PPP_HDRLEN */
223 break;
224
225 default:
226 return (EIO);
227 }
228
229 len = uio->uio_resid;
230
231 if (len - hlen > mtu)
232 return (EMSGSIZE);
233
234 if ((unsigned)len > MCLBYTES)
235 return (EIO);
236
237 if (len > MHLEN) {
238 m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
239 } else {
240 MGETHDR(m, M_TRYWAIT, MT_DATA);
241 }
242 if (m == NULL)
243 return (ENOBUFS);
244 m->m_pkthdr.len = m->m_len = len;
245 m->m_pkthdr.rcvif = NULL;
246 *mp = m;
247
248 if (m->m_len < hlen) {
249 error = EPERM;
250 goto bad;
251 }
252
253 error = uiomove(mtod(m, u_char *), len, uio);
254 if (error)
255 goto bad;
256
257 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
258 if (slen == 0) {
259 error = EPERM;
260 goto bad;
261 }
262
263 /*
264 * Make room for link header, and copy it to sockaddr
265 */
266 if (hlen != 0) {
267 bcopy(m->m_data, sockp->sa_data, hlen);
268 m->m_pkthdr.len -= hlen;
269 m->m_len -= hlen;
270 #if BSD >= 199103
271 m->m_data += hlen; /* XXX */
272 #else
273 m->m_off += hlen;
274 #endif
275 }
276
277 return (0);
278 bad:
279 m_freem(m);
280 return (error);
281 }
282
283 /*
284 * Attach file to the bpf interface, i.e. make d listen on bp.
285 */
286 static void
287 bpf_attachd(d, bp)
288 struct bpf_d *d;
289 struct bpf_if *bp;
290 {
291 /*
292 * Point d at bp, and add d to the interface's list of listeners.
293 * Finally, point the driver's bpf cookie at the interface so
294 * it will divert packets to bpf.
295 */
296 BPFIF_LOCK(bp);
297 d->bd_bif = bp;
298 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
299
300 bpf_bpfd_cnt++;
301 *bp->bif_driverp = bp;
302 BPFIF_UNLOCK(bp);
303 }
304
305 /*
306 * Detach a file from its interface.
307 */
308 static void
309 bpf_detachd(d)
310 struct bpf_d *d;
311 {
312 int error;
313 struct bpf_if *bp;
314 struct ifnet *ifp;
315
316 bp = d->bd_bif;
317 BPFIF_LOCK(bp);
318 BPFD_LOCK(d);
319 ifp = d->bd_bif->bif_ifp;
320
321 /*
322 * Remove d from the interface's descriptor list.
323 */
324 LIST_REMOVE(d, bd_next);
325
326 bpf_bpfd_cnt--;
327 /*
328 * Let the driver know that there are no more listeners.
329 */
330 if (LIST_EMPTY(&bp->bif_dlist))
331 *bp->bif_driverp = NULL;
332
333 d->bd_bif = NULL;
334 BPFD_UNLOCK(d);
335 BPFIF_UNLOCK(bp);
336
337 /*
338 * Check if this descriptor had requested promiscuous mode.
339 * If so, turn it off.
340 */
341 if (d->bd_promisc) {
342 d->bd_promisc = 0;
343 error = ifpromisc(ifp, 0);
344 if (error != 0 && error != ENXIO) {
345 /*
346 * ENXIO can happen if a pccard is unplugged.
347 * Something is really wrong if we were able to put
348 * the driver into promiscuous mode, but can't
349 * take it out.
350 */
351 if_printf(bp->bif_ifp,
352 "bpf_detach: ifpromisc failed (%d)\n", error);
353 }
354 }
355 }
356
357 /*
358 * Open the bpf device.  Returns ENXIO for an illegal minor device number,
359 * EBUSY if the file is open by another process.
360 */
361 /* ARGSUSED */
362 static int
363 bpfopen(dev, flags, fmt, td)
364 struct cdev *dev;
365 int flags;
366 int fmt;
367 struct thread *td;
368 {
369 struct bpf_d *d;
370
371 mtx_lock(&bpf_mtx);
372 d = dev->si_drv1;
373 /*
374 * Each minor can be opened by only one process. If the requested
375 * minor is in use, return EBUSY.
376 */
377 if (d != NULL) {
378 mtx_unlock(&bpf_mtx);
379 return (EBUSY);
380 }
381 dev->si_drv1 = (struct bpf_d *)~0; /* mark device in use */
382 mtx_unlock(&bpf_mtx);
383
384 if ((dev->si_flags & SI_NAMED) == 0)
385 make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
386 "bpf%d", dev2unit(dev));
387 MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
388 dev->si_drv1 = d;
389 d->bd_bufsize = bpf_bufsize;
390 d->bd_sig = SIGIO;
391 d->bd_seesent = 1;
392 d->bd_pid = td->td_proc->p_pid;
393 strlcpy(d->bd_pcomm, td->td_proc->p_comm, MAXCOMLEN);
394 #ifdef MAC
395 mac_init_bpfdesc(d);
396 mac_create_bpfdesc(td->td_ucred, d);
397 #endif
398 mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
399 callout_init(&d->bd_callout, NET_CALLOUT_MPSAFE);
400 knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL);
401
402 return (0);
403 }
404
405 /*
406 * Close the descriptor by detaching it from its interface,
407 * deallocating its buffers, and marking it free.
408 */
409 /* ARGSUSED */
410 static int
411 bpfclose(dev, flags, fmt, td)
412 struct cdev *dev;
413 int flags;
414 int fmt;
415 struct thread *td;
416 {
417 struct bpf_d *d = dev->si_drv1;
418
419 BPFD_LOCK(d);
420 if (d->bd_state == BPF_WAITING)
421 callout_stop(&d->bd_callout);
422 d->bd_state = BPF_IDLE;
423 BPFD_UNLOCK(d);
424 funsetown(&d->bd_sigio);
425 mtx_lock(&bpf_mtx);
426 if (d->bd_bif)
427 bpf_detachd(d);
428 mtx_unlock(&bpf_mtx);
429 selwakeuppri(&d->bd_sel, PRINET);
430 #ifdef MAC
431 mac_destroy_bpfdesc(d);
432 #endif /* MAC */
433 knlist_destroy(&d->bd_sel.si_note);
434 bpf_freed(d);
435 dev->si_drv1 = NULL;
436 free(d, M_BPF);
437
438 return (0);
439 }
440
441
442 /*
443 * Rotate the packet buffers in descriptor d. Move the store buffer
444 * into the hold slot, and the free buffer into the store slot.
445 * Zero the length of the new store buffer.
446 */
447 #define ROTATE_BUFFERS(d) \
448 (d)->bd_hbuf = (d)->bd_sbuf; \
449 (d)->bd_hlen = (d)->bd_slen; \
450 (d)->bd_sbuf = (d)->bd_fbuf; \
451 (d)->bd_slen = 0; \
452 (d)->bd_fbuf = NULL;
453 /*
454 * bpfread - read next chunk of packets from buffers
455 */
456 static int
457 bpfread(dev, uio, ioflag)
458 struct cdev *dev;
459 struct uio *uio;
460 int ioflag;
461 {
462 struct bpf_d *d = dev->si_drv1;
463 int timed_out;
464 int error;
465
466 /*
467 * Restrict the application to use a buffer the same size as
468 * the kernel buffers.
469 */
470 if (uio->uio_resid != d->bd_bufsize)
471 return (EINVAL);
472
473 BPFD_LOCK(d);
474 if (d->bd_state == BPF_WAITING)
475 callout_stop(&d->bd_callout);
476 timed_out = (d->bd_state == BPF_TIMED_OUT);
477 d->bd_state = BPF_IDLE;
478 /*
479 * If the hold buffer is empty, then do a timed sleep, which
480 * ends when the timeout expires or when enough packets
481 * have arrived to fill the store buffer.
482 */
483 while (d->bd_hbuf == NULL) {
484 if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
485 /*
486 * One or more packets either arrived since the previous
487 * read or arrived while we were asleep.
488 * Rotate the buffers and return what's here.
489 */
490 ROTATE_BUFFERS(d);
491 break;
492 }
493
494 /*
495 * No data is available, check to see if the bpf device
496 * is still pointed at a real interface. If not, return
497 * ENXIO so that the userland process knows to rebind
498 * it before using it again.
499 */
500 if (d->bd_bif == NULL) {
501 BPFD_UNLOCK(d);
502 return (ENXIO);
503 }
504
505 if (ioflag & O_NONBLOCK) {
506 BPFD_UNLOCK(d);
507 return (EWOULDBLOCK);
508 }
509 error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
510 "bpf", d->bd_rtout);
511 if (error == EINTR || error == ERESTART) {
512 BPFD_UNLOCK(d);
513 return (error);
514 }
515 if (error == EWOULDBLOCK) {
516 /*
517 * On a timeout, return what's in the buffer,
518 * which may be nothing. If there is something
519 * in the store buffer, we can rotate the buffers.
520 */
521 if (d->bd_hbuf)
522 /*
523 * We filled up the buffer in between
524 * getting the timeout and arriving
525 * here, so we don't need to rotate.
526 */
527 break;
528
529 if (d->bd_slen == 0) {
530 BPFD_UNLOCK(d);
531 return (0);
532 }
533 ROTATE_BUFFERS(d);
534 break;
535 }
536 }
537 /*
538 * At this point, we know we have something in the hold slot.
539 */
540 BPFD_UNLOCK(d);
541
542 /*
543 * Move data from hold buffer into user space.
544 * We know the entire buffer is transferred since
545 * we checked above that the read buffer is bpf_bufsize bytes.
546 */
547 error = uiomove(d->bd_hbuf, d->bd_hlen, uio);
548
549 BPFD_LOCK(d);
550 d->bd_fbuf = d->bd_hbuf;
551 d->bd_hbuf = NULL;
552 d->bd_hlen = 0;
553 BPFD_UNLOCK(d);
554
555 return (error);
556 }
557
558
559 /*
560 * If there are processes sleeping on this descriptor, wake them up.
561 */
562 static __inline void
563 bpf_wakeup(d)
564 struct bpf_d *d;
565 {
566
567 BPFD_LOCK_ASSERT(d);
568 if (d->bd_state == BPF_WAITING) {
569 callout_stop(&d->bd_callout);
570 d->bd_state = BPF_IDLE;
571 }
572 wakeup(d);
573 if (d->bd_async && d->bd_sig && d->bd_sigio)
574 pgsigio(&d->bd_sigio, d->bd_sig, 0);
575
576 selwakeuppri(&d->bd_sel, PRINET);
577 KNOTE_LOCKED(&d->bd_sel.si_note, 0);
578 }
579
580 static void
581 bpf_timed_out(arg)
582 void *arg;
583 {
584 struct bpf_d *d = (struct bpf_d *)arg;
585
586 BPFD_LOCK(d);
587 if (d->bd_state == BPF_WAITING) {
588 d->bd_state = BPF_TIMED_OUT;
589 if (d->bd_slen != 0)
590 bpf_wakeup(d);
591 }
592 BPFD_UNLOCK(d);
593 }
594
595 static int
596 bpfwrite(dev, uio, ioflag)
597 struct cdev *dev;
598 struct uio *uio;
599 int ioflag;
600 {
601 struct bpf_d *d = dev->si_drv1;
602 struct ifnet *ifp;
603 struct mbuf *m;
604 int error;
605 struct sockaddr dst;
606
607 if (d->bd_bif == NULL)
608 return (ENXIO);
609
610 ifp = d->bd_bif->bif_ifp;
611
612 if ((ifp->if_flags & IFF_UP) == 0)
613 return (ENETDOWN);
614
615 if (uio->uio_resid == 0)
616 return (0);
617
618 bzero(&dst, sizeof(dst));
619 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu,
620 &m, &dst, d->bd_wfilter);
621 if (error)
622 return (error);
623
624 if (d->bd_hdrcmplt)
625 dst.sa_family = pseudo_AF_HDRCMPLT;
626
627 #ifdef MAC
628 BPFD_LOCK(d);
629 mac_create_mbuf_from_bpfdesc(d, m);
630 BPFD_UNLOCK(d);
631 #endif
632 NET_LOCK_GIANT();
633 error = (*ifp->if_output)(ifp, m, &dst, NULL);
634 NET_UNLOCK_GIANT();
635 /*
636 * The driver frees the mbuf.
637 */
638 return (error);
639 }
640
641 /*
642 * Reset a descriptor by flushing its packet buffer and clearing the
643 * receive and drop counts.
644 */
645 static void
646 reset_d(d)
647 struct bpf_d *d;
648 {
649
650 mtx_assert(&d->bd_mtx, MA_OWNED);
651 if (d->bd_hbuf) {
652 /* Free the hold buffer. */
653 d->bd_fbuf = d->bd_hbuf;
654 d->bd_hbuf = NULL;
655 }
656 d->bd_slen = 0;
657 d->bd_hlen = 0;
658 d->bd_rcount = 0;
659 d->bd_dcount = 0;
660 d->bd_fcount = 0;
661 }
662
663 /*
664 * FIONREAD Check for read packet available.
665 * SIOCGIFADDR Get interface address - convenient hook to driver.
666 * BIOCGBLEN Get buffer len [for read()].
667 * BIOCSETF Set ethernet read filter.
668 * BIOCSETWF Set ethernet write filter.
669 * BIOCFLUSH Flush read packet buffer.
670 * BIOCPROMISC Put interface into promiscuous mode.
671 * BIOCGDLT Get link layer type.
672 * BIOCGETIF Get interface name.
673 * BIOCSETIF Set interface.
674 * BIOCSRTIMEOUT Set read timeout.
675 * BIOCGRTIMEOUT Get read timeout.
676 * BIOCGSTATS Get packet stats.
677 * BIOCIMMEDIATE Set immediate mode.
678 * BIOCVERSION Get filter language version.
679 * BIOCGHDRCMPLT Get "header already complete" flag
680 * BIOCSHDRCMPLT Set "header already complete" flag
681 * BIOCGSEESENT Get "see packets sent" flag
682 * BIOCSSEESENT Set "see packets sent" flag
683 * BIOCLOCK Set "locked" flag
684 */
685 /* ARGSUSED */
686 static int
687 bpfioctl(dev, cmd, addr, flags, td)
688 struct cdev *dev;
689 u_long cmd;
690 caddr_t addr;
691 int flags;
692 struct thread *td;
693 {
694 struct bpf_d *d = dev->si_drv1;
695 int error = 0;
696
697 /*
698 * Refresh PID associated with this descriptor.
699 */
700 d->bd_pid = td->td_proc->p_pid;
701 BPFD_LOCK(d);
702 if (d->bd_state == BPF_WAITING)
703 callout_stop(&d->bd_callout);
704 d->bd_state = BPF_IDLE;
705 BPFD_UNLOCK(d);
706
707 if (d->bd_locked == 1) {
708 switch (cmd) {
709 case BIOCGBLEN:
710 case BIOCFLUSH:
711 case BIOCGDLT:
712 case BIOCGDLTLIST:
713 case BIOCGETIF:
714 case BIOCGRTIMEOUT:
715 case BIOCGSTATS:
716 case BIOCVERSION:
717 case BIOCGRSIG:
718 case BIOCGHDRCMPLT:
719 case FIONREAD:
720 case BIOCLOCK:
721 case BIOCSRTIMEOUT:
722 case BIOCIMMEDIATE:
723 case TIOCGPGRP:
724 break;
725 default:
726 return (EPERM);
727 }
728 }
729 switch (cmd) {
730
731 default:
732 error = EINVAL;
733 break;
734
735 /*
736 * Check for read packet available.
737 */
738 case FIONREAD:
739 {
740 int n;
741
742 BPFD_LOCK(d);
743 n = d->bd_slen;
744 if (d->bd_hbuf)
745 n += d->bd_hlen;
746 BPFD_UNLOCK(d);
747
748 *(int *)addr = n;
749 break;
750 }
751
752 case SIOCGIFADDR:
753 {
754 struct ifnet *ifp;
755
756 if (d->bd_bif == NULL)
757 error = EINVAL;
758 else {
759 ifp = d->bd_bif->bif_ifp;
760 error = (*ifp->if_ioctl)(ifp, cmd, addr);
761 }
762 break;
763 }
764
765 /*
766 * Get buffer len [for read()].
767 */
768 case BIOCGBLEN:
769 *(u_int *)addr = d->bd_bufsize;
770 break;
771
772 /*
773 * Set buffer length.
774 */
775 case BIOCSBLEN:
776 if (d->bd_bif != NULL)
777 error = EINVAL;
778 else {
779 u_int size = *(u_int *)addr;
780
781 if (size > bpf_maxbufsize)
782 *(u_int *)addr = size = bpf_maxbufsize;
783 else if (size < BPF_MINBUFSIZE)
784 *(u_int *)addr = size = BPF_MINBUFSIZE;
785 d->bd_bufsize = size;
786 }
787 break;
788
789 /*
790 * Set link layer read filter.
791 */
792 case BIOCSETF:
793 case BIOCSETWF:
794 error = bpf_setf(d, (struct bpf_program *)addr, cmd);
795 break;
796
797 /*
798 * Flush read packet buffer.
799 */
800 case BIOCFLUSH:
801 BPFD_LOCK(d);
802 reset_d(d);
803 BPFD_UNLOCK(d);
804 break;
805
806 /*
807 * Put interface into promiscuous mode.
808 */
809 case BIOCPROMISC:
810 if (d->bd_bif == NULL) {
811 /*
812 * No interface attached yet.
813 */
814 error = EINVAL;
815 break;
816 }
817 if (d->bd_promisc == 0) {
818 mtx_lock(&Giant);
819 error = ifpromisc(d->bd_bif->bif_ifp, 1);
820 mtx_unlock(&Giant);
821 if (error == 0)
822 d->bd_promisc = 1;
823 }
824 break;
825
826 /*
827 * Get current data link type.
828 */
829 case BIOCGDLT:
830 if (d->bd_bif == NULL)
831 error = EINVAL;
832 else
833 *(u_int *)addr = d->bd_bif->bif_dlt;
834 break;
835
836 /*
837 * Get a list of supported data link types.
838 */
839 case BIOCGDLTLIST:
840 if (d->bd_bif == NULL)
841 error = EINVAL;
842 else
843 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
844 break;
845
846 /*
847 * Set data link type.
848 */
849 case BIOCSDLT:
850 if (d->bd_bif == NULL)
851 error = EINVAL;
852 else
853 error = bpf_setdlt(d, *(u_int *)addr);
854 break;
855
856 /*
857 * Get interface name.
858 */
859 case BIOCGETIF:
860 if (d->bd_bif == NULL)
861 error = EINVAL;
862 else {
863 struct ifnet *const ifp = d->bd_bif->bif_ifp;
864 struct ifreq *const ifr = (struct ifreq *)addr;
865
866 strlcpy(ifr->ifr_name, ifp->if_xname,
867 sizeof(ifr->ifr_name));
868 }
869 break;
870
871 /*
872 * Set interface.
873 */
874 case BIOCSETIF:
875 error = bpf_setif(d, (struct ifreq *)addr);
876 break;
877
878 /*
879 * Set read timeout.
880 */
881 case BIOCSRTIMEOUT:
882 {
883 struct timeval *tv = (struct timeval *)addr;
884
885 /*
886 * Subtract 1 tick from tvtohz() since this isn't
887 * a one-shot timer.
888 */
889 if ((error = itimerfix(tv)) == 0)
890 d->bd_rtout = tvtohz(tv) - 1;
891 break;
892 }
893
894 /*
895 * Get read timeout.
896 */
897 case BIOCGRTIMEOUT:
898 {
899 struct timeval *tv = (struct timeval *)addr;
900
901 tv->tv_sec = d->bd_rtout / hz;
902 tv->tv_usec = (d->bd_rtout % hz) * tick;
903 break;
904 }
905
906 /*
907 * Get packet stats.
908 */
909 case BIOCGSTATS:
910 {
911 struct bpf_stat *bs = (struct bpf_stat *)addr;
912
913 bs->bs_recv = d->bd_rcount;
914 bs->bs_drop = d->bd_dcount;
915 break;
916 }
917
918 /*
919 * Set immediate mode.
920 */
921 case BIOCIMMEDIATE:
922 d->bd_immediate = *(u_int *)addr;
923 break;
924
925 case BIOCVERSION:
926 {
927 struct bpf_version *bv = (struct bpf_version *)addr;
928
929 bv->bv_major = BPF_MAJOR_VERSION;
930 bv->bv_minor = BPF_MINOR_VERSION;
931 break;
932 }
933
934 /*
935 * Get "header already complete" flag
936 */
937 case BIOCGHDRCMPLT:
938 *(u_int *)addr = d->bd_hdrcmplt;
939 break;
940
941 case BIOCLOCK:
942 d->bd_locked = 1;
943 break;
944 /*
945 * Set "header already complete" flag
946 */
947 case BIOCSHDRCMPLT:
948 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
949 break;
950
951 /*
952 * Get "see sent packets" flag
953 */
954 case BIOCGSEESENT:
955 *(u_int *)addr = d->bd_seesent;
956 break;
957
958 /*
959 * Set "see sent packets" flag
960 */
961 case BIOCSSEESENT:
962 d->bd_seesent = *(u_int *)addr;
963 break;
964
965 case FIONBIO: /* Non-blocking I/O */
966 break;
967
968 case FIOASYNC: /* Send signal on receive packets */
969 d->bd_async = *(int *)addr;
970 break;
971
972 case FIOSETOWN:
973 error = fsetown(*(int *)addr, &d->bd_sigio);
974 break;
975
976 case FIOGETOWN:
977 *(int *)addr = fgetown(&d->bd_sigio);
978 break;
979
980 /* This is deprecated, FIOSETOWN should be used instead. */
981 case TIOCSPGRP:
982 error = fsetown(-(*(int *)addr), &d->bd_sigio);
983 break;
984
985 /* This is deprecated, FIOGETOWN should be used instead. */
986 case TIOCGPGRP:
987 *(int *)addr = -fgetown(&d->bd_sigio);
988 break;
989
990 case BIOCSRSIG: /* Set receive signal */
991 {
992 u_int sig;
993
994 sig = *(u_int *)addr;
995
996 if (sig >= NSIG)
997 error = EINVAL;
998 else
999 d->bd_sig = sig;
1000 break;
1001 }
1002 case BIOCGRSIG:
1003 *(u_int *)addr = d->bd_sig;
1004 break;
1005 }
1006 return (error);
1007 }
1008
1009 /*
1010 * Set d's packet filter program to fp. If this file already has a filter,
1011 * free it and replace it. Returns EINVAL for bogus requests.
1012 */
1013 static int
1014 bpf_setf(d, fp, cmd)
1015 struct bpf_d *d;
1016 struct bpf_program *fp;
1017 u_long cmd;
1018 {
1019 struct bpf_insn *fcode, *old;
1020 u_int wfilter, flen, size;
1021
1022 if (cmd == BIOCSETWF) {
1023 old = d->bd_wfilter;
1024 wfilter = 1;
1025 } else {
1026 wfilter = 0;
1027 old = d->bd_rfilter;
1028 }
1029 if (fp->bf_insns == NULL) {
1030 if (fp->bf_len != 0)
1031 return (EINVAL);
1032 BPFD_LOCK(d);
1033 if (wfilter)
1034 d->bd_wfilter = NULL;
1035 else
1036 d->bd_rfilter = NULL;
1037 reset_d(d);
1038 BPFD_UNLOCK(d);
1039 if (old != NULL)
1040 free((caddr_t)old, M_BPF);
1041 return (0);
1042 }
1043 flen = fp->bf_len;
1044 if (flen > bpf_maxinsns)
1045 return (EINVAL);
1046
1047 size = flen * sizeof(*fp->bf_insns);
1048 fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
1049 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
1050 bpf_validate(fcode, (int)flen)) {
1051 BPFD_LOCK(d);
1052 if (wfilter)
1053 d->bd_wfilter = fcode;
1054 else
1055 d->bd_rfilter = fcode;
1056 reset_d(d);
1057 BPFD_UNLOCK(d);
1058 if (old != NULL)
1059 free((caddr_t)old, M_BPF);
1060
1061 return (0);
1062 }
1063 free((caddr_t)fcode, M_BPF);
1064 return (EINVAL);
1065 }
1066
1067 /*
1068 * Detach a file from its current interface (if attached at all) and attach
1069 * to the interface indicated by the name stored in ifr.
1070 * Return an errno or 0.
1071 */
1072 static int
1073 bpf_setif(d, ifr)
1074 struct bpf_d *d;
1075 struct ifreq *ifr;
1076 {
1077 struct bpf_if *bp;
1078 int error;
1079 struct ifnet *theywant;
1080
1081 theywant = ifunit(ifr->ifr_name);
1082 if (theywant == NULL)
1083 return ENXIO;
1084
1085 /*
1086 * Look through attached interfaces for the named one.
1087 */
1088 mtx_lock(&bpf_mtx);
1089 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1090 struct ifnet *ifp = bp->bif_ifp;
1091
1092 if (ifp == NULL || ifp != theywant)
1093 continue;
1094 /* skip additional entry */
1095 if (bp->bif_driverp != &ifp->if_bpf)
1096 continue;
1097
1098 mtx_unlock(&bpf_mtx);
1099 /*
1100 * We found the requested interface.
1101 * Allocate the packet buffers if we need to.
1102 * If we're already attached to requested interface,
1103 * just flush the buffer.
1104 */
1105 if (d->bd_sbuf == NULL) {
1106 error = bpf_allocbufs(d);
1107 if (error != 0)
1108 return (error);
1109 }
1110 if (bp != d->bd_bif) {
1111 if (d->bd_bif)
1112 /*
1113 * Detach if attached to something else.
1114 */
1115 bpf_detachd(d);
1116
1117 bpf_attachd(d, bp);
1118 }
1119 BPFD_LOCK(d);
1120 reset_d(d);
1121 BPFD_UNLOCK(d);
1122 return (0);
1123 }
1124 mtx_unlock(&bpf_mtx);
1125 /* Not found. */
1126 return (ENXIO);
1127 }
1128
1129 /*
1130 * Support for select() and poll() system calls
1131 *
1132 * Return true iff the specific operation will not block indefinitely.
1133 * Otherwise, return false but make a note that a selwakeup() must be done.
1134 */
1135 static int
1136 bpfpoll(dev, events, td)
1137 struct cdev *dev;
1138 int events;
1139 struct thread *td;
1140 {
1141 struct bpf_d *d;
1142 int revents;
1143
1144 d = dev->si_drv1;
1145 if (d->bd_bif == NULL)
1146 return (ENXIO);
1147
1148 /*
1149 * Refresh PID associated with this descriptor.
1150 */
1151 d->bd_pid = td->td_proc->p_pid;
1152 revents = events & (POLLOUT | POLLWRNORM);
1153 BPFD_LOCK(d);
1154 if (events & (POLLIN | POLLRDNORM)) {
1155 if (bpf_ready(d))
1156 revents |= events & (POLLIN | POLLRDNORM);
1157 else {
1158 selrecord(td, &d->bd_sel);
1159 /* Start the read timeout if necessary. */
1160 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1161 callout_reset(&d->bd_callout, d->bd_rtout,
1162 bpf_timed_out, d);
1163 d->bd_state = BPF_WAITING;
1164 }
1165 }
1166 }
1167 BPFD_UNLOCK(d);
1168 return (revents);
1169 }
1170
1171 /*
1172 * Support for kevent() system call. Register EVFILT_READ filters and
1173 * reject all others.
1174 */
1175 int
1176 bpfkqfilter(dev, kn)
1177 struct cdev *dev;
1178 struct knote *kn;
1179 {
1180 struct bpf_d *d = (struct bpf_d *)dev->si_drv1;
1181
1182 if (kn->kn_filter != EVFILT_READ)
1183 return (1);
1184
1185 /*
1186 * Refresh PID associated with this descriptor.
1187 */
1188 d->bd_pid = curthread->td_proc->p_pid;
1189 kn->kn_fop = &bpfread_filtops;
1190 kn->kn_hook = d;
1191 knlist_add(&d->bd_sel.si_note, kn, 0);
1192
1193 return (0);
1194 }
1195
1196 static void
1197 filt_bpfdetach(kn)
1198 struct knote *kn;
1199 {
1200 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1201
1202 knlist_remove(&d->bd_sel.si_note, kn, 0);
1203 }
1204
1205 static int
1206 filt_bpfread(kn, hint)
1207 struct knote *kn;
1208 long hint;
1209 {
1210 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1211 int ready;
1212
1213 BPFD_LOCK_ASSERT(d);
1214 ready = bpf_ready(d);
1215 if (ready) {
1216 kn->kn_data = d->bd_slen;
1217 if (d->bd_hbuf)
1218 kn->kn_data += d->bd_hlen;
1219 }
1220 else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1221 callout_reset(&d->bd_callout, d->bd_rtout,
1222 bpf_timed_out, d);
1223 d->bd_state = BPF_WAITING;
1224 }
1225
1226 return (ready);
1227 }
1228
1229 /*
1230 * Incoming linkage from device drivers. Process the packet pkt, of length
1231 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1232 * by each process' filter, and if accepted, stashed into the corresponding
1233 * buffer.
1234 */
1235 void
1236 bpf_tap(bp, pkt, pktlen)
1237 struct bpf_if *bp;
1238 u_char *pkt;
1239 u_int pktlen;
1240 {
1241 struct bpf_d *d;
1242 u_int slen;
1243
1244 /*
1245 * Lockless read to avoid cost of locking the interface if there are
1246 * no descriptors attached.
1247 */
1248 if (LIST_EMPTY(&bp->bif_dlist))
1249 return;
1250
1251 BPFIF_LOCK(bp);
1252 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1253 BPFD_LOCK(d);
1254 ++d->bd_rcount;
1255 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
1256 if (slen != 0) {
1257 d->bd_fcount++;
1258 #ifdef MAC
1259 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1260 #endif
1261 catchpacket(d, pkt, pktlen, slen, bcopy);
1262 }
1263 BPFD_UNLOCK(d);
1264 }
1265 BPFIF_UNLOCK(bp);
1266 }
1267
1268 /*
1269 * Copy data from an mbuf chain into a buffer. This code is derived
1270 * from m_copydata in sys/uipc_mbuf.c.
1271 */
1272 static void
1273 bpf_mcopy(src_arg, dst_arg, len)
1274 const void *src_arg;
1275 void *dst_arg;
1276 size_t len;
1277 {
1278 const struct mbuf *m;
1279 u_int count;
1280 u_char *dst;
1281
1282 m = src_arg;
1283 dst = dst_arg;
1284 while (len > 0) {
1285 if (m == NULL)
1286 panic("bpf_mcopy");
1287 count = min(m->m_len, len);
1288 bcopy(mtod(m, void *), dst, count);
1289 m = m->m_next;
1290 dst += count;
1291 len -= count;
1292 }
1293 }
1294
1295 /*
1296 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1297 */
1298 void
1299 bpf_mtap(bp, m)
1300 struct bpf_if *bp;
1301 struct mbuf *m;
1302 {
1303 struct bpf_d *d;
1304 u_int pktlen, slen;
1305
1306 /*
1307 * Lockless read to avoid cost of locking the interface if there are
1308 * no descriptors attached.
1309 */
1310 if (LIST_EMPTY(&bp->bif_dlist))
1311 return;
1312
1313 pktlen = m_length(m, NULL);
1314
1315 BPFIF_LOCK(bp);
1316 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1317 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1318 continue;
1319 BPFD_LOCK(d);
1320 ++d->bd_rcount;
1321 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
1322 if (slen != 0) {
1323 d->bd_fcount++;
1324 #ifdef MAC
1325 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1326 #endif
1327 catchpacket(d, (u_char *)m, pktlen, slen,
1328 bpf_mcopy);
1329 }
1330 BPFD_UNLOCK(d);
1331 }
1332 BPFIF_UNLOCK(bp);
1333 }
1334
1335 /*
1336 * Incoming linkage from device drivers, when packet is in
1337 * an mbuf chain and to be prepended by a contiguous header.
1338 */
1339 void
1340 bpf_mtap2(bp, data, dlen, m)
1341 struct bpf_if *bp;
1342 void *data;
1343 u_int dlen;
1344 struct mbuf *m;
1345 {
1346 struct mbuf mb;
1347 struct bpf_d *d;
1348 u_int pktlen, slen;
1349
1350 /*
1351 * Lockless read to avoid cost of locking the interface if there are
1352 * no descriptors attached.
1353 */
1354 if (LIST_EMPTY(&bp->bif_dlist))
1355 return;
1356
1357 pktlen = m_length(m, NULL);
1358 /*
1359 * Craft on-stack mbuf suitable for passing to bpf_filter.
1360 * Note that we cut corners here; we only set up what's
1361 * absolutely needed--this mbuf should never go anywhere else.
1362 */
1363 mb.m_next = m;
1364 mb.m_data = data;
1365 mb.m_len = dlen;
1366 pktlen += dlen;
1367
1368 BPFIF_LOCK(bp);
1369 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1370 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1371 continue;
1372 BPFD_LOCK(d);
1373 ++d->bd_rcount;
1374 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
1375 if (slen != 0) {
1376 d->bd_fcount++;
1377 #ifdef MAC
1378 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1379 #endif
1380 catchpacket(d, (u_char *)&mb, pktlen, slen,
1381 bpf_mcopy);
1382 }
1383 BPFD_UNLOCK(d);
1384 }
1385 BPFIF_UNLOCK(bp);
1386 }
1387
1388 /*
1389 * Move the packet data from interface memory (pkt) into the
1390 * store buffer. "cpfn" is the routine called to do the actual data
1391 * transfer. bcopy is passed in to copy contiguous chunks, while
1392 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1393 * pkt is really an mbuf.
1394 */
1395 static void
1396 catchpacket(d, pkt, pktlen, snaplen, cpfn)
1397 struct bpf_d *d;
1398 u_char *pkt;
1399 u_int pktlen, snaplen;
1400 void (*cpfn)(const void *, void *, size_t);
1401 {
1402 struct bpf_hdr *hp;
1403 int totlen, curlen;
1404 int hdrlen = d->bd_bif->bif_hdrlen;
1405 int do_wakeup = 0;
1406
1407 BPFD_LOCK_ASSERT(d);
1408 /*
1409 * Figure out how many bytes to move. If the packet is
1410 * greater than or equal to the snapshot length, transfer that
1411 * much. Otherwise, transfer the whole packet (unless
1412 * we hit the buffer size limit).
1413 */
1414 totlen = hdrlen + min(snaplen, pktlen);
1415 if (totlen > d->bd_bufsize)
1416 totlen = d->bd_bufsize;
1417
1418 /*
1419 * Round up the end of the previous packet to the next longword.
1420 */
1421 curlen = BPF_WORDALIGN(d->bd_slen);
1422 if (curlen + totlen > d->bd_bufsize) {
1423 /*
1424 * This packet will overflow the storage buffer.
1425 * Rotate the buffers if we can, then wakeup any
1426 * pending reads.
1427 */
1428 if (d->bd_fbuf == NULL) {
1429 /*
1430 * We haven't completed the previous read yet,
1431 * so drop the packet.
1432 */
1433 ++d->bd_dcount;
1434 return;
1435 }
1436 ROTATE_BUFFERS(d);
1437 do_wakeup = 1;
1438 curlen = 0;
1439 }
1440 else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
1441 /*
1442 * Immediate mode is set, or the read timeout has
1443 * already expired during a select call. A packet
1444 * arrived, so the reader should be woken up.
1445 */
1446 do_wakeup = 1;
1447
1448 /*
1449 * Append the bpf header.
1450 */
1451 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1452 microtime(&hp->bh_tstamp);
1453 hp->bh_datalen = pktlen;
1454 hp->bh_hdrlen = hdrlen;
1455 /*
1456 * Copy the packet data into the store buffer and update its length.
1457 */
1458 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1459 d->bd_slen = curlen + totlen;
1460
1461 if (do_wakeup)
1462 bpf_wakeup(d);
1463 }
1464
1465 /*
1466 * Initialize all nonzero fields of a descriptor.
1467 */
1468 static int
1469 bpf_allocbufs(d)
1470 struct bpf_d *d;
1471 {
1472 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1473 if (d->bd_fbuf == NULL)
1474 return (ENOBUFS);
1475
1476 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1477 if (d->bd_sbuf == NULL) {
1478 free(d->bd_fbuf, M_BPF);
1479 return (ENOBUFS);
1480 }
1481 d->bd_slen = 0;
1482 d->bd_hlen = 0;
1483 return (0);
1484 }
1485
1486 /*
1487 * Free buffers currently in use by a descriptor.
1488 * Called on close.
1489 */
1490 static void
1491 bpf_freed(d)
1492 struct bpf_d *d;
1493 {
1494 /*
1495 * We don't need to lock out interrupts since this descriptor has
1496 * been detached from its interface and hasn't yet been marked
1497 * free.
1498 */
1499 if (d->bd_sbuf != NULL) {
1500 free(d->bd_sbuf, M_BPF);
1501 if (d->bd_hbuf != NULL)
1502 free(d->bd_hbuf, M_BPF);
1503 if (d->bd_fbuf != NULL)
1504 free(d->bd_fbuf, M_BPF);
1505 }
1506 if (d->bd_rfilter)
1507 free((caddr_t)d->bd_rfilter, M_BPF);
1508 if (d->bd_wfilter)
1509 free((caddr_t)d->bd_wfilter, M_BPF);
1510 mtx_destroy(&d->bd_mtx);
1511 }
1512
1513 /*
1514 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
1515 * fixed size of the link header (variable length headers not yet supported).
1516 */
1517 void
1518 bpfattach(ifp, dlt, hdrlen)
1519 struct ifnet *ifp;
1520 u_int dlt, hdrlen;
1521 {
1522
1523 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
1524 }
1525
1526 /*
1527 * Attach an interface to bpf. ifp is a pointer to the structure
1528 * defining the interface to be attached, dlt is the link layer type,
1529 * and hdrlen is the fixed size of the link header (variable length
1530 * headers are not yet supported).
1531 */
1532 void
1533 bpfattach2(ifp, dlt, hdrlen, driverp)
1534 struct ifnet *ifp;
1535 u_int dlt, hdrlen;
1536 struct bpf_if **driverp;
1537 {
1538 struct bpf_if *bp;
1539 bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
1540 if (bp == NULL)
1541 panic("bpfattach");
1542
1543 LIST_INIT(&bp->bif_dlist);
1544 bp->bif_driverp = driverp;
1545 bp->bif_ifp = ifp;
1546 bp->bif_dlt = dlt;
1547 mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
1548
1549 mtx_lock(&bpf_mtx);
1550 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
1551 mtx_unlock(&bpf_mtx);
1552
1553 *bp->bif_driverp = NULL;
1554
1555 /*
1556 * Compute the length of the bpf header. This is not necessarily
1557 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1558 * that the network layer header begins on a longword boundary (for
1559 * performance reasons and to alleviate alignment restrictions).
1560 */
1561 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1562
1563 if (bootverbose)
1564 if_printf(ifp, "bpf attached\n");
1565 }
1566
1567 /*
1568 * Detach bpf from an interface. This involves detaching each descriptor
1569 * associated with the interface, and leaving bd_bif NULL. Notify each
1570 * descriptor as it's detached so that any sleepers wake up and get
1571 * ENXIO.
1572 */
1573 void
1574 bpfdetach(ifp)
1575 struct ifnet *ifp;
1576 {
1577 struct bpf_if *bp;
1578 struct bpf_d *d;
1579
1580 /* Locate BPF interface information */
1581 mtx_lock(&bpf_mtx);
1582 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1583 if (ifp == bp->bif_ifp)
1584 break;
1585 }
1586
1587 /* Interface wasn't attached */
1588 if ((bp == NULL) || (bp->bif_ifp == NULL)) {
1589 mtx_unlock(&bpf_mtx);
1590 printf("bpfdetach: %s was not attached\n", ifp->if_xname);
1591 return;
1592 }
1593
1594 LIST_REMOVE(bp, bif_next);
1595 mtx_unlock(&bpf_mtx);
1596
1597 while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
1598 bpf_detachd(d);
1599 BPFD_LOCK(d);
1600 bpf_wakeup(d);
1601 BPFD_UNLOCK(d);
1602 }
1603
1604 mtx_destroy(&bp->bif_mtx);
1605 free(bp, M_BPF);
1606 }
1607
1608 /*
1609 * Get a list of the available data link types of the interface.
1610 */
1611 static int
1612 bpf_getdltlist(d, bfl)
1613 struct bpf_d *d;
1614 struct bpf_dltlist *bfl;
1615 {
1616 int n, error;
1617 struct ifnet *ifp;
1618 struct bpf_if *bp;
1619
1620 ifp = d->bd_bif->bif_ifp;
1621 n = 0;
1622 error = 0;
1623 mtx_lock(&bpf_mtx);
1624 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1625 if (bp->bif_ifp != ifp)
1626 continue;
1627 if (bfl->bfl_list != NULL) {
1628 if (n >= bfl->bfl_len) {
1629 mtx_unlock(&bpf_mtx);
1630 return (ENOMEM);
1631 }
1632 error = copyout(&bp->bif_dlt,
1633 bfl->bfl_list + n, sizeof(u_int));
1634 }
1635 n++;
1636 }
1637 mtx_unlock(&bpf_mtx);
1638 bfl->bfl_len = n;
1639 return (error);
1640 }
1641
1642 /*
1643 * Set the data link type of a BPF instance.
1644 */
1645 static int
1646 bpf_setdlt(d, dlt)
1647 struct bpf_d *d;
1648 u_int dlt;
1649 {
1650 int error, opromisc;
1651 struct ifnet *ifp;
1652 struct bpf_if *bp;
1653
1654 if (d->bd_bif->bif_dlt == dlt)
1655 return (0);
1656 ifp = d->bd_bif->bif_ifp;
1657 mtx_lock(&bpf_mtx);
1658 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1659 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1660 break;
1661 }
1662 mtx_unlock(&bpf_mtx);
1663 if (bp != NULL) {
1664 opromisc = d->bd_promisc;
1665 bpf_detachd(d);
1666 bpf_attachd(d, bp);
1667 BPFD_LOCK(d);
1668 reset_d(d);
1669 BPFD_UNLOCK(d);
1670 if (opromisc) {
1671 error = ifpromisc(bp->bif_ifp, 1);
1672 if (error)
1673 if_printf(bp->bif_ifp,
1674 "bpf_setdlt: ifpromisc failed (%d)\n",
1675 error);
1676 else
1677 d->bd_promisc = 1;
1678 }
1679 }
1680 return (bp == NULL ? EINVAL : 0);
1681 }
1682
1683 static void
1684 bpf_clone(arg, cred, name, namelen, dev)
1685 void *arg;
1686 struct ucred *cred;
1687 char *name;
1688 int namelen;
1689 struct cdev **dev;
1690 {
1691 int u;
1692
1693 if (*dev != NULL)
1694 return;
1695 if (dev_stdclone(name, NULL, "bpf", &u) != 1)
1696 return;
1697 *dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
1698 "bpf%d", u);
1699 dev_ref(*dev);
1700 (*dev)->si_flags |= SI_CHEAPCLONE;
1701 return;
1702 }
1703
1704 static void
1705 bpf_drvinit(unused)
1706 void *unused;
1707 {
1708
1709 mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
1710 LIST_INIT(&bpf_iflist);
1711 EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
1712 }
1713
1714 static void
1715 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
1716 {
1717
1718 bzero(d, sizeof(*d));
1719 BPFD_LOCK_ASSERT(bd);
1720 d->bd_immediate = bd->bd_immediate;
1721 d->bd_promisc = bd->bd_promisc;
1722 d->bd_hdrcmplt = bd->bd_hdrcmplt;
1723 d->bd_seesent = bd->bd_seesent;
1724 d->bd_async = bd->bd_async;
1725 d->bd_rcount = bd->bd_rcount;
1726 d->bd_dcount = bd->bd_dcount;
1727 d->bd_fcount = bd->bd_fcount;
1728 d->bd_sig = bd->bd_sig;
1729 d->bd_slen = bd->bd_slen;
1730 d->bd_hlen = bd->bd_hlen;
1731 d->bd_bufsize = bd->bd_bufsize;
1732 d->bd_pid = bd->bd_pid;
1733 strlcpy(d->bd_ifname,
1734 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
1735 strlcpy(d->bd_pcomm, bd->bd_pcomm, MAXCOMLEN);
1736 d->bd_locked = bd->bd_locked;
1737 }
1738
1739 static int
1740 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
1741 {
1742 struct xbpf_d *xbdbuf, *xbd;
1743 int index, error;
1744 struct bpf_if *bp;
1745 struct bpf_d *bd;
1746
1747 /*
1748 * XXX This is not technically correct. It is possible for
1749 * non-privileged users to open bpf devices. It would make sense
1750 * if the users who opened the devices were able to retrieve
1751 * the statistics for them, too.
1752 */
1753 error = suser(req->td);
1754 if (error)
1755 return (error);
1756 if (req->oldptr == NULL)
1757 return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
1758 if (bpf_bpfd_cnt == 0)
1759 return (SYSCTL_OUT(req, 0, 0));
1760 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
1761 mtx_lock(&bpf_mtx);
1762 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
1763 mtx_unlock(&bpf_mtx);
1764 free(xbdbuf, M_BPF);
1765 return (ENOMEM);
1766 }
1767 index = 0;
1768 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1769 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
1770 xbd = &xbdbuf[index++];
1771 BPFD_LOCK(bd);
1772 bpfstats_fill_xbpf(xbd, bd);
1773 BPFD_UNLOCK(bd);
1774 }
1775 }
1776 mtx_unlock(&bpf_mtx);
1777 error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
1778 free(xbdbuf, M_BPF);
1779 return (error);
1780 }
1781
1782 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL)
1783
1784 #else /* !DEV_BPF && !NETGRAPH_BPF */
1785 /*
1786 * NOP stubs to allow bpf-using drivers to load and function.
1787 *
1788 * A 'better' implementation would allow the core bpf functionality
1789 * to be loaded at runtime.
1790 */
1791
1792 void
1793 bpf_tap(bp, pkt, pktlen)
1794 struct bpf_if *bp;
1795 u_char *pkt;
1796 u_int pktlen;
1797 {
1798 }
1799
1800 void
1801 bpf_mtap(bp, m)
1802 struct bpf_if *bp;
1803 struct mbuf *m;
1804 {
1805 }
1806
1807 void
1808 bpf_mtap2(bp, d, l, m)
1809 struct bpf_if *bp;
1810 void *d;
1811 u_int l;
1812 struct mbuf *m;
1813 {
1814 }
1815
1816 void
1817 bpfattach(ifp, dlt, hdrlen)
1818 struct ifnet *ifp;
1819 u_int dlt, hdrlen;
1820 {
1821 }
1822
1823 void
1824 bpfattach2(ifp, dlt, hdrlen, driverp)
1825 struct ifnet *ifp;
1826 u_int dlt, hdrlen;
1827 struct bpf_if **driverp;
1828 {
1829 }
1830
1831 void
1832 bpfdetach(ifp)
1833 struct ifnet *ifp;
1834 {
1835 }
1836
1837 u_int
1838 bpf_filter(pc, p, wirelen, buflen)
1839 const struct bpf_insn *pc;
1840 u_char *p;
1841 u_int wirelen;
1842 u_int buflen;
1843 {
1844 return -1; /* "no filter" behaviour */
1845 }
1846
1847 int
1848 bpf_validate(f, len)
1849 const struct bpf_insn *f;
1850 int len;
1851 {
1852 return 0; /* false */
1853 }
1854
1855 #endif /* !DEV_BPF && !NETGRAPH_BPF */