FreeBSD/Linux Kernel Cross Reference
sys/net/bpf.c
1 /*-
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95
35 *
36 * $FreeBSD: releng/6.3/sys/net/bpf.c 173886 2007-11-24 19:45:58Z cvs2svn $
37 */
38
39 #include "opt_bpf.h"
40 #include "opt_mac.h"
41 #include "opt_netgraph.h"
42
43 #include <sys/types.h>
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/conf.h>
47 #include <sys/fcntl.h>
48 #include <sys/mac.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/time.h>
52 #include <sys/proc.h>
53 #include <sys/signalvar.h>
54 #include <sys/filio.h>
55 #include <sys/sockio.h>
56 #include <sys/ttycom.h>
57 #include <sys/uio.h>
58
59 #include <sys/event.h>
60 #include <sys/file.h>
61 #include <sys/poll.h>
62 #include <sys/proc.h>
63
64 #include <sys/socket.h>
65
66 #include <net/if.h>
67 #include <net/bpf.h>
68 #include <net/bpfdesc.h>
69
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <sys/kernel.h>
73 #include <sys/sysctl.h>
74
/* Allocator tag for all BPF-owned kernel memory. */
static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET  26			/* interruptible */

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT. The same network interface might have several BPF interface
 * structures registered by different layers in the stack (i.e., 802.11
 * frames, ethernet frames, etc).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;		/* number of attached descriptors */

/* Internal helpers; see the definitions below for details. */
static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int, struct ifnet *,
		    struct mbuf **, struct sockaddr *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t),
		    struct timeval *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static void	bpf_clone(void *, struct ucred *, char *, int, struct cdev **);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * The default read buffer size is patchable.
 */
SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
static int bpf_bufsize = 4096;		/* default per-descriptor buffer size */
SYSCTL_INT(_net_bpf, OID_AUTO, bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;	/* upper bound for BIOCSBLEN */
SYSCTL_INT(_net_bpf, OID_AUTO, maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "");
static int bpf_maxinsns = BPF_MAXINSNS;		/* cap on filter program length */
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

/* Character-device entry points implemented below. */
static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

/* kqueue EVFILT_READ filter operations for bpf descriptors. */
static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };
154
/*
 * Copy a packet from userland (uio) into a newly allocated mbuf, run the
 * descriptor's write filter over it, and build a link-level sockaddr for
 * the interface output routine.  On success, *mp holds the mbuf (owned by
 * the caller) and sockp describes the link-layer destination.
 * Returns 0 or an errno.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, struct bpf_insn *wfilter)
{
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	/* The payload (minus link header) must fit within the interface MTU. */
	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	/* A single mbuf cluster is the largest packet accepted here. */
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	if (len > MHLEN) {
		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
	} else {
		MGETHDR(m, M_TRYWAIT, MT_DATA);
	}
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/* Packet is too short to even contain the link-level header. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* Reject the write if the descriptor's write filter drops it. */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen;	/* XXX */
#else
		m->m_off += hlen;
#endif
	}

	return (0);
bad:
	m_freem(m);
	return (error);
}
293
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	/* Global listener count, updated under the interface lock. */
	bpf_bpfd_cnt++;
	BPFIF_UNLOCK(bp);
}
312
/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	/* Lock order: interface lock first, then descriptor lock. */
	BPFIF_LOCK(bp);
	BPFD_LOCK(d);
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	bpf_bpfd_cnt--;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_UNLOCK(bp);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.  Runs after both locks are dropped.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}
357
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	/* Instantiate the device node on first open of a cloned unit. */
	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	/*
	 * Allocate and initialize the descriptor.  Defaults: system buffer
	 * size, SIGIO as the async signal, and "see sent packets" enabled.
	 */
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
	d->bd_pid = td->td_proc->p_pid;
	strlcpy(d->bd_pcomm, td->td_proc->p_comm, MAXCOMLEN);
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, NET_CALLOUT_MPSAFE);
	knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL);

	return (0);
}
401
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;

	/* Cancel any pending read timeout before tearing down. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	/* Wake any selectors still referencing this descriptor. */
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	bpf_freed(d);
	dev->si_drv1 = NULL;
	free(d, M_BPF);

	return (0);
}
433
434
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * Wrapped in do { } while (0) so the expansion is a single statement
 * and remains safe inside unbraced if/else bodies (CERT PRE10-C).
 */
#define ROTATE_BUFFERS(d) do {				\
	(d)->bd_hbuf = (d)->bd_sbuf;			\
	(d)->bd_hlen = (d)->bd_slen;			\
	(d)->bd_sbuf = (d)->bd_fbuf;			\
	(d)->bd_slen = 0;				\
	(d)->bd_fbuf = NULL;				\
} while (0)
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	/* Refresh the PID and cancel any pending read timeout. */
	d->bd_pid = curthread->td_proc->p_pid;
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & O_NONBLOCK) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		/* Sleep until wakeup, signal, or bd_rtout ticks elapse. */
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	/* Return the drained hold buffer to the free slot. */
	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}
548
549
550 /*
551 * If there are processes sleeping on this descriptor, wake them up.
552 */
553 static __inline void
554 bpf_wakeup(struct bpf_d *d)
555 {
556
557 BPFD_LOCK_ASSERT(d);
558 if (d->bd_state == BPF_WAITING) {
559 callout_stop(&d->bd_callout);
560 d->bd_state = BPF_IDLE;
561 }
562 wakeup(d);
563 if (d->bd_async && d->bd_sig && d->bd_sigio)
564 pgsigio(&d->bd_sigio, d->bd_sig, 0);
565
566 selwakeuppri(&d->bd_sel, PRINET);
567 KNOTE_LOCKED(&d->bd_sel.si_note, 0);
568 }
569
570 static void
571 bpf_timed_out(void *arg)
572 {
573 struct bpf_d *d = (struct bpf_d *)arg;
574
575 BPFD_LOCK(d);
576 if (d->bd_state == BPF_WAITING) {
577 d->bd_state = BPF_TIMED_OUT;
578 if (d->bd_slen != 0)
579 bpf_wakeup(d);
580 }
581 BPFD_UNLOCK(d);
582 }
583
/*
 * Write a packet to the descriptor's bound interface.  The user data is
 * pulled into an mbuf (subject to the write filter), and then handed to
 * the interface output routine, which consumes the mbuf.
 */
static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr dst;

	/* Refresh PID associated with this descriptor. */
	d->bd_pid = curthread->td_proc->p_pid;
	if (d->bd_bif == NULL)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	/* Zero-length writes succeed trivially. */
	if (uio->uio_resid == 0)
		return (0);

	bzero(&dst, sizeof(dst));
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, d->bd_wfilter);
	if (error)
		return (error);

	/* Tell the output path the link header is already in place. */
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

#ifdef MAC
	BPFD_LOCK(d);
	mac_create_mbuf_from_bpfdesc(d, m);
	BPFD_UNLOCK(d);
#endif
	NET_LOCK_GIANT();
	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	NET_UNLOCK_GIANT();
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}
627
628 /*
629 * Reset a descriptor by flushing its packet buffer and clearing the
630 * receive and drop counts.
631 */
632 static void
633 reset_d(struct bpf_d *d)
634 {
635
636 mtx_assert(&d->bd_mtx, MA_OWNED);
637 if (d->bd_hbuf) {
638 /* Free the hold buffer. */
639 d->bd_fbuf = d->bd_hbuf;
640 d->bd_hbuf = NULL;
641 }
642 d->bd_slen = 0;
643 d->bd_hlen = 0;
644 d->bd_rcount = 0;
645 d->bd_dcount = 0;
646 d->bd_fcount = 0;
647 }
648
/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCSETWF		Set ethernet write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *  BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	/*
	 * Refresh PID associated with this descriptor.
	 */
	d->bd_pid = td->td_proc->p_pid;
	/* Any ioctl cancels a pending read timeout. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	/*
	 * A locked descriptor (see BIOCLOCK below) only admits the
	 * commands whitelisted here; everything else returns EPERM.
	 */
	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			return (EPERM);
		}
	}
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				/* Forward the request to the bound driver. */
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before binding an interface;
	 * the value is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and
	 * the clamped value is written back to the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			mtx_lock(&Giant);
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			mtx_unlock(&Giant);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert stored ticks back into a timeval. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/* Restrict this descriptor to read-only commands (see above). */
	case BIOCLOCK:
		d->bd_locked = 1;
		break;
	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
990
991 /*
992 * Set d's packet filter program to fp. If this file already has a filter,
993 * free it and replace it. Returns EINVAL for bogus requests.
994 */
995 static int
996 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
997 {
998 struct bpf_insn *fcode, *old;
999 u_int wfilter, flen, size;
1000
1001 if (cmd == BIOCSETWF) {
1002 old = d->bd_wfilter;
1003 wfilter = 1;
1004 } else {
1005 wfilter = 0;
1006 old = d->bd_rfilter;
1007 }
1008 if (fp->bf_insns == NULL) {
1009 if (fp->bf_len != 0)
1010 return (EINVAL);
1011 BPFD_LOCK(d);
1012 if (wfilter)
1013 d->bd_wfilter = NULL;
1014 else
1015 d->bd_rfilter = NULL;
1016 reset_d(d);
1017 BPFD_UNLOCK(d);
1018 if (old != NULL)
1019 free((caddr_t)old, M_BPF);
1020 return (0);
1021 }
1022 flen = fp->bf_len;
1023 if (flen > bpf_maxinsns)
1024 return (EINVAL);
1025
1026 size = flen * sizeof(*fp->bf_insns);
1027 fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
1028 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
1029 bpf_validate(fcode, (int)flen)) {
1030 BPFD_LOCK(d);
1031 if (wfilter)
1032 d->bd_wfilter = fcode;
1033 else
1034 d->bd_rfilter = fcode;
1035 reset_d(d);
1036 BPFD_UNLOCK(d);
1037 if (old != NULL)
1038 free((caddr_t)old, M_BPF);
1039
1040 return (0);
1041 }
1042 free((caddr_t)fcode, M_BPF);
1043 return (EINVAL);
1044 }
1045
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	/* The named interface must exist and have BPF support. */
	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;
	/*
	 * Allocate the packet buffers if we need to.
	 * If we're already attached to requested interface,
	 * just flush the buffer.
	 */
	if (d->bd_sbuf == NULL) {
		error = bpf_allocbufs(d);
		if (error != 0)
			return (error);
	}
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	/* Discard anything buffered under the previous binding. */
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}
1087
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return (ENXIO);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	d->bd_pid = td->td_proc->p_pid;
	/* Writes never block on this device; report them ready outright. */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}
1126
1127 /*
1128 * Support for kevent() system call. Register EVFILT_READ filters and
1129 * reject all others.
1130 */
1131 int
1132 bpfkqfilter(struct cdev *dev, struct knote *kn)
1133 {
1134 struct bpf_d *d = (struct bpf_d *)dev->si_drv1;
1135
1136 if (kn->kn_filter != EVFILT_READ)
1137 return (1);
1138
1139 /*
1140 * Refresh PID associated with this descriptor.
1141 */
1142 d->bd_pid = curthread->td_proc->p_pid;
1143 kn->kn_fop = &bpfread_filtops;
1144 kn->kn_hook = d;
1145 knlist_add(&d->bd_sel.si_note, kn, 0);
1146
1147 return (0);
1148 }
1149
1150 static void
1151 filt_bpfdetach(struct knote *kn)
1152 {
1153 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1154
1155 knlist_remove(&d->bd_sel.si_note, kn, 0);
1156 }
1157
1158 static int
1159 filt_bpfread(struct knote *kn, long hint)
1160 {
1161 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1162 int ready;
1163
1164 BPFD_LOCK_ASSERT(d);
1165 ready = bpf_ready(d);
1166 if (ready) {
1167 kn->kn_data = d->bd_slen;
1168 if (d->bd_hbuf)
1169 kn->kn_data += d->bd_hlen;
1170 }
1171 else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1172 callout_reset(&d->bd_callout, d->bd_rtout,
1173 bpf_timed_out, d);
1174 d->bd_state = BPF_WAITING;
1175 }
1176
1177 return (ready);
1178 }
1179
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	u_int slen;
	int gottime;
	struct timeval tv;

	/* Take the timestamp lazily, once, only if some filter matches. */
	gottime = 0;
	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1215
1216 /*
1217 * Copy data from an mbuf chain into a buffer. This code is derived
1218 * from m_copydata in sys/uipc_mbuf.c.
1219 */
1220 static void
1221 bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
1222 {
1223 const struct mbuf *m;
1224 u_int count;
1225 u_char *dst;
1226
1227 m = src_arg;
1228 dst = dst_arg;
1229 while (len > 0) {
1230 if (m == NULL)
1231 panic("bpf_mcopy");
1232 count = min(m->m_len, len);
1233 bcopy(mtod(m, void *), dst, count);
1234 m = m->m_next;
1235 dst += count;
1236 len -= count;
1237 }
1238 }
1239
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Take the timestamp lazily, once, only if some filter matches. */
	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Skip locally generated packets unless BIOCSSEESENT is set. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* buflen 0 — presumably signals mbuf-chain input to bpf_filter; verify there. */
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_mcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1278
/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Take the timestamp lazily, once, only if some filter matches. */
	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Skip locally generated packets unless BIOCSSEESENT is set. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* buflen 0 — presumably signals mbuf-chain input to bpf_filter; verify there. */
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_mcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1328
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 *
 * snaplen is the number of bytes the filter asked to capture; tv is
 * the timestamp recorded when the packet matched.  Must be called
 * with the descriptor lock held.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(const void *, void *, size_t), struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	}
	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * bh_caplen may be less than pktlen if capture was truncated by
	 * snaplen or by the buffer size.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}
1402
1403 /*
1404 * Initialize all nonzero fields of a descriptor.
1405 */
1406 static int
1407 bpf_allocbufs(struct bpf_d *d)
1408 {
1409 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1410 if (d->bd_fbuf == NULL)
1411 return (ENOBUFS);
1412
1413 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1414 if (d->bd_sbuf == NULL) {
1415 free(d->bd_fbuf, M_BPF);
1416 return (ENOBUFS);
1417 }
1418 d->bd_slen = 0;
1419 d->bd_hlen = 0;
1420 return (0);
1421 }
1422
1423 /*
1424 * Free buffers currently in use by a descriptor.
1425 * Called on close.
1426 */
1427 static void
1428 bpf_freed(struct bpf_d *d)
1429 {
1430 /*
1431 * We don't need to lock out interrupts since this descriptor has
1432 * been detached from its interface and it yet hasn't been marked
1433 * free.
1434 */
1435 if (d->bd_sbuf != NULL) {
1436 free(d->bd_sbuf, M_BPF);
1437 if (d->bd_hbuf != NULL)
1438 free(d->bd_hbuf, M_BPF);
1439 if (d->bd_fbuf != NULL)
1440 free(d->bd_fbuf, M_BPF);
1441 }
1442 if (d->bd_rfilter)
1443 free((caddr_t)d->bd_rfilter, M_BPF);
1444 if (d->bd_wfilter)
1445 free((caddr_t)d->bd_wfilter, M_BPF);
1446 mtx_destroy(&d->bd_mtx);
1447 }
1448
/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 *
 * Convenience wrapper around bpfattach2() that stores the new bpf_if
 * in the interface's own if_bpf pointer.
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}
1459
1460 /*
1461 * Attach an interface to bpf. ifp is a pointer to the structure
1462 * defining the interface to be attached, dlt is the link layer type,
1463 * and hdrlen is the fixed size of the link header (variable length
1464 * headers are not yet supporrted).
1465 */
1466 void
1467 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1468 {
1469 struct bpf_if *bp;
1470
1471 bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
1472 if (bp == NULL)
1473 panic("bpfattach");
1474
1475 LIST_INIT(&bp->bif_dlist);
1476 bp->bif_ifp = ifp;
1477 bp->bif_dlt = dlt;
1478 mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
1479 KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
1480 *driverp = bp;
1481
1482 mtx_lock(&bpf_mtx);
1483 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
1484 mtx_unlock(&bpf_mtx);
1485
1486 /*
1487 * Compute the length of the bpf header. This is not necessarily
1488 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1489 * that the network layer header begins on a longword boundary (for
1490 * performance reasons and to alleviate alignment restrictions).
1491 */
1492 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1493
1494 if (bootverbose)
1495 if_printf(ifp, "bpf attached\n");
1496 }
1497
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp;
	struct bpf_d *d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	/* Unlink from the global list before tearing down descriptors. */
	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/*
	 * Detach each descriptor and wake its sleepers; bpf_detachd()
	 * removes it from bif_dlist, so this loop terminates.
	 */
	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}
1537
1538 /*
1539 * Get a list of available data link type of the interface.
1540 */
1541 static int
1542 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
1543 {
1544 int n, error;
1545 struct ifnet *ifp;
1546 struct bpf_if *bp;
1547
1548 ifp = d->bd_bif->bif_ifp;
1549 n = 0;
1550 error = 0;
1551 mtx_lock(&bpf_mtx);
1552 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1553 if (bp->bif_ifp != ifp)
1554 continue;
1555 if (bfl->bfl_list != NULL) {
1556 if (n >= bfl->bfl_len) {
1557 mtx_unlock(&bpf_mtx);
1558 return (ENOMEM);
1559 }
1560 error = copyout(&bp->bif_dlt,
1561 bfl->bfl_list + n, sizeof(u_int));
1562 }
1563 n++;
1564 }
1565 mtx_unlock(&bpf_mtx);
1566 bfl->bfl_len = n;
1567 return (error);
1568 }
1569
/*
 * Set the data link type of a BPF instance.
 *
 * Searches for another bpf_if on the same interface advertising dlt
 * and re-attaches the descriptor to it, resetting its buffers and
 * counters and restoring promiscuous mode.  Returns EINVAL if the
 * interface offers no such DLT.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	/* Already at the requested DLT: nothing to do. */
	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		/*
		 * bpf_detachd() dropped promiscuous mode; turn it back
		 * on for the new attachment if it was previously set.
		 * (The "else" below binds to "if (error)".)
		 */
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
					"bpf_setdlt: ifpromisc failed (%d)\n",
					error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}
1608
1609 static void
1610 bpf_clone(void *arg, struct ucred *cred, char *name, int namelen,
1611 struct cdev **dev)
1612 {
1613 int u;
1614
1615 if (*dev != NULL)
1616 return;
1617 if (dev_stdclone(name, NULL, "bpf", &u) != 1)
1618 return;
1619 *dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
1620 "bpf%d", u);
1621 dev_ref(*dev);
1622 (*dev)->si_flags |= SI_CHEAPCLONE;
1623 return;
1624 }
1625
/*
 * Driver initialization, run via SYSINIT: set up the global bpf mutex
 * and interface list, and register the /dev/bpfN clone handler.
 */
static void
bpf_drvinit(void *unused)
{

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);
	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
}
1634
1635 static void
1636 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
1637 {
1638
1639 bzero(d, sizeof(*d));
1640 BPFD_LOCK_ASSERT(bd);
1641 d->bd_immediate = bd->bd_immediate;
1642 d->bd_promisc = bd->bd_promisc;
1643 d->bd_hdrcmplt = bd->bd_hdrcmplt;
1644 d->bd_seesent = bd->bd_seesent;
1645 d->bd_async = bd->bd_async;
1646 d->bd_rcount = bd->bd_rcount;
1647 d->bd_dcount = bd->bd_dcount;
1648 d->bd_fcount = bd->bd_fcount;
1649 d->bd_sig = bd->bd_sig;
1650 d->bd_slen = bd->bd_slen;
1651 d->bd_hlen = bd->bd_hlen;
1652 d->bd_bufsize = bd->bd_bufsize;
1653 d->bd_pid = bd->bd_pid;
1654 strlcpy(d->bd_ifname,
1655 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
1656 strlcpy(d->bd_pcomm, bd->bd_pcomm, MAXCOMLEN);
1657 d->bd_locked = bd->bd_locked;
1658 }
1659
/*
 * Sysctl handler exporting one struct xbpf_d per open bpf descriptor.
 * Restricted to the superuser.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct. It is possible for non
	 * privileged users to open bpf devices. It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = suser(req->td);
	if (error)
		return (error);
	/* Size probe: report the space needed for the current count. */
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	/* Allocate before taking bpf_mtx; M_WAITOK malloc may sleep. */
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	/* Re-check the size under the lock; the count may have changed. */
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}
1704
1705 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL)
1706
1707 #else /* !DEV_BPF && !NETGRAPH_BPF */
1708 /*
1709 * NOP stubs to allow bpf-using drivers to load and function.
1710 *
1711 * A 'better' implementation would allow the core bpf functionality
1712 * to be loaded at runtime.
1713 */
static struct bpf_if bp_null;	/* shared placeholder returned by the stub bpfattach2() */
1715
/* Stub: BPF support not compiled in; silently discard. */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}
1720
/* Stub: BPF support not compiled in; silently discard. */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}
1725
/* Stub: BPF support not compiled in; silently discard. */
void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}
1730
/* Stub: forwards to bpfattach2() so if_bpf still gets initialized. */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}
1737
/* Stub: point the driver's bpf_if at the shared placeholder. */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}
1744
/* Stub: nothing to detach when BPF is not compiled in. */
void
bpfdetach(struct ifnet *ifp)
{
}
1749
/* Stub: accept the whole packet, as if no filter were installed. */
u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return -1;	/* "no filter" behaviour */
}
1755
/* Stub: report every filter program as invalid. */
int
bpf_validate(const struct bpf_insn *f, int len)
{
	return 0;	/* false */
}
1761
1762 #endif /* !DEV_BPF && !NETGRAPH_BPF */
1763
1764 /*
1765 * ABI compatibility hacks. Older drivers check if_bpf against NULL
1766 * to see if there are active listeners. In the new ABI, if_bpf is
1767 * always non-NULL, so bpf_*tap() are always invoked. We check for
1768 * listeners in these wrappers and call the real functions if needed.
1769 */
1770 #undef bpf_tap
1771 #undef bpf_mtap
1772 #undef bpf_mtap2
1773
1774 void bpf_tap(struct bpf_if *, u_char *, u_int);
1775 void bpf_mtap(struct bpf_if *, struct mbuf *);
1776 void bpf_mtap2(struct bpf_if *, void *, u_int, struct mbuf *);
1777
1778 void
1779 bpf_tap(bp, pkt, pktlen)
1780 struct bpf_if *bp;
1781 u_char *pkt;
1782 u_int pktlen;
1783 {
1784
1785 if (bpf_peers_present(bp))
1786 bpf_tap_new(bp, pkt, pktlen);
1787 }
1788
/*
 * ABI-compat wrapper: forward to the real mbuf tap only when listeners
 * are present.  Converted from the obsolescent K&R definition style to
 * an ANSI definition matching the prototype declared above.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{

	if (bpf_peers_present(bp))
		bpf_mtap_new(bp, m);
}
1798
1799 void
1800 bpf_mtap2(bp, d, l, m)
1801 struct bpf_if *bp;
1802 void *d;
1803 u_int l;
1804 struct mbuf *m;
1805 {
1806
1807 if (bpf_peers_present(bp))
1808 bpf_mtap2_new(bp, d, l, m);
1809 }
Cache object: 29d087cb4df65c1dc5670527881ccf6a
|