FreeBSD/Linux Kernel Cross Reference
sys/net/bpf.c
1 /*
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
39 *
40 * $FreeBSD$
41 */
42
43 #include "bpf.h"
44
45 #ifndef __GNUC__
46 #define inline
47 #else
48 #define inline __inline
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/conf.h>
54 #include <sys/event.h>
55 #include <sys/malloc.h>
56 #include <sys/mbuf.h>
57 #include <sys/time.h>
58 #include <sys/proc.h>
59 #include <sys/signalvar.h>
60 #include <sys/filio.h>
61 #include <sys/sockio.h>
62 #include <sys/ttycom.h>
63 #include <sys/filedesc.h>
64
65 #if defined(sparc) && BSD < 199103
66 #include <sys/stream.h>
67 #endif
68 #include <sys/poll.h>
69
70 #include <sys/socket.h>
71 #include <sys/vnode.h>
72
73 #include <net/if.h>
74 #include <net/bpf.h>
75 #include <net/bpfdesc.h>
76
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81
82 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
83
84 #if NBPF > 0
85
86 /*
87 * Older BSDs don't have kernel malloc.
88 */
89 #if BSD < 199103
90 extern bcopy();
91 static caddr_t bpf_alloc();
92 #include <net/bpf_compat.h>
93 #define BPF_BUFSIZE (MCLBYTES-8)
94 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
95 #else
96 #define BPF_BUFSIZE 4096
97 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
98 #endif
99
100 #define PRINET 26 /* interruptible */
101
102 /*
103 * The default read buffer size is patchable.
104 */
105 static int bpf_bufsize = BPF_BUFSIZE;
106 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
107 &bpf_bufsize, 0, "");
108 static int bpf_maxbufsize = BPF_MAXBUFSIZE;
109 SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
110 &bpf_maxbufsize, 0, "");
111
112 /*
113 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
114 */
115 static struct bpf_if *bpf_iflist;
116
117 static int bpf_allocbufs __P((struct bpf_d *));
118 static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
119 static void bpf_detachd __P((struct bpf_d *d));
120 static void bpf_freed __P((struct bpf_d *));
121 static void bpf_mcopy __P((const void *, void *, size_t));
122 static int bpf_movein __P((struct uio *, int,
123 struct mbuf **, struct sockaddr *, int *));
124 static int bpf_setif __P((struct bpf_d *, struct ifreq *));
125 static void bpf_timed_out __P((void *));
126 static inline void
127 bpf_wakeup __P((struct bpf_d *));
128 static void catchpacket __P((struct bpf_d *, u_char *, u_int,
129 u_int, void (*)(const void *, void *, size_t),
130 struct timeval *));
131 static void reset_d __P((struct bpf_d *));
132 static int bpf_setf __P((struct bpf_d *, struct bpf_program *));
133
134 static d_open_t bpfopen;
135 static d_close_t bpfclose;
136 static d_read_t bpfread;
137 static d_write_t bpfwrite;
138 static d_ioctl_t bpfioctl;
139 static d_poll_t bpfpoll;
140 static d_kqfilter_t bpfkqfilter;
141
#define CDEV_MAJOR 23
/*
 * Character-device switch: wires the bpf entry points (open, close,
 * read, write, ioctl, poll, kqfilter) into the device layer.  mmap,
 * strategy, dump and psize are unsupported and use the no-op stubs.
 */
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* poll */	bpfpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"bpf",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	D_KQFILTER,
	/* bmaj */	-1,
	/* kqfilter */	bpfkqfilter,
};
160
161 static void filt_bpfdetach(struct knote *kn);
162 static int filt_bpfread(struct knote *kn, long hint);
163
164 static struct filterops bpfread_filtops =
165 { 1, NULL, filt_bpfdetach, filt_bpfread };
166
167 static int
168 bpf_movein(uio, linktype, mp, sockp, datlen)
169 register struct uio *uio;
170 int linktype, *datlen;
171 register struct mbuf **mp;
172 register struct sockaddr *sockp;
173 {
174 struct mbuf *m;
175 int error;
176 int len;
177 int hlen;
178
179 /*
180 * Build a sockaddr based on the data link layer type.
181 * We do this at this level because the ethernet header
182 * is copied directly into the data field of the sockaddr.
183 * In the case of SLIP, there is no header and the packet
184 * is forwarded as is.
185 * Also, we are careful to leave room at the front of the mbuf
186 * for the link level header.
187 */
188 switch (linktype) {
189
190 case DLT_SLIP:
191 sockp->sa_family = AF_INET;
192 hlen = 0;
193 break;
194
195 case DLT_EN10MB:
196 sockp->sa_family = AF_UNSPEC;
197 /* XXX Would MAXLINKHDR be better? */
198 hlen = sizeof(struct ether_header);
199 break;
200
201 case DLT_FDDI:
202 #if defined(__FreeBSD__) || defined(__bsdi__)
203 sockp->sa_family = AF_IMPLINK;
204 hlen = 0;
205 #else
206 sockp->sa_family = AF_UNSPEC;
207 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
208 hlen = 24;
209 #endif
210 break;
211
212 case DLT_RAW:
213 case DLT_NULL:
214 sockp->sa_family = AF_UNSPEC;
215 hlen = 0;
216 break;
217
218 #ifdef __FreeBSD__
219 case DLT_ATM_RFC1483:
220 /*
221 * en atm driver requires 4-byte atm pseudo header.
222 * though it isn't standard, vpi:vci needs to be
223 * specified anyway.
224 */
225 sockp->sa_family = AF_UNSPEC;
226 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
227 break;
228 #endif
229 case DLT_PPP:
230 sockp->sa_family = AF_UNSPEC;
231 hlen = 4; /* This should match PPP_HDRLEN */
232 break;
233
234 default:
235 return (EIO);
236 }
237
238 len = uio->uio_resid;
239 *datlen = len - hlen;
240 if ((unsigned)len > MCLBYTES)
241 return (EIO);
242
243 MGETHDR(m, M_WAIT, MT_DATA);
244 if (m == 0)
245 return (ENOBUFS);
246 if (len > MHLEN) {
247 #if BSD >= 199103
248 MCLGET(m, M_WAIT);
249 if ((m->m_flags & M_EXT) == 0) {
250 #else
251 MCLGET(m);
252 if (m->m_len != MCLBYTES) {
253 #endif
254 error = ENOBUFS;
255 goto bad;
256 }
257 }
258 m->m_pkthdr.len = m->m_len = len;
259 m->m_pkthdr.rcvif = NULL;
260 *mp = m;
261 /*
262 * Make room for link header.
263 */
264 if (hlen != 0) {
265 m->m_pkthdr.len -= hlen;
266 m->m_len -= hlen;
267 #if BSD >= 199103
268 m->m_data += hlen; /* XXX */
269 #else
270 m->m_off += hlen;
271 #endif
272 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
273 if (error)
274 goto bad;
275 }
276 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
277 if (!error)
278 return (0);
279 bad:
280 m_freem(m);
281 return (error);
282 }
283
284 /*
285 * Attach file to the bpf interface, i.e. make d listen on bp.
286 * Must be called at splimp.
287 */
288 static void
289 bpf_attachd(d, bp)
290 struct bpf_d *d;
291 struct bpf_if *bp;
292 {
293 /*
294 * Point d at bp, and add d to the interface's list of listeners.
295 * Finally, point the driver's bpf cookie at the interface so
296 * it will divert packets to bpf.
297 */
298 d->bd_bif = bp;
299 d->bd_next = bp->bif_dlist;
300 bp->bif_dlist = d;
301
302 bp->bif_ifp->if_bpf = bp;
303 }
304
305 /*
306 * Detach a file from its interface.
307 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/*
	 * Remove d from the interface's descriptor list by walking a
	 * pointer-to-pointer down the singly linked list; panics if d
	 * is not found (indicates corrupted bookkeeping).
	 */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}
345
346 /*
347 * Open ethernet device. Returns ENXIO for illegal minor device number,
348 * EBUSY if file is open by another process.
349 */
350 /* ARGSUSED */
static int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	/* bpf is not available inside a jail. */
	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process. If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d)
		return (EBUSY);
	/*
	 * NOTE(review): make_dev is called on every first open of a
	 * minor -- presumably to materialize the next /dev/bpfN node
	 * for a cloning-style scheme; confirm against the dev framework.
	 */
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	/* M_ZERO: the descriptor starts with all fields cleared. */
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;	/* patchable default (sysctl) */
	d->bd_sig = SIGIO;		/* default async-notify signal */
	d->bd_seesent = 1;		/* see locally sent packets by default */
	callout_init(&d->bd_callout);
	return (0);
}
379
380 /*
381 * Close the descriptor by detaching it from its interface,
382 * deallocating its buffers, and marking it free.
383 */
384 /* ARGSUSED */
static int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	register int s;

	/* Drop SIGIO ownership before tearing the descriptor down. */
	funsetown(d->bd_sigio);
	s = splimp();
	/* Cancel any pending read-timeout callout. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	/* Unhook from the interface (also exits promiscuous mode). */
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	/* Release buffers and filter, then the descriptor itself. */
	bpf_freed(d);
	dev->si_drv1 = 0;
	free(d, M_BPF);

	return (0);
}
409
410 /*
411 * Support for SunOS, which does not have tsleep.
412 */
#if BSD < 199103
/* Timeout callout: flag the descriptor and wake the sleeper. */
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

/* Map the modern tsleep-style call onto the old sleep()-based shim. */
#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate a timed, interruptible sleep on systems without tsleep:
 * arm a timeout for bd_rtout ticks (if nonzero), sleep, then map the
 * outcome to 0 / EINTR / EWOULDBLOCK like tsleep would.
 */
int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			/* Woken before the timer fired: disarm it. */
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			/* Timer fired and sleep was not interrupted. */
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif
448
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * Wrapped in do { } while (0) so the multi-statement expansion behaves
 * as a single statement even in an unbraced if/else body.
 */
#define ROTATE_BUFFERS(d) do { \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0; \
} while (0)
460 /*
461 * bpfread - read next chunk of packets from buffers
462 */
static int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/* A read cancels any timeout armed by poll/kevent. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			splx(s);
			return (ENXIO);
		}

		/* Non-blocking read with nothing buffered. */
		if (ioflag & IO_NDELAY) {
			splx(s);
			return (EWOULDBLOCK);
		}
		/* Sleep until a packet arrives or bd_rtout ticks elapse. */
		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
				  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				/* Timed out with nothing captured. */
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	/* Return the drained hold buffer to the free slot. */
	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}
565
566
567 /*
568 * If there are processes sleeping on this descriptor, wake them up.
569 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	/* Disarm the read-timeout callout; data has arrived. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	/* Wake any thread sleeping in bpfread(). */
	wakeup((caddr_t)d);
	/* Deliver the async-I/O signal if FIOASYNC + owner are set. */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	/* Notify select()/poll() and kqueue waiters. */
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
	KNOTE(&d->bd_sel.si_note, 0);
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}
595
596 static void
597 bpf_timed_out(arg)
598 void *arg;
599 {
600 struct bpf_d *d = (struct bpf_d *)arg;
601 int s;
602
603 s = splimp();
604 if (d->bd_state == BPF_WAITING) {
605 d->bd_state = BPF_TIMED_OUT;
606 if (d->bd_slen != 0)
607 bpf_wakeup(d);
608 }
609 splx(s);
610 }
611
612 static int
613 bpfwrite(dev, uio, ioflag)
614 dev_t dev;
615 struct uio *uio;
616 int ioflag;
617 {
618 register struct bpf_d *d = dev->si_drv1;
619 struct ifnet *ifp;
620 struct mbuf *m;
621 int error, s;
622 static struct sockaddr dst;
623 int datlen;
624
625 if (d->bd_bif == 0)
626 return (ENXIO);
627
628 ifp = d->bd_bif->bif_ifp;
629
630 if (uio->uio_resid == 0)
631 return (0);
632
633 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
634 if (error)
635 return (error);
636
637 if (datlen > ifp->if_mtu)
638 return (EMSGSIZE);
639
640 if (d->bd_hdrcmplt)
641 dst.sa_family = pseudo_AF_HDRCMPLT;
642
643 s = splnet();
644 #if BSD >= 199103
645 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
646 #else
647 error = (*ifp->if_output)(ifp, m, &dst);
648 #endif
649 splx(s);
650 /*
651 * The driver frees the mbuf.
652 */
653 return (error);
654 }
655
656 /*
657 * Reset a descriptor by flushing its packet buffer and clearing the
658 * receive and drop counts. Should be called at splimp.
659 */
660 static void
661 reset_d(d)
662 struct bpf_d *d;
663 {
664 if (d->bd_hbuf) {
665 /* Free the hold buffer. */
666 d->bd_fbuf = d->bd_hbuf;
667 d->bd_hbuf = 0;
668 }
669 d->bd_slen = 0;
670 d->bd_hlen = 0;
671 d->bd_rcount = 0;
672 d->bd_dcount = 0;
673 }
674
675 /*
676 * FIONREAD Check for read packet available.
677 * SIOCGIFADDR Get interface address - convenient hook to driver.
678 * BIOCGBLEN Get buffer len [for read()].
679 * BIOCSETF Set ethernet read filter.
680 * BIOCFLUSH Flush read packet buffer.
681 * BIOCPROMISC Put interface into promiscuous mode.
682 * BIOCGDLT Get link layer type.
683 * BIOCGETIF Get interface name.
684 * BIOCSETIF Set interface.
685 * BIOCSRTIMEOUT Set read timeout.
686 * BIOCGRTIMEOUT Get read timeout.
687 * BIOCGSTATS Get packet stats.
688 * BIOCIMMEDIATE Set immediate mode.
689 * BIOCVERSION Get filter language version.
690 * BIOCGHDRCMPLT Get "header already complete" flag
691 * BIOCSHDRCMPLT Set "header already complete" flag
692 * BIOCGSEESENT Get "see packets sent" flag
693 * BIOCSSEESENT Set "see packets sent" flag
694 */
695 /* ARGSUSED */
static int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	int s, error = 0;

	/* Any ioctl cancels a pending read timeout. */
	s = splimp();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	splx(s);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Bytes available = store buffer + hold buffer. */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			/* Forward the request to the bound interface. */
			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * bound (the buffers are allocated at BIOCSETIF time); the
	 * size is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and the
	 * clamped value is written back to the caller.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		/* Only the first request per descriptor hits the driver. */
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			/* Name is "<driver><unit>", e.g. "fxp0". */
			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert ticks back to seconds/microseconds. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
		 	u_int sig;

			sig = *(u_int *)addr;

			/* Reject out-of-range signal numbers. */
			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
972
973 /*
974 * Set d's packet filter program to fp. If this file already has a filter,
975 * free it and replace it. Returns EINVAL for bogus requests.
976 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	/* A null program clears the filter (accept-all). */
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	/* Bound the program size before computing the allocation. */
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	/*
	 * Copy the program from userland and verify it is well formed
	 * before installing; the swap and buffer flush happen at splimp
	 * so the tap never sees a half-installed filter.
	 */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
1018
1019 /*
1020 * Detach a file from its current interface (if attached at all) and attach
1021 * to the interface indicated by the name stored in ifr.
1022 * Return an errno or 0.
1023 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	/* Resolve the interface name to an ifnet first. */
	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		/* First bind on this descriptor: allocate buffers. */
		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		/* Always flush stale captures from any previous binding. */
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}
1077
1078 /*
1079 * Support for select() and poll() system calls
1080 *
1081 * Return true iff the specific operation will not block indefinitely.
1082 * Otherwise, return false but make a note that a selwakeup() must be done.
1083 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents;

	d = dev->si_drv1;
	/* XXX returns ENXIO as the revents word when unbound. */
	if (d->bd_bif == NULL)
		return (ENXIO);

	/* Writes never block on a bpf descriptor. */
	revents = events & (POLLOUT | POLLWRNORM);
	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			/* Not ready: register for selwakeup. */
			selrecord(p, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	splx(s);
	return (revents);
}
1116
1117 /*
1118 * Support for kqueue(2).
1119 */
1120 int
1121 bpfkqfilter(dev, kn)
1122 dev_t dev;
1123 struct knote *kn;
1124 {
1125 struct bpf_d *d = (struct bpf_d *)dev->si_drv1;
1126 int s;
1127
1128 if (kn->kn_filter != EVFILT_READ)
1129 return (1);
1130
1131 kn->kn_fop = &bpfread_filtops;
1132 kn->kn_hook = (caddr_t)d;
1133 s = splimp();
1134 SLIST_INSERT_HEAD(&d->bd_sel.si_note, kn, kn_selnext);
1135 splx(s);
1136
1137 return (0);
1138 }
1139
1140 static void
1141 filt_bpfdetach(kn)
1142 struct knote *kn;
1143 {
1144 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1145 int s;
1146
1147 s = splimp();
1148 SLIST_REMOVE(&d->bd_sel.si_note, kn, knote, kn_selnext);
1149 splx(s);
1150 }
1151
static int
filt_bpfread(kn, hint)
	struct knote *kn;
	long hint;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int s, ready;

	s = splimp();
	ready = bpf_ready(d);
	if (ready) {
		/* Report bytes available: store buffer + hold buffer. */
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		/* Arm the read timeout, mirroring bpfpoll(). */
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}
	splx(s);

	return (ready);
}
1176
1177 /*
1178 * Incoming linkage from device drivers. Process the packet pkt, of length
1179 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1180 * by each process' filter, and if accepted, stashed into the corresponding
1181 * buffer.
1182 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	int gottime;
	struct timeval tv;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	/* Timestamp lazily: only taken once, and only if some filter matches. */
	gottime = 0;
	bp = ifp->if_bpf;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* slen is the snapshot length accepted by the filter (0 = reject). */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			/* Contiguous buffer, so plain bcopy is the copy routine. */
			catchpacket(d, pkt, pktlen, slen, bcopy, &tv);
		}
	}
}
1213
1214 /*
1215 * Copy data from an mbuf chain into a buffer. This code is derived
1216 * from m_copydata in sys/uipc_mbuf.c.
1217 */
1218 static void
1219 bpf_mcopy(src_arg, dst_arg, len)
1220 const void *src_arg;
1221 void *dst_arg;
1222 register size_t len;
1223 {
1224 register const struct mbuf *m;
1225 register u_int count;
1226 u_char *dst;
1227
1228 m = src_arg;
1229 dst = dst_arg;
1230 while (len > 0) {
1231 if (m == 0)
1232 panic("bpf_mcopy");
1233 count = min(m->m_len, len);
1234 bcopy(mtod(m, void *), dst, count);
1235 m = m->m_next;
1236 dst += count;
1237 len -= count;
1238 }
1239 }
1240
1241 /*
1242 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1243 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;
	int gottime;
	struct timeval tv;

	/* Timestamp lazily: only once, and only if some filter matches. */
	gottime = 0;

	/* Total packet length is the sum over the whole chain. */
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		/*
		 * Skip packets with no receive interface (i.e. sent by
		 * this host) unless the listener asked to see them.
		 */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		/* buflen 0 tells bpf_filter the "packet" is an mbuf chain. */
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			/* bpf_mcopy knows how to flatten the chain. */
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy, &tv);
		}
	}
}
1276
1277 /*
1278 * Move the packet data from interface memory (pkt) into the
1279 * store buffer. Return 1 if it's time to wakeup a listener (buffer full),
1280 * otherwise 0. "copy" is the routine called to do the actual data
1281 * transfer. bcopy is passed in to copy contiguous chunks, while
1282 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1283 * pkt is really an mbuf.
1284 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn, tv)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
	struct timeval *tv;
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.  bh_datalen is the full on-wire
	 * length; bh_caplen (set below) may be smaller if the buffer
	 * size limited the capture.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1349
1350 /*
1351 * Initialize all nonzero fields of a descriptor.
1352 */
1353 static int
1354 bpf_allocbufs(d)
1355 register struct bpf_d *d;
1356 {
1357 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1358 if (d->bd_fbuf == 0)
1359 return (ENOBUFS);
1360
1361 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1362 if (d->bd_sbuf == 0) {
1363 free(d->bd_fbuf, M_BPF);
1364 return (ENOBUFS);
1365 }
1366 d->bd_slen = 0;
1367 d->bd_hlen = 0;
1368 return (0);
1369 }
1370
1371 /*
1372 * Free buffers currently in use by a descriptor.
1373 * Called on close.
1374 */
1375 static void
1376 bpf_freed(d)
1377 register struct bpf_d *d;
1378 {
1379 /*
1380 * We don't need to lock out interrupts since this descriptor has
1381 * been detached from its interface and it yet hasn't been marked
1382 * free.
1383 */
1384 if (d->bd_sbuf != 0) {
1385 free(d->bd_sbuf, M_BPF);
1386 if (d->bd_hbuf != 0)
1387 free(d->bd_hbuf, M_BPF);
1388 if (d->bd_fbuf != 0)
1389 free(d->bd_fbuf, M_BPF);
1390 }
1391 if (d->bd_filter)
1392 free((caddr_t)d->bd_filter, M_BPF);
1393 }
1394
1395 /*
1396 * Attach an interface to bpf. ifp is a pointer to the structure
1397 * defining the interface to be attached, dlt is the link layer type,
1398 * and hdrlen is the fixed size of the link header (variable length
1399 * headers are not yet supporrted).
1400 */
1401 void
1402 bpfattach(ifp, dlt, hdrlen)
1403 struct ifnet *ifp;
1404 u_int dlt, hdrlen;
1405 {
1406 struct bpf_if *bp;
1407 bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_DONTWAIT | M_ZERO);
1408 if (bp == 0)
1409 panic("bpfattach");
1410
1411 bp->bif_ifp = ifp;
1412 bp->bif_dlt = dlt;
1413
1414 bp->bif_next = bpf_iflist;
1415 bpf_iflist = bp;
1416
1417 bp->bif_ifp->if_bpf = 0;
1418
1419 /*
1420 * Compute the length of the bpf header. This is not necessarily
1421 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1422 * that the network layer header begins on a longword boundary (for
1423 * performance reasons and to alleviate alignment restrictions).
1424 */
1425 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1426
1427 if (bootverbose)
1428 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1429 }
1430
1431 /*
1432 * Detach bpf from an interface. This involves detaching each descriptor
1433 * associated with the interface, and leaving bd_bif NULL. Notify each
1434 * descriptor as it's detached so that any sleepers wake up and get
1435 * ENXIO.
1436 */
1437 void
1438 bpfdetach(ifp)
1439 struct ifnet *ifp;
1440 {
1441 struct bpf_if *bp, *bp_prev;
1442 struct bpf_d *d;
1443 int s;
1444
1445 s = splimp();
1446
1447 /* Locate BPF interface information */
1448 bp_prev = NULL;
1449 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1450 if (ifp == bp->bif_ifp)
1451 break;
1452 bp_prev = bp;
1453 }
1454
1455 /* Interface wasn't attached */
1456 if ((bp == NULL) || (bp->bif_ifp == NULL)) {
1457 splx(s);
1458 printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
1459 ifp->if_unit);
1460 return;
1461 }
1462
1463 while ((d = bp->bif_dlist) != NULL) {
1464 bpf_detachd(d);
1465 bpf_wakeup(d);
1466 }
1467
1468 if (bp_prev) {
1469 bp_prev->bif_next = bp->bif_next;
1470 } else {
1471 bpf_iflist = bp->bif_next;
1472 }
1473
1474 free(bp, M_BPF);
1475
1476 splx(s);
1477 }
1478
static void bpf_drvinit __P((void *unused));

/*
 * Register the bpf character device entry points.  Run once at boot
 * via the SYSINIT below, during driver initialization.
 */
static void
bpf_drvinit(unused)
	void *unused;
{

	cdevsw_add(&bpf_cdevsw);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1490
1491 #else /* !BPF */
1492 /*
1493 * NOP stubs to allow bpf-using drivers to load and function.
1494 *
1495 * A 'better' implementation would allow the core bpf functionality
1496 * to be loaded at runtime.
1497 */
1498
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	/* NOP stub: BPF is not compiled into this kernel. */
}
1506
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	/* NOP stub: BPF is not compiled into this kernel. */
}
1513
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	/* NOP stub: BPF is not compiled into this kernel. */
}
1520
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	/* NOP stub: BPF is not compiled into this kernel. */
}
1526
u_int
bpf_filter(pc, p, wirelen, buflen)
	register const struct bpf_insn *pc;
	register u_char *p;
	u_int wirelen;
	register u_int buflen;
{
	/*
	 * Stub: with no filter machinery compiled in, return (u_int)-1
	 * (the maximum snap length) so callers treat every packet as
	 * fully matched.
	 */
	return -1;	/* "no filter" behaviour */
}
1536
1537 #endif /* !BPF */
Cache object: 51979ab150769f2da5135bb938c45b97
|