FreeBSD/Linux Kernel Cross Reference
sys/net/bpf.c
1 /* $NetBSD: bpf.c,v 1.141.6.2 2011/04/05 06:10:50 riz Exp $ */
2
3 /*
4 * Copyright (c) 1990, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from the Stanford/CMU enet packet filter,
8 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
9 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10 * Berkeley Laboratory.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)bpf.c 8.4 (Berkeley) 1/9/95
37 * static char rcsid[] =
38 * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141.6.2 2011/04/05 06:10:50 riz Exp $");
43
44 #if defined(_KERNEL_OPT)
45 #include "opt_bpf.h"
46 #include "sl.h"
47 #include "strip.h"
48 #endif
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/buf.h>
54 #include <sys/time.h>
55 #include <sys/proc.h>
56 #include <sys/user.h>
57 #include <sys/ioctl.h>
58 #include <sys/conf.h>
59 #include <sys/vnode.h>
60 #include <sys/queue.h>
61
62 #include <sys/file.h>
63 #include <sys/filedesc.h>
64 #include <sys/tty.h>
65 #include <sys/uio.h>
66
67 #include <sys/protosw.h>
68 #include <sys/socket.h>
69 #include <sys/errno.h>
70 #include <sys/kernel.h>
71 #include <sys/poll.h>
72 #include <sys/sysctl.h>
73 #include <sys/kauth.h>
74
75 #include <net/if.h>
76 #include <net/slip.h>
77
78 #include <net/bpf.h>
79 #include <net/bpfdesc.h>
80
81 #include <net/if_arc.h>
82 #include <net/if_ether.h>
83
84 #include <netinet/in.h>
85 #include <netinet/if_inarp.h>
86
87
88 #include <compat/sys/sockio.h>
89
90 #ifndef BPF_BUFSIZE
91 /*
92 * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet
93 * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k).
94 */
95 # define BPF_BUFSIZE 32768
96 #endif
97
98 #define PRINET 26 /* interruptible */
99
100 /*
101 * The default read buffer size, and limit for BIOCSBLEN, is sysctl'able.
102 * XXX the default values should be computed dynamically based
103 * on available memory size and available mbuf clusters.
104 */
105 int bpf_bufsize = BPF_BUFSIZE;
106 int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */
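
Editor's note: bpf_maxbufsize backs the net.bpf.maxbufsize sysctl node created in SYSCTL_SETUP at the bottom of this file. As an illustrative userland sketch (not code from this file), the BIOCSBLEN ceiling can be inspected with sysctlbyname(3):

#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int max;
	size_t len = sizeof(max);

	/* net.bpf.maxbufsize is the upper bound enforced on BIOCSBLEN. */
	if (sysctlbyname("net.bpf.maxbufsize", &max, &len, NULL, 0) == -1)
		return 1;
	printf("net.bpf.maxbufsize = %d\n", max);
	return 0;
}
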
107
108
109 /*
110 * Global BPF statistics returned by net.bpf.stats sysctl.
111 */
112 struct bpf_stat bpf_gstats;
113
114 /*
115 * Use a mutex to avoid a race condition between gathering the stats/peers
116 * and opening/closing the device.
117 */
118 static kmutex_t bpf_mtx;
119
120 /*
121 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
122 * bpf_dtab holds the descriptors, indexed by minor device #
123 */
124 struct bpf_if *bpf_iflist;
125 LIST_HEAD(, bpf_d) bpf_list;
126
127 static int bpf_allocbufs(struct bpf_d *);
128 static void bpf_deliver(struct bpf_if *,
129 void *(*cpfn)(void *, const void *, size_t),
130 void *, u_int, u_int, struct ifnet *);
131 static void bpf_freed(struct bpf_d *);
132 static void bpf_ifname(struct ifnet *, struct ifreq *);
133 static void *bpf_mcpy(void *, const void *, size_t);
134 static int bpf_movein(struct uio *, int, int,
135 struct mbuf **, struct sockaddr *);
136 static void bpf_attachd(struct bpf_d *, struct bpf_if *);
137 static void bpf_detachd(struct bpf_d *);
138 static int bpf_setif(struct bpf_d *, struct ifreq *);
139 static void bpf_timed_out(void *);
140 static inline void
141 bpf_wakeup(struct bpf_d *);
142 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
143 void *(*)(void *, const void *, size_t), struct timeval *);
144 static void reset_d(struct bpf_d *);
145 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
146 static int bpf_setdlt(struct bpf_d *, u_int);
147
148 static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t,
149 int);
150 static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t,
151 int);
152 static int bpf_ioctl(struct file *, u_long, void *);
153 static int bpf_poll(struct file *, int);
154 static int bpf_close(struct file *);
155 static int bpf_kqfilter(struct file *, struct knote *);
156 static void bpf_softintr(void *);
157
158 static const struct fileops bpf_fileops = {
159 .fo_read = bpf_read,
160 .fo_write = bpf_write,
161 .fo_ioctl = bpf_ioctl,
162 .fo_fcntl = fnullop_fcntl,
163 .fo_poll = bpf_poll,
164 .fo_stat = fbadop_stat,
165 .fo_close = bpf_close,
166 .fo_kqfilter = bpf_kqfilter,
167 .fo_drain = fnullop_drain,
168 };
169
170 dev_type_open(bpfopen);
171
172 const struct cdevsw bpf_cdevsw = {
173 bpfopen, noclose, noread, nowrite, noioctl,
174 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
175 };
176
177 static int
178 bpf_movein(struct uio *uio, int linktype, int mtu, struct mbuf **mp,
179 struct sockaddr *sockp)
180 {
181 struct mbuf *m;
182 int error;
183 int len;
184 int hlen;
185 int align;
186
187 /*
188 * Build a sockaddr based on the data link layer type.
189 * We do this at this level because the ethernet header
190 * is copied directly into the data field of the sockaddr.
191 * In the case of SLIP, there is no header and the packet
192 * is forwarded as is.
193 * Also, we are careful to leave room at the front of the mbuf
194 * for the link level header.
195 */
196 switch (linktype) {
197
198 case DLT_SLIP:
199 sockp->sa_family = AF_INET;
200 hlen = 0;
201 align = 0;
202 break;
203
204 case DLT_PPP:
205 sockp->sa_family = AF_UNSPEC;
206 hlen = 0;
207 align = 0;
208 break;
209
210 case DLT_EN10MB:
211 sockp->sa_family = AF_UNSPEC;
212 /* XXX Would MAXLINKHDR be better? */
213 /* 6(dst)+6(src)+2(type) */
214 hlen = sizeof(struct ether_header);
215 align = 2;
216 break;
217
218 case DLT_ARCNET:
219 sockp->sa_family = AF_UNSPEC;
220 hlen = ARC_HDRLEN;
221 align = 5;
222 break;
223
224 case DLT_FDDI:
225 sockp->sa_family = AF_LINK;
226 /* XXX 4(FORMAC)+6(dst)+6(src) */
227 hlen = 16;
228 align = 0;
229 break;
230
231 case DLT_ECONET:
232 sockp->sa_family = AF_UNSPEC;
233 hlen = 6;
234 align = 2;
235 break;
236
237 case DLT_NULL:
238 sockp->sa_family = AF_UNSPEC;
239 hlen = 0;
240 align = 0;
241 break;
242
243 default:
244 return (EIO);
245 }
246
247 len = uio->uio_resid;
248 /*
249 * If there aren't enough bytes for a link level header or the
250 * packet length exceeds the interface mtu, return an error.
251 */
252 if (len < hlen || len - hlen > mtu)
253 return (EMSGSIZE);
254
255 /*
256 * XXX Avoid complicated buffer chaining ---
257 * bail if it won't fit in a single mbuf.
258 * (Take into account possible alignment bytes)
259 */
260 if ((unsigned)len > MCLBYTES - align)
261 return (EIO);
262
263 m = m_gethdr(M_WAIT, MT_DATA);
264 m->m_pkthdr.rcvif = 0;
265 m->m_pkthdr.len = len - hlen;
266 if (len > MHLEN - align) {
267 m_clget(m, M_WAIT);
268 if ((m->m_flags & M_EXT) == 0) {
269 error = ENOBUFS;
270 goto bad;
271 }
272 }
273
274 	/* Ensure the data is properly aligned */
275 if (align > 0) {
276 m->m_data += align;
277 m->m_len -= align;
278 }
279
280 error = uiomove(mtod(m, void *), len, uio);
281 if (error)
282 goto bad;
283 if (hlen != 0) {
284 memcpy(sockp->sa_data, mtod(m, void *), hlen);
285 m->m_data += hlen; /* XXX */
286 len -= hlen;
287 }
288 m->m_len = len;
289 *mp = m;
290 return (0);
291
292 bad:
293 m_freem(m);
294 return (error);
295 }
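
A worked example of the hlen/align interplay above (editorial addition; values taken from the DLT_EN10MB case): hlen is sizeof(struct ether_header) = 14 and align is 2, so after m->m_data += align the frame is copied in at offset 2; once the 14-byte Ethernet header is peeled off into sockp->sa_data and m->m_data += hlen is applied, the network-layer header starts at offset 16, a longword boundary:

	offset  0..1   2..15                  16...
	        pad    Ethernet header (14)   IP (or other) header, longword aligned
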
296
297 /*
298 * Attach file to the bpf interface, i.e. make d listen on bp.
299 * Must be called at splnet.
300 */
301 static void
302 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
303 {
304 /*
305 * Point d at bp, and add d to the interface's list of listeners.
306 * Finally, point the driver's bpf cookie at the interface so
307 * it will divert packets to bpf.
308 */
309 d->bd_bif = bp;
310 d->bd_next = bp->bif_dlist;
311 bp->bif_dlist = d;
312
313 *bp->bif_driverp = bp;
314 }
315
316 /*
317 * Detach a file from its interface.
318 */
319 static void
320 bpf_detachd(struct bpf_d *d)
321 {
322 struct bpf_d **p;
323 struct bpf_if *bp;
324
325 bp = d->bd_bif;
326 /*
327 * Check if this descriptor had requested promiscuous mode.
328 * If so, turn it off.
329 */
330 if (d->bd_promisc) {
331 int error;
332
333 d->bd_promisc = 0;
334 /*
335 * Take device out of promiscuous mode. Since we were
336 * able to enter promiscuous mode, we should be able
337 * to turn it off. But we can get an error if
338 * the interface was configured down, so only panic
339 		 * if we get an unexpected error.
340 */
341 error = ifpromisc(bp->bif_ifp, 0);
342 if (error && error != EINVAL)
343 panic("bpf: ifpromisc failed");
344 }
345 /* Remove d from the interface's descriptor list. */
346 p = &bp->bif_dlist;
347 while (*p != d) {
348 p = &(*p)->bd_next;
349 if (*p == 0)
350 panic("bpf_detachd: descriptor not in list");
351 }
352 *p = (*p)->bd_next;
353 if (bp->bif_dlist == 0)
354 /*
355 * Let the driver know that there are no more listeners.
356 */
357 *d->bd_bif->bif_driverp = 0;
358 d->bd_bif = 0;
359 }
360
361
362 /*
363 * Mark a descriptor free by making it point to itself.
364 * This is probably cheaper than marking with a constant since
365 * the address should be in a register anyway.
366 */
367
368 /*
369 * bpfilterattach() is called at boot time.
370 */
371 /* ARGSUSED */
372 void
373 bpfilterattach(int n)
374 {
375 mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
376
377 mutex_enter(&bpf_mtx);
378 LIST_INIT(&bpf_list);
379 mutex_exit(&bpf_mtx);
380
381 bpf_gstats.bs_recv = 0;
382 bpf_gstats.bs_drop = 0;
383 bpf_gstats.bs_capt = 0;
384 }
385
386 /*
387  * Open bpf device. Each open() clones a new, independent descriptor.
388 */
389 /* ARGSUSED */
390 int
391 bpfopen(dev_t dev, int flag, int mode, struct lwp *l)
392 {
393 struct bpf_d *d;
394 struct file *fp;
395 int error, fd;
396
397 	/* fd_allocfile() will allocate the descriptor for us. */
398 if ((error = fd_allocfile(&fp, &fd)) != 0)
399 return error;
400
401 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO);
402 d->bd_bufsize = bpf_bufsize;
403 d->bd_seesent = 1;
404 d->bd_pid = l->l_proc->p_pid;
405 callout_init(&d->bd_callout, 0);
406 selinit(&d->bd_sel);
407 d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d);
408
409 mutex_enter(&bpf_mtx);
410 LIST_INSERT_HEAD(&bpf_list, d, bd_list);
411 mutex_exit(&bpf_mtx);
412
413 return fd_clone(fp, fd, flag, &bpf_fileops, d);
414 }
415
416 /*
417 * Close the descriptor by detaching it from its interface,
418 * deallocating its buffers, and marking it free.
419 */
420 /* ARGSUSED */
421 static int
422 bpf_close(struct file *fp)
423 {
424 struct bpf_d *d = fp->f_data;
425 int s;
426
427 KERNEL_LOCK(1, NULL);
428
429 /*
430 * Refresh the PID associated with this bpf file.
431 */
432 d->bd_pid = curproc->p_pid;
433
434 s = splnet();
435 if (d->bd_state == BPF_WAITING)
436 callout_stop(&d->bd_callout);
437 d->bd_state = BPF_IDLE;
438 if (d->bd_bif)
439 bpf_detachd(d);
440 splx(s);
441 bpf_freed(d);
442 mutex_enter(&bpf_mtx);
443 LIST_REMOVE(d, bd_list);
444 mutex_exit(&bpf_mtx);
445 callout_destroy(&d->bd_callout);
446 seldestroy(&d->bd_sel);
447 softint_disestablish(d->bd_sih);
448 free(d, M_DEVBUF);
449 fp->f_data = NULL;
450
451 KERNEL_UNLOCK_ONE(NULL);
452
453 return (0);
454 }
455
456 /*
457 * Rotate the packet buffers in descriptor d. Move the store buffer
458 * into the hold slot, and the free buffer into the store slot.
459 * Zero the length of the new store buffer.
460 */
461 #define ROTATE_BUFFERS(d) \
462 (d)->bd_hbuf = (d)->bd_sbuf; \
463 (d)->bd_hlen = (d)->bd_slen; \
464 (d)->bd_sbuf = (d)->bd_fbuf; \
465 (d)->bd_slen = 0; \
466 (d)->bd_fbuf = 0;
467 /*
468 * bpfread - read next chunk of packets from buffers
469 */
470 static int
471 bpf_read(struct file *fp, off_t *offp, struct uio *uio,
472 kauth_cred_t cred, int flags)
473 {
474 struct bpf_d *d = fp->f_data;
475 int timed_out;
476 int error;
477 int s;
478
479 /*
480 * Restrict application to use a buffer the same size as
481 * the kernel buffers.
482 */
483 if (uio->uio_resid != d->bd_bufsize)
484 return (EINVAL);
485
486 KERNEL_LOCK(1, NULL);
487 s = splnet();
488 if (d->bd_state == BPF_WAITING)
489 callout_stop(&d->bd_callout);
490 timed_out = (d->bd_state == BPF_TIMED_OUT);
491 d->bd_state = BPF_IDLE;
492 /*
493 * If the hold buffer is empty, then do a timed sleep, which
494 * ends when the timeout expires or when enough packets
495 * have arrived to fill the store buffer.
496 */
497 while (d->bd_hbuf == 0) {
498 if (fp->f_flag & FNONBLOCK) {
499 if (d->bd_slen == 0) {
500 splx(s);
501 KERNEL_UNLOCK_ONE(NULL);
502 return (EWOULDBLOCK);
503 }
504 ROTATE_BUFFERS(d);
505 break;
506 }
507
508 if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
509 /*
510 			 * One or more packets either arrived since the previous
511 * read or arrived while we were asleep.
512 * Rotate the buffers and return what's here.
513 */
514 ROTATE_BUFFERS(d);
515 break;
516 }
517 error = tsleep(d, PRINET|PCATCH, "bpf",
518 d->bd_rtout);
519 if (error == EINTR || error == ERESTART) {
520 splx(s);
521 KERNEL_UNLOCK_ONE(NULL);
522 return (error);
523 }
524 if (error == EWOULDBLOCK) {
525 /*
526 * On a timeout, return what's in the buffer,
527 * which may be nothing. If there is something
528 * in the store buffer, we can rotate the buffers.
529 */
530 if (d->bd_hbuf)
531 /*
532 * We filled up the buffer in between
533 * getting the timeout and arriving
534 * here, so we don't need to rotate.
535 */
536 break;
537
538 if (d->bd_slen == 0) {
539 splx(s);
540 KERNEL_UNLOCK_ONE(NULL);
541 return (0);
542 }
543 ROTATE_BUFFERS(d);
544 break;
545 }
546 if (error != 0)
547 goto done;
548 }
549 /*
550 * At this point, we know we have something in the hold slot.
551 */
552 splx(s);
553
554 /*
555 * Move data from hold buffer into user space.
556 * We know the entire buffer is transferred since
557 * we checked above that the read buffer is bpf_bufsize bytes.
558 */
559 error = uiomove(d->bd_hbuf, d->bd_hlen, uio);
560
561 s = splnet();
562 d->bd_fbuf = d->bd_hbuf;
563 d->bd_hbuf = 0;
564 d->bd_hlen = 0;
565 done:
566 splx(s);
567 KERNEL_UNLOCK_ONE(NULL);
568 return (error);
569 }
570
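
The read contract above (the request must be exactly bd_bufsize, and each captured packet in the returned buffer is preceded by a struct bpf_hdr and padded to BPF_WORDALIGN) implies a consumer loop along the following lines. This is an editorial userland sketch, not code from this file; the function name is invented:

#include <sys/types.h>
#include <sys/time.h>
#include <net/bpf.h>
#include <unistd.h>

/* Drain one bufferful of packets from an already configured bpf fd. */
static void
drain_bpf(int fd, char *buf, size_t bufsize)
{
	ssize_t n = read(fd, buf, bufsize);	/* request exactly bufsize */
	char *p = buf;

	while (n > 0 && p < buf + n) {
		struct bpf_hdr *bh = (struct bpf_hdr *)(void *)p;
		u_char *pkt = (u_char *)p + bh->bh_hdrlen;

		/* bh->bh_caplen bytes of packet data start at pkt. */
		(void)pkt;
		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
	}
}
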
571
572 /*
573 * If there are processes sleeping on this descriptor, wake them up.
574 */
575 static inline void
576 bpf_wakeup(struct bpf_d *d)
577 {
578 wakeup(d);
579 if (d->bd_async)
580 softint_schedule(d->bd_sih);
581 selnotify(&d->bd_sel, 0, 0);
582 }
583
584 static void
585 bpf_softintr(void *cookie)
586 {
587 struct bpf_d *d;
588
589 d = cookie;
590 if (d->bd_async)
591 fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL);
592 }
593
594 static void
595 bpf_timed_out(void *arg)
596 {
597 struct bpf_d *d = arg;
598 int s;
599
600 s = splnet();
601 if (d->bd_state == BPF_WAITING) {
602 d->bd_state = BPF_TIMED_OUT;
603 if (d->bd_slen != 0)
604 bpf_wakeup(d);
605 }
606 splx(s);
607 }
608
609
610 static int
611 bpf_write(struct file *fp, off_t *offp, struct uio *uio,
612 kauth_cred_t cred, int flags)
613 {
614 struct bpf_d *d = fp->f_data;
615 struct ifnet *ifp;
616 struct mbuf *m;
617 int error, s;
618 static struct sockaddr_storage dst;
619
620 m = NULL; /* XXX gcc */
621
622 KERNEL_LOCK(1, NULL);
623
624 if (d->bd_bif == 0) {
625 KERNEL_UNLOCK_ONE(NULL);
626 return (ENXIO);
627 }
628
629 ifp = d->bd_bif->bif_ifp;
630
631 if (uio->uio_resid == 0) {
632 KERNEL_UNLOCK_ONE(NULL);
633 return (0);
634 }
635
636 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m,
637 (struct sockaddr *) &dst);
638 if (error) {
639 KERNEL_UNLOCK_ONE(NULL);
640 return (error);
641 }
642
643 if (m->m_pkthdr.len > ifp->if_mtu) {
644 KERNEL_UNLOCK_ONE(NULL);
645 m_freem(m);
646 return (EMSGSIZE);
647 }
648
649 if (d->bd_hdrcmplt)
650 dst.ss_family = pseudo_AF_HDRCMPLT;
651
652 s = splsoftnet();
653 error = (*ifp->if_output)(ifp, m, (struct sockaddr *) &dst, NULL);
654 splx(s);
655 KERNEL_UNLOCK_ONE(NULL);
656 /*
657 * The driver frees the mbuf.
658 */
659 return (error);
660 }
661
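
For the transmit direction implemented above, userland simply write(2)s one complete frame per call; with BIOCSHDRCMPLT set, the link-level header supplied in the buffer is used as-is. An editorial sketch (the helper name is invented):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <unistd.h>

/* Inject one raw frame through a bpf descriptor already bound with BIOCSETIF. */
static int
bpf_inject(int fd, const void *frame, size_t framelen)
{
	u_int complete = 1;

	/* Tell bpf that the caller supplies the full link-level header. */
	if (ioctl(fd, BIOCSHDRCMPLT, &complete) == -1)
		return -1;
	if (write(fd, frame, framelen) != (ssize_t)framelen)
		return -1;
	return 0;
}
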
662 /*
663 * Reset a descriptor by flushing its packet buffer and clearing the
664 * receive and drop counts. Should be called at splnet.
665 */
666 static void
667 reset_d(struct bpf_d *d)
668 {
669 if (d->bd_hbuf) {
670 /* Free the hold buffer. */
671 d->bd_fbuf = d->bd_hbuf;
672 d->bd_hbuf = 0;
673 }
674 d->bd_slen = 0;
675 d->bd_hlen = 0;
676 d->bd_rcount = 0;
677 d->bd_dcount = 0;
678 d->bd_ccount = 0;
679 }
680
681 /*
682 * FIONREAD Check for read packet available.
683 * BIOCGBLEN Get buffer len [for read()].
684 * BIOCSETF Set ethernet read filter.
685 * BIOCFLUSH Flush read packet buffer.
686 * BIOCPROMISC Put interface into promiscuous mode.
687 * BIOCGDLT Get link layer type.
688 * BIOCGETIF Get interface name.
689 * BIOCSETIF Set interface.
690 * BIOCSRTIMEOUT Set read timeout.
691 * BIOCGRTIMEOUT Get read timeout.
692 * BIOCGSTATS Get packet stats.
693 * BIOCIMMEDIATE Set immediate mode.
694 * BIOCVERSION Get filter language version.
695 * BIOCGHDRCMPLT Get "header already complete" flag.
696 * BIOCSHDRCMPLT Set "header already complete" flag.
697 */
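
Taken together, the commands listed above correspond to a fairly typical capture setup from userland. The following is an editorial sketch only; the device path, interface handling, and helper name are illustrative, not mandated by this file:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Open a bpf descriptor, bind it to ifname, and report its buffer size. */
static int
bpf_open_capture(const char *ifname, u_int *blen)
{
	struct ifreq ifr;
	u_int immediate = 1;
	int fd;

	if ((fd = open("/dev/bpf", O_RDWR)) == -1)	/* cloning open, see bpfopen() */
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1 ||		/* attach to the interface */
	    ioctl(fd, BIOCIMMEDIATE, &immediate) == -1 ||	/* deliver as packets arrive */
	    ioctl(fd, BIOCGBLEN, blen) == -1) {		/* size the read buffer */
		close(fd);
		return -1;
	}
	return fd;
}
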
698 /* ARGSUSED */
699 static int
700 bpf_ioctl(struct file *fp, u_long cmd, void *addr)
701 {
702 struct bpf_d *d = fp->f_data;
703 int s, error = 0;
704
705 /*
706 * Refresh the PID associated with this bpf file.
707 */
708 KERNEL_LOCK(1, NULL);
709 d->bd_pid = curproc->p_pid;
710
711 s = splnet();
712 if (d->bd_state == BPF_WAITING)
713 callout_stop(&d->bd_callout);
714 d->bd_state = BPF_IDLE;
715 splx(s);
716
717 switch (cmd) {
718
719 default:
720 error = EINVAL;
721 break;
722
723 /*
724 * Check for read packet available.
725 */
726 case FIONREAD:
727 {
728 int n;
729
730 s = splnet();
731 n = d->bd_slen;
732 if (d->bd_hbuf)
733 n += d->bd_hlen;
734 splx(s);
735
736 *(int *)addr = n;
737 break;
738 }
739
740 /*
741 * Get buffer len [for read()].
742 */
743 case BIOCGBLEN:
744 *(u_int *)addr = d->bd_bufsize;
745 break;
746
747 /*
748 * Set buffer length.
749 */
750 case BIOCSBLEN:
751 if (d->bd_bif != 0)
752 error = EINVAL;
753 else {
754 u_int size = *(u_int *)addr;
755
756 if (size > bpf_maxbufsize)
757 *(u_int *)addr = size = bpf_maxbufsize;
758 else if (size < BPF_MINBUFSIZE)
759 *(u_int *)addr = size = BPF_MINBUFSIZE;
760 d->bd_bufsize = size;
761 }
762 break;
763
764 /*
765 * Set link layer read filter.
766 */
767 case BIOCSETF:
768 error = bpf_setf(d, addr);
769 break;
770
771 /*
772 * Flush read packet buffer.
773 */
774 case BIOCFLUSH:
775 s = splnet();
776 reset_d(d);
777 splx(s);
778 break;
779
780 /*
781 * Put interface into promiscuous mode.
782 */
783 case BIOCPROMISC:
784 if (d->bd_bif == 0) {
785 /*
786 * No interface attached yet.
787 */
788 error = EINVAL;
789 break;
790 }
791 s = splnet();
792 if (d->bd_promisc == 0) {
793 error = ifpromisc(d->bd_bif->bif_ifp, 1);
794 if (error == 0)
795 d->bd_promisc = 1;
796 }
797 splx(s);
798 break;
799
800 /*
801 * Get device parameters.
802 */
803 case BIOCGDLT:
804 if (d->bd_bif == 0)
805 error = EINVAL;
806 else
807 *(u_int *)addr = d->bd_bif->bif_dlt;
808 break;
809
810 /*
811 * Get a list of supported device parameters.
812 */
813 case BIOCGDLTLIST:
814 if (d->bd_bif == 0)
815 error = EINVAL;
816 else
817 error = bpf_getdltlist(d, addr);
818 break;
819
820 /*
821 * Set device parameters.
822 */
823 case BIOCSDLT:
824 if (d->bd_bif == 0)
825 error = EINVAL;
826 else
827 error = bpf_setdlt(d, *(u_int *)addr);
828 break;
829
830 /*
831  * Get interface name.
832 */
833 #ifdef OBIOCGETIF
834 case OBIOCGETIF:
835 #endif
836 case BIOCGETIF:
837 if (d->bd_bif == 0)
838 error = EINVAL;
839 else
840 bpf_ifname(d->bd_bif->bif_ifp, addr);
841 break;
842
843 /*
844 * Set interface.
845 */
846 #ifdef OBIOCSETIF
847 case OBIOCSETIF:
848 #endif
849 case BIOCSETIF:
850 error = bpf_setif(d, addr);
851 break;
852
853 /*
854 * Set read timeout.
855 */
856 case BIOCSRTIMEOUT:
857 {
858 struct timeval *tv = addr;
859
860 /* Compute number of ticks. */
861 d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
862 if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
863 d->bd_rtout = 1;
864 break;
865 }
866
867 /*
868 * Get read timeout.
869 */
870 case BIOCGRTIMEOUT:
871 {
872 struct timeval *tv = addr;
873
874 tv->tv_sec = d->bd_rtout / hz;
875 tv->tv_usec = (d->bd_rtout % hz) * tick;
876 break;
877 }
878
879 /*
880 * Get packet stats.
881 */
882 case BIOCGSTATS:
883 {
884 struct bpf_stat *bs = addr;
885
886 bs->bs_recv = d->bd_rcount;
887 bs->bs_drop = d->bd_dcount;
888 bs->bs_capt = d->bd_ccount;
889 break;
890 }
891
892 case BIOCGSTATSOLD:
893 {
894 struct bpf_stat_old *bs = addr;
895
896 bs->bs_recv = d->bd_rcount;
897 bs->bs_drop = d->bd_dcount;
898 break;
899 }
900
901 /*
902 * Set immediate mode.
903 */
904 case BIOCIMMEDIATE:
905 d->bd_immediate = *(u_int *)addr;
906 break;
907
908 case BIOCVERSION:
909 {
910 struct bpf_version *bv = addr;
911
912 bv->bv_major = BPF_MAJOR_VERSION;
913 bv->bv_minor = BPF_MINOR_VERSION;
914 break;
915 }
916
917 case BIOCGHDRCMPLT: /* get "header already complete" flag */
918 *(u_int *)addr = d->bd_hdrcmplt;
919 break;
920
921 case BIOCSHDRCMPLT: /* set "header already complete" flag */
922 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
923 break;
924
925 /*
926 * Get "see sent packets" flag
927 */
928 case BIOCGSEESENT:
929 *(u_int *)addr = d->bd_seesent;
930 break;
931
932 /*
933 * Set "see sent" packets flag
934 */
935 case BIOCSSEESENT:
936 d->bd_seesent = *(u_int *)addr;
937 break;
938
939 case FIONBIO: /* Non-blocking I/O */
940 /*
941 		 * No need to do anything special: bpf_read() checks the
942 		 * file's FNONBLOCK flag directly to decide whether the
943 		 * read should block.
944 */
945 break;
946
947 case FIOASYNC: /* Send signal on receive packets */
948 d->bd_async = *(int *)addr;
949 break;
950
951 case TIOCSPGRP: /* Process or group to send signals to */
952 case FIOSETOWN:
953 error = fsetown(&d->bd_pgid, cmd, addr);
954 break;
955
956 case TIOCGPGRP:
957 case FIOGETOWN:
958 error = fgetown(d->bd_pgid, cmd, addr);
959 break;
960 }
961 KERNEL_UNLOCK_ONE(NULL);
962 return (error);
963 }
964
965 /*
966 * Set d's packet filter program to fp. If this file already has a filter,
967 * free it and replace it. Returns EINVAL for bogus requests.
968 */
969 int
970 bpf_setf(struct bpf_d *d, struct bpf_program *fp)
971 {
972 struct bpf_insn *fcode, *old;
973 u_int flen, size;
974 int s;
975
976 old = d->bd_filter;
977 if (fp->bf_insns == 0) {
978 if (fp->bf_len != 0)
979 return (EINVAL);
980 s = splnet();
981 d->bd_filter = 0;
982 reset_d(d);
983 splx(s);
984 if (old != 0)
985 free(old, M_DEVBUF);
986 return (0);
987 }
988 flen = fp->bf_len;
989 if (flen > BPF_MAXINSNS)
990 return (EINVAL);
991
992 size = flen * sizeof(*fp->bf_insns);
993 fcode = malloc(size, M_DEVBUF, M_WAITOK);
994 if (copyin(fp->bf_insns, fcode, size) == 0 &&
995 bpf_validate(fcode, (int)flen)) {
996 s = splnet();
997 d->bd_filter = fcode;
998 reset_d(d);
999 splx(s);
1000 if (old != 0)
1001 free(old, M_DEVBUF);
1002
1003 return (0);
1004 }
1005 free(fcode, M_DEVBUF);
1006 return (EINVAL);
1007 }
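
As an illustration of the BIOCSETF path into bpf_setf() above, a userland program hands over an array of struct bpf_insn wrapped in a struct bpf_program. The one-instruction filter below accepts every packet and caps the capture at 128 bytes (an arbitrary example value); an editorial sketch, not code from this file:

#include <sys/types.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <net/bpf.h>

/* Install a trivial accept-all filter with a 128-byte snapshot length. */
static int
bpf_set_accept_all(int fd)
{
	static struct bpf_insn insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 128),	/* return (accept) up to 128 bytes */
	};
	struct bpf_program prog = {
		.bf_len = sizeof(insns) / sizeof(insns[0]),
		.bf_insns = insns,
	};

	return ioctl(fd, BIOCSETF, &prog);
}
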
1008
1009 /*
1010 * Detach a file from its current interface (if attached at all) and attach
1011 * to the interface indicated by the name stored in ifr.
1012 * Return an errno or 0.
1013 */
1014 static int
1015 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1016 {
1017 struct bpf_if *bp;
1018 char *cp;
1019 int unit_seen, i, s, error;
1020
1021 /*
1022 * Make sure the provided name has a unit number, and default
1023 	 * it to '0' if not specified.
1024 * XXX This is ugly ... do this differently?
1025 */
1026 unit_seen = 0;
1027 cp = ifr->ifr_name;
1028 cp[sizeof(ifr->ifr_name) - 1] = '\0'; /* sanity */
1029 while (*cp++)
1030 		if (*cp >= '0' && *cp <= '9')
1031 unit_seen = 1;
1032 if (!unit_seen) {
1033 /* Make sure to leave room for the '\0'. */
1034 for (i = 0; i < (IFNAMSIZ - 1); ++i) {
1035 if ((ifr->ifr_name[i] >= 'a' &&
1036 ifr->ifr_name[i] <= 'z') ||
1037 (ifr->ifr_name[i] >= 'A' &&
1038 ifr->ifr_name[i] <= 'Z'))
1039 continue;
1040 			ifr->ifr_name[i] = '0';
1041 }
1042 }
1043
1044 /*
1045 * Look through attached interfaces for the named one.
1046 */
1047 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
1048 struct ifnet *ifp = bp->bif_ifp;
1049
1050 if (ifp == 0 ||
1051 strcmp(ifp->if_xname, ifr->ifr_name) != 0)
1052 continue;
1053 /* skip additional entry */
1054 if ((void **)bp->bif_driverp != &ifp->if_bpf)
1055 continue;
1056 /*
1057 * We found the requested interface.
1058 * Allocate the packet buffers if we need to.
1059 		 * If we're already attached to the requested interface,
1060 * just flush the buffer.
1061 */
1062 if (d->bd_sbuf == 0) {
1063 error = bpf_allocbufs(d);
1064 if (error != 0)
1065 return (error);
1066 }
1067 s = splnet();
1068 if (bp != d->bd_bif) {
1069 if (d->bd_bif)
1070 /*
1071 * Detach if attached to something else.
1072 */
1073 bpf_detachd(d);
1074
1075 bpf_attachd(d, bp);
1076 }
1077 reset_d(d);
1078 splx(s);
1079 return (0);
1080 }
1081 /* Not found. */
1082 return (ENXIO);
1083 }
1084
1085 /*
1086 * Copy the interface name to the ifreq.
1087 */
1088 static void
1089 bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
1090 {
1091 memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
1092 }
1093
1094 /*
1095 * Support for poll() system call
1096 *
1097 * Return true iff the specific operation will not block indefinitely - with
1098 * the assumption that it is safe to positively acknowledge a request for the
1099 * ability to write to the BPF device.
1100 * Otherwise, return false but make a note that a selnotify() must be done.
1101 */
1102 static int
1103 bpf_poll(struct file *fp, int events)
1104 {
1105 struct bpf_d *d = fp->f_data;
1106 int s = splnet();
1107 int revents;
1108
1109 /*
1110 * Refresh the PID associated with this bpf file.
1111 */
1112 KERNEL_LOCK(1, NULL);
1113 d->bd_pid = curproc->p_pid;
1114
1115 revents = events & (POLLOUT | POLLWRNORM);
1116 if (events & (POLLIN | POLLRDNORM)) {
1117 /*
1118 * An imitation of the FIONREAD ioctl code.
1119 */
1120 if (d->bd_hlen != 0 ||
1121 ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1122 d->bd_slen != 0)) {
1123 revents |= events & (POLLIN | POLLRDNORM);
1124 } else {
1125 selrecord(curlwp, &d->bd_sel);
1126 /* Start the read timeout if necessary */
1127 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1128 callout_reset(&d->bd_callout, d->bd_rtout,
1129 bpf_timed_out, d);
1130 d->bd_state = BPF_WAITING;
1131 }
1132 }
1133 }
1134
1135 KERNEL_UNLOCK_ONE(NULL);
1136 splx(s);
1137 return (revents);
1138 }
1139
1140 static void
1141 filt_bpfrdetach(struct knote *kn)
1142 {
1143 struct bpf_d *d = kn->kn_hook;
1144 int s;
1145
1146 KERNEL_LOCK(1, NULL);
1147 s = splnet();
1148 SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
1149 splx(s);
1150 KERNEL_UNLOCK_ONE(NULL);
1151 }
1152
1153 static int
1154 filt_bpfread(struct knote *kn, long hint)
1155 {
1156 struct bpf_d *d = kn->kn_hook;
1157 int rv;
1158
1159 KERNEL_LOCK(1, NULL);
1160 kn->kn_data = d->bd_hlen;
1161 if (d->bd_immediate)
1162 kn->kn_data += d->bd_slen;
1163 rv = (kn->kn_data > 0);
1164 KERNEL_UNLOCK_ONE(NULL);
1165 return rv;
1166 }
1167
1168 static const struct filterops bpfread_filtops =
1169 { 1, NULL, filt_bpfrdetach, filt_bpfread };
1170
1171 static int
1172 bpf_kqfilter(struct file *fp, struct knote *kn)
1173 {
1174 struct bpf_d *d = fp->f_data;
1175 struct klist *klist;
1176 int s;
1177
1178 KERNEL_LOCK(1, NULL);
1179
1180 switch (kn->kn_filter) {
1181 case EVFILT_READ:
1182 klist = &d->bd_sel.sel_klist;
1183 kn->kn_fop = &bpfread_filtops;
1184 break;
1185
1186 default:
1187 KERNEL_UNLOCK_ONE(NULL);
1188 return (EINVAL);
1189 }
1190
1191 kn->kn_hook = d;
1192
1193 s = splnet();
1194 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1195 splx(s);
1196 KERNEL_UNLOCK_ONE(NULL);
1197
1198 return (0);
1199 }
1200
1201 /*
1202 * Incoming linkage from device drivers. Process the packet pkt, of length
1203 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1204 * by each process' filter, and if accepted, stashed into the corresponding
1205 * buffer.
1206 */
1207 void
1208 bpf_tap(void *arg, u_char *pkt, u_int pktlen)
1209 {
1210 struct bpf_if *bp;
1211 struct bpf_d *d;
1212 u_int slen;
1213 struct timeval tv;
1214 	int gottime = 0;
1215
1216 /*
1217 * Note that the ipl does not have to be raised at this point.
1218 	 * The only problem that could arise here would be if two different
1219 * interfaces shared any data. This is not the case.
1220 */
1221 bp = arg;
1222 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1223 ++d->bd_rcount;
1224 ++bpf_gstats.bs_recv;
1225 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1226 if (slen != 0) {
1227 if (!gottime) {
1228 microtime(&tv);
1229 gottime = 1;
1230 }
1231 catchpacket(d, pkt, pktlen, slen, (void *)memcpy, &tv);
1232 }
1233 }
1234 }
1235
1236 /*
1237 * Copy data from an mbuf chain into a buffer. This code is derived
1238 * from m_copydata in sys/uipc_mbuf.c.
1239 */
1240 static void *
1241 bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
1242 {
1243 const struct mbuf *m;
1244 u_int count;
1245 u_char *dst;
1246
1247 m = src_arg;
1248 dst = dst_arg;
1249 while (len > 0) {
1250 if (m == NULL)
1251 panic("bpf_mcpy");
1252 count = min(m->m_len, len);
1253 memcpy(dst, mtod(m, const void *), count);
1254 m = m->m_next;
1255 dst += count;
1256 len -= count;
1257 }
1258 return dst_arg;
1259 }
1260
1261 /*
1262 * Dispatch a packet to all the listeners on interface bp.
1263 *
1264 * marg pointer to the packet, either a data buffer or an mbuf chain
1265 * buflen buffer length, if marg is a data buffer
1266 * cpfn a function that can copy marg into the listener's buffer
1267 * pktlen length of the packet
1268 * rcvif either NULL or the interface the packet came in on.
1269 */
1270 static inline void
1271 bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
1272 void *marg, u_int pktlen, u_int buflen, struct ifnet *rcvif)
1273 {
1274 u_int slen;
1275 struct bpf_d *d;
1276 struct timeval tv;
1277 int gottime = 0;
1278
1279 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1280 if (!d->bd_seesent && (rcvif == NULL))
1281 continue;
1282 ++d->bd_rcount;
1283 ++bpf_gstats.bs_recv;
1284 slen = bpf_filter(d->bd_filter, marg, pktlen, buflen);
1285 if (slen != 0) {
1286 			if (!gottime) {
1287 microtime(&tv);
1288 gottime = 1;
1289 }
1290 catchpacket(d, marg, pktlen, slen, cpfn, &tv);
1291 }
1292 }
1293 }
1294
1295 /*
1296 * Incoming linkage from device drivers, when the head of the packet is in
1297 * a buffer, and the tail is in an mbuf chain.
1298 */
1299 void
1300 bpf_mtap2(void *arg, void *data, u_int dlen, struct mbuf *m)
1301 {
1302 struct bpf_if *bp = arg;
1303 u_int pktlen;
1304 struct mbuf mb;
1305
1306 pktlen = m_length(m) + dlen;
1307
1308 /*
1309 * Craft on-stack mbuf suitable for passing to bpf_filter.
1310 * Note that we cut corners here; we only setup what's
1311 * absolutely needed--this mbuf should never go anywhere else.
1312 */
1313 (void)memset(&mb, 0, sizeof(mb));
1314 mb.m_next = m;
1315 mb.m_data = data;
1316 mb.m_len = dlen;
1317
1318 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif);
1319 }
1320
1321 /*
1322 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1323 */
1324 void
1325 bpf_mtap(void *arg, struct mbuf *m)
1326 {
1327 void *(*cpfn)(void *, const void *, size_t);
1328 struct bpf_if *bp = arg;
1329 u_int pktlen, buflen;
1330 void *marg;
1331
1332 pktlen = m_length(m);
1333
1334 if (pktlen == m->m_len) {
1335 cpfn = (void *)memcpy;
1336 marg = mtod(m, void *);
1337 buflen = pktlen;
1338 } else {
1340 cpfn = bpf_mcpy;
1341 marg = m;
1342 buflen = 0;
1343 }
1344
1345 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif);
1346 }
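
For context, the usual caller of bpf_mtap()/bpf_tap() is a network driver that checks the per-interface hook set up by bpf_attachd() before handing over each frame. A minimal kernel-side sketch of that idiom (the function name is invented; drivers of this era typically also wrap the call in #if NBPFILTER > 0):

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/bpf.h>

/* Typical driver receive-path tap: only call into bpf if a listener exists. */
static void
example_rx_tap(struct ifnet *ifp, struct mbuf *m)
{
	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m);
}
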
1347
1348 /*
1349 * We need to prepend the address family as
1350 * a four byte field. Cons up a dummy header
1351 * to pacify bpf. This is safe because bpf
1352 * will only read from the mbuf (i.e., it won't
1353  * try to free it or keep a pointer to it).
1354 */
1355 void
1356 bpf_mtap_af(void *arg, uint32_t af, struct mbuf *m)
1357 {
1358 struct mbuf m0;
1359
1360 m0.m_flags = 0;
1361 m0.m_next = m;
1362 m0.m_len = 4;
1363 m0.m_data = (char *)⁡
1364
1365 bpf_mtap(arg, &m0);
1366 }
1367
1368 void
1369 bpf_mtap_et(void *arg, uint16_t et, struct mbuf *m)
1370 {
1371 struct mbuf m0;
1372
1373 m0.m_flags = 0;
1374 m0.m_next = m;
1375 m0.m_len = 14;
1376 m0.m_data = m0.m_dat;
1377
1378 ((uint32_t *)m0.m_data)[0] = 0;
1379 ((uint32_t *)m0.m_data)[1] = 0;
1380 ((uint32_t *)m0.m_data)[2] = 0;
1381 ((uint16_t *)m0.m_data)[6] = et;
1382
1383 bpf_mtap(arg, &m0);
1384 }
1385
1386 #if NSL > 0 || NSTRIP > 0
1387 /*
1388 * Put the SLIP pseudo-"link header" in place.
1389 * Note this M_PREPEND() should never fail,
1390  * since we know we always have enough space
1391 * in the input buffer.
1392 */
1393 void
1394 bpf_mtap_sl_in(void *arg, u_char *chdr, struct mbuf **m)
1395 {
1396 int s;
1397 u_char *hp;
1398
1399 M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
1400 if (*m == NULL)
1401 return;
1402
1403 hp = mtod(*m, u_char *);
1404 hp[SLX_DIR] = SLIPDIR_IN;
1405 (void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
1406
1407 s = splnet();
1408 bpf_mtap(arg, *m);
1409 splx(s);
1410
1411 m_adj(*m, SLIP_HDRLEN);
1412 }
1413
1414 /*
1415 * Put the SLIP pseudo-"link header" in
1416 * place. The compressed header is now
1417 * at the beginning of the mbuf.
1418 */
1419 void
1420 bpf_mtap_sl_out(void *arg, u_char *chdr, struct mbuf *m)
1421 {
1422 struct mbuf m0;
1423 u_char *hp;
1424 int s;
1425
1426 m0.m_flags = 0;
1427 m0.m_next = m;
1428 m0.m_data = m0.m_dat;
1429 m0.m_len = SLIP_HDRLEN;
1430
1431 hp = mtod(&m0, u_char *);
1432
1433 hp[SLX_DIR] = SLIPDIR_OUT;
1434 (void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
1435
1436 s = splnet();
1437 bpf_mtap(arg, &m0);
1438 splx(s);
1439 m_freem(m);
1440 }
1441 #endif
1442
1443 /*
1444 * Move the packet data from interface memory (pkt) into the
1445  * store buffer. Wake up any pending reader once the store buffer fills
1446  * up or immediate mode applies. "cpfn" is the routine called to do the actual data
1447 * transfer. memcpy is passed in to copy contiguous chunks, while
1448 * bpf_mcpy is passed in to copy mbuf chains. In the latter case,
1449 * pkt is really an mbuf.
1450 */
1451 static void
1452 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
1453 void *(*cpfn)(void *, const void *, size_t), struct timeval *tv)
1454 {
1455 struct bpf_hdr *hp;
1456 int totlen, curlen;
1457 int hdrlen = d->bd_bif->bif_hdrlen;
1458 int do_wakeup = 0;
1459
1460 ++d->bd_ccount;
1461 ++bpf_gstats.bs_capt;
1462 /*
1463 * Figure out how many bytes to move. If the packet is
1464 * greater or equal to the snapshot length, transfer that
1465 * much. Otherwise, transfer the whole packet (unless
1466 * we hit the buffer size limit).
1467 */
1468 totlen = hdrlen + min(snaplen, pktlen);
1469 if (totlen > d->bd_bufsize)
1470 totlen = d->bd_bufsize;
1471
1472 /*
1473 * Round up the end of the previous packet to the next longword.
1474 */
1475 curlen = BPF_WORDALIGN(d->bd_slen);
1476 if (curlen + totlen > d->bd_bufsize) {
1477 /*
1478 * This packet will overflow the storage buffer.
1479 * Rotate the buffers if we can, then wakeup any
1480 * pending reads.
1481 */
1482 if (d->bd_fbuf == 0) {
1483 /*
1484 * We haven't completed the previous read yet,
1485 * so drop the packet.
1486 */
1487 ++d->bd_dcount;
1488 ++bpf_gstats.bs_drop;
1489 return;
1490 }
1491 ROTATE_BUFFERS(d);
1492 do_wakeup = 1;
1493 curlen = 0;
1494 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
1495 /*
1496 * Immediate mode is set, or the read timeout has
1497 * already expired during a select call. A packet
1498 * arrived, so the reader should be woken up.
1499 */
1500 do_wakeup = 1;
1501 }
1502
1503 /*
1504 * Append the bpf header.
1505 */
1506 hp = (struct bpf_hdr *)((char *)d->bd_sbuf + curlen);
1507 hp->bh_tstamp = *tv;
1508 hp->bh_datalen = pktlen;
1509 hp->bh_hdrlen = hdrlen;
1510 /*
1511 * Copy the packet data into the store buffer and update its length.
1512 */
1513 (*cpfn)((u_char *)hp + hdrlen, pkt, (hp->bh_caplen = totlen - hdrlen));
1514 d->bd_slen = curlen + totlen;
1515
1516 /*
1517 * Call bpf_wakeup after bd_slen has been updated so that kevent(2)
1518 * will cause filt_bpfread() to be called with it adjusted.
1519 */
1520 if (do_wakeup)
1521 bpf_wakeup(d);
1522 }
1523
1524 /*
1525  * Allocate the initial capture buffers for a descriptor.
1526 */
1527 static int
1528 bpf_allocbufs(struct bpf_d *d)
1529 {
1530
1531 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL);
1532 if (!d->bd_fbuf)
1533 return (ENOBUFS);
1534 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL);
1535 if (!d->bd_sbuf) {
1536 free(d->bd_fbuf, M_DEVBUF);
1537 return (ENOBUFS);
1538 }
1539 d->bd_slen = 0;
1540 d->bd_hlen = 0;
1541 return (0);
1542 }
1543
1544 /*
1545 * Free buffers currently in use by a descriptor.
1546 * Called on close.
1547 */
1548 static void
1549 bpf_freed(struct bpf_d *d)
1550 {
1551 /*
1552 * We don't need to lock out interrupts since this descriptor has
1553 	 * been detached from its interface and it has not yet been marked
1554 * free.
1555 */
1556 if (d->bd_sbuf != 0) {
1557 free(d->bd_sbuf, M_DEVBUF);
1558 if (d->bd_hbuf != 0)
1559 free(d->bd_hbuf, M_DEVBUF);
1560 if (d->bd_fbuf != 0)
1561 free(d->bd_fbuf, M_DEVBUF);
1562 }
1563 if (d->bd_filter)
1564 free(d->bd_filter, M_DEVBUF);
1565 }
1566
1567 /*
1568 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
1569 * fixed size of the link header (variable length headers not yet supported).
1570 */
1571 void
1572 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1573 {
1574
1575 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
1576 }
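
A driver (or, for Ethernet, ether_ifattach() on its behalf) registers its interface with bpf during attach; a minimal sketch assuming an Ethernet-style interface (the surrounding function is hypothetical):

#include <sys/param.h>
#include <net/if.h>
#include <net/if_ether.h>
#include <net/bpf.h>

/* Hypothetical attach-time registration: Ethernet framing, 14-byte header. */
static void
example_bpf_register(struct ifnet *ifp)
{
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
}
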
1577
1578 /*
1579  * Attach additional dlt for an interface to bpf. dlt is the link layer type;
1580 * hdrlen is the fixed size of the link header for the specified dlt
1581 * (variable length headers not yet supported).
1582 */
1583 void
1584 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, void *driverp)
1585 {
1586 struct bpf_if *bp;
1587 bp = malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
1588 if (bp == 0)
1589 panic("bpfattach");
1590
1591 bp->bif_dlist = 0;
1592 bp->bif_driverp = driverp;
1593 bp->bif_ifp = ifp;
1594 bp->bif_dlt = dlt;
1595
1596 bp->bif_next = bpf_iflist;
1597 bpf_iflist = bp;
1598
1599 *bp->bif_driverp = 0;
1600
1601 /*
1602 * Compute the length of the bpf header. This is not necessarily
1603 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1604 * that the network layer header begins on a longword boundary (for
1605 * performance reasons and to alleviate alignment restrictions).
1606 */
1607 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1608
1609 #if 0
1610 printf("bpf: %s attached\n", ifp->if_xname);
1611 #endif
1612 }
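
A worked example of the padding computation above (editorial; the exact SIZEOF_BPF_HDR depends on the platform's struct timeval, 18 bytes is assumed here): for Ethernet, hdrlen = 14, so

	BPF_WORDALIGN(14 + 18) = 32
	bif_hdrlen             = 32 - 14 = 18

catchpacket() then lays out each record as 18 bytes of (padded) bpf_hdr, the 14-byte link header, and the payload at offset 32, leaving the network-layer header on a longword boundary.
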
1613
1614 /*
1615 * Remove an interface from bpf.
1616 */
1617 void
1618 bpfdetach(struct ifnet *ifp)
1619 {
1620 struct bpf_if *bp, **pbp;
1621 struct bpf_d *d;
1622 int s;
1623
1624 /* Nuke the vnodes for any open instances */
1625 LIST_FOREACH(d, &bpf_list, bd_list) {
1626 if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
1627 /*
1628 * Detach the descriptor from an interface now.
1629 * It will be free'ed later by close routine.
1630 */
1631 s = splnet();
1632 d->bd_promisc = 0; /* we can't touch device. */
1633 bpf_detachd(d);
1634 splx(s);
1635 }
1636 }
1637
1638 again:
1639 for (bp = bpf_iflist, pbp = &bpf_iflist;
1640 bp != NULL; pbp = &bp->bif_next, bp = bp->bif_next) {
1641 if (bp->bif_ifp == ifp) {
1642 *pbp = bp->bif_next;
1643 free(bp, M_DEVBUF);
1644 goto again;
1645 }
1646 }
1647 }
1648
1649 /*
1650  * Change the data link type of an interface.
1651 */
1652 void
1653 bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1654 {
1655 struct bpf_if *bp;
1656
1657 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1658 if ((void **)bp->bif_driverp == &ifp->if_bpf)
1659 break;
1660 }
1661 if (bp == NULL)
1662 panic("bpf_change_type");
1663
1664 bp->bif_dlt = dlt;
1665
1666 /*
1667 * Compute the length of the bpf header. This is not necessarily
1668 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1669 * that the network layer header begins on a longword boundary (for
1670 * performance reasons and to alleviate alignment restrictions).
1671 */
1672 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1673 }
1674
1675 /*
1676  * Get a list of the available data link types for the interface.
1677 */
1678 static int
1679 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
1680 {
1681 int n, error;
1682 struct ifnet *ifp;
1683 struct bpf_if *bp;
1684
1685 ifp = d->bd_bif->bif_ifp;
1686 n = 0;
1687 error = 0;
1688 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1689 if (bp->bif_ifp != ifp)
1690 continue;
1691 if (bfl->bfl_list != NULL) {
1692 if (n >= bfl->bfl_len)
1693 return ENOMEM;
1694 error = copyout(&bp->bif_dlt,
1695 bfl->bfl_list + n, sizeof(u_int));
1696 }
1697 n++;
1698 }
1699 bfl->bfl_len = n;
1700 return error;
1701 }
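
The NULL check above implies the two-pass pattern userland normally uses for BIOCGDLTLIST: query once with bfl_list == NULL to learn the count, then allocate and query again. An editorial sketch (helper name invented):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdlib.h>
#include <string.h>

/* Return a malloc'ed array of the DLTs offered by the descriptor's interface. */
static u_int *
bpf_fetch_dlts(int fd, u_int *count)
{
	struct bpf_dltlist bfl;
	u_int *list;

	memset(&bfl, 0, sizeof(bfl));		/* first pass: count only */
	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1)
		return NULL;
	if ((list = malloc(bfl.bfl_len * sizeof(u_int))) == NULL)
		return NULL;
	bfl.bfl_list = list;			/* second pass: fill the array */
	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1) {
		free(list);
		return NULL;
	}
	*count = bfl.bfl_len;
	return list;
}
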
1702
1703 /*
1704 * Set the data link type of a BPF instance.
1705 */
1706 static int
1707 bpf_setdlt(struct bpf_d *d, u_int dlt)
1708 {
1709 int s, error, opromisc;
1710 struct ifnet *ifp;
1711 struct bpf_if *bp;
1712
1713 if (d->bd_bif->bif_dlt == dlt)
1714 return 0;
1715 ifp = d->bd_bif->bif_ifp;
1716 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1717 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1718 break;
1719 }
1720 if (bp == NULL)
1721 return EINVAL;
1722 s = splnet();
1723 opromisc = d->bd_promisc;
1724 bpf_detachd(d);
1725 bpf_attachd(d, bp);
1726 reset_d(d);
1727 if (opromisc) {
1728 error = ifpromisc(bp->bif_ifp, 1);
1729 if (error)
1730 printf("%s: bpf_setdlt: ifpromisc failed (%d)\n",
1731 bp->bif_ifp->if_xname, error);
1732 else
1733 d->bd_promisc = 1;
1734 }
1735 splx(s);
1736 return 0;
1737 }
1738
1739 static int
1740 sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS)
1741 {
1742 int newsize, error;
1743 struct sysctlnode node;
1744
1745 node = *rnode;
1746 node.sysctl_data = &newsize;
1747 newsize = bpf_maxbufsize;
1748 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1749 if (error || newp == NULL)
1750 return (error);
1751
1752 if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE)
1753 return (EINVAL);
1754
1755 bpf_maxbufsize = newsize;
1756
1757 return (0);
1758 }
1759
1760 static int
1761 sysctl_net_bpf_peers(SYSCTLFN_ARGS)
1762 {
1763 int error, elem_count;
1764 struct bpf_d *dp;
1765 struct bpf_d_ext dpe;
1766 size_t len, needed, elem_size, out_size;
1767 char *sp;
1768
1769 if (namelen == 1 && name[0] == CTL_QUERY)
1770 return (sysctl_query(SYSCTLFN_CALL(rnode)));
1771
1772 if (namelen != 2)
1773 return (EINVAL);
1774
1775 /* BPF peers is privileged information. */
1776 error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE,
1777 KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL);
1778 if (error)
1779 return (EPERM);
1780
1781 len = (oldp != NULL) ? *oldlenp : 0;
1782 sp = oldp;
1783 elem_size = name[0];
1784 elem_count = name[1];
1785 out_size = MIN(sizeof(dpe), elem_size);
1786 needed = 0;
1787
1788 if (elem_size < 1 || elem_count < 0)
1789 return (EINVAL);
1790
1791 mutex_enter(&bpf_mtx);
1792 LIST_FOREACH(dp, &bpf_list, bd_list) {
1793 if (len >= elem_size && elem_count > 0) {
1794 #define BPF_EXT(field) dpe.bde_ ## field = dp->bd_ ## field
1795 BPF_EXT(bufsize);
1796 BPF_EXT(promisc);
1798 BPF_EXT(state);
1799 BPF_EXT(immediate);
1800 BPF_EXT(hdrcmplt);
1801 BPF_EXT(seesent);
1802 BPF_EXT(pid);
1803 BPF_EXT(rcount);
1804 BPF_EXT(dcount);
1805 BPF_EXT(ccount);
1806 #undef BPF_EXT
1807 if (dp->bd_bif)
1808 (void)strlcpy(dpe.bde_ifname,
1809 dp->bd_bif->bif_ifp->if_xname,
1810 IFNAMSIZ - 1);
1811 else
1812 dpe.bde_ifname[0] = '\0';
1813
1814 error = copyout(&dpe, sp, out_size);
1815 if (error)
1816 break;
1817 sp += elem_size;
1818 len -= elem_size;
1819 }
1820 if (elem_count > 0) {
1821 needed += elem_size;
1822 if (elem_count != INT_MAX)
1823 elem_count--;
1824 }
1825 }
1826 mutex_exit(&bpf_mtx);
1827
1828 *oldlenp = needed;
1829
1830 return (error);
1831 }
1832
1833 SYSCTL_SETUP(sysctl_net_bpf_setup, "sysctl net.bpf subtree setup")
1834 {
1835 const struct sysctlnode *node;
1836
1837 sysctl_createv(clog, 0, NULL, NULL,
1838 CTLFLAG_PERMANENT,
1839 CTLTYPE_NODE, "net", NULL,
1840 NULL, 0, NULL, 0,
1841 CTL_NET, CTL_EOL);
1842
1843 node = NULL;
1844 sysctl_createv(clog, 0, NULL, &node,
1845 CTLFLAG_PERMANENT,
1846 CTLTYPE_NODE, "bpf",
1847 SYSCTL_DESCR("BPF options"),
1848 NULL, 0, NULL, 0,
1849 CTL_NET, CTL_CREATE, CTL_EOL);
1850 if (node != NULL) {
1851 sysctl_createv(clog, 0, NULL, NULL,
1852 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1853 CTLTYPE_INT, "maxbufsize",
1854 SYSCTL_DESCR("Maximum size for data capture buffer"),
1855 sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0,
1856 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
1857 sysctl_createv(clog, 0, NULL, NULL,
1858 CTLFLAG_PERMANENT,
1859 CTLTYPE_STRUCT, "stats",
1860 SYSCTL_DESCR("BPF stats"),
1861 NULL, 0, &bpf_gstats, sizeof(bpf_gstats),
1862 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
1863 sysctl_createv(clog, 0, NULL, NULL,
1864 CTLFLAG_PERMANENT,
1865 CTLTYPE_STRUCT, "peers",
1866 SYSCTL_DESCR("BPF peers"),
1867 sysctl_net_bpf_peers, 0, NULL, 0,
1868 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
1869 }
1870
1871 }