FreeBSD/Linux Kernel Cross Reference
sys/net/if_tun.c
1 /* $NetBSD: if_tun.c,v 1.68.2.2 2004/05/20 12:28:17 grant Exp $ */
2
3 /*
4 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5 * Nottingham University 1987.
6 *
7 * This source may be freely distributed, however I would be interested
8 * in any changes that are made.
9 *
10 * This driver takes packets off the IP i/f and hands them up to a
11 * user process to have its wicked way with. This driver has its
12 * roots in a similar driver written by Phil Cockcroft (formerly) at
13 * UCL. This driver is based much more on read/write/poll mode of
14 * operation though.
15 */
16
17 #include <sys/cdefs.h>
18 __KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.68.2.2 2004/05/20 12:28:17 grant Exp $");
19
20 #include "tun.h"
21
22 #include "opt_inet.h"
23 #include "opt_ns.h"
24
25 #include <sys/param.h>
26 #include <sys/proc.h>
27 #include <sys/systm.h>
28 #include <sys/mbuf.h>
29 #include <sys/buf.h>
30 #include <sys/protosw.h>
31 #include <sys/socket.h>
32 #include <sys/ioctl.h>
33 #include <sys/errno.h>
34 #include <sys/syslog.h>
35 #include <sys/select.h>
36 #include <sys/poll.h>
37 #include <sys/file.h>
38 #include <sys/signalvar.h>
39 #include <sys/conf.h>
40
41 #include <machine/cpu.h>
42
43 #include <net/if.h>
44 #include <net/if_types.h>
45 #include <net/netisr.h>
46 #include <net/route.h>
47
48
49 #ifdef INET
50 #include <netinet/in.h>
51 #include <netinet/in_systm.h>
52 #include <netinet/in_var.h>
53 #include <netinet/ip.h>
54 #include <netinet/if_inarp.h>
55 #endif
56
57 #ifdef NS
58 #include <netns/ns.h>
59 #include <netns/ns_if.h>
60 #endif
61
62 #include "bpfilter.h"
63 #if NBPFILTER > 0
64 #include <sys/time.h>
65 #include <net/bpf.h>
66 #endif
67
68 #include <net/if_tun.h>
69
#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;
void	tunattach __P((int));
/*
 * Active interfaces live on tun_softc_list; interfaces destroyed while
 * still open are parked on tunz_softc_list ("zombies") until last close.
 */
LIST_HEAD(, tun_softc) tun_softc_list;
LIST_HEAD(, tun_softc) tunz_softc_list;
/* Protects both lists above. */
static struct simplelock tun_softc_lock;

int	tun_ioctl __P((struct ifnet *, u_long, caddr_t));
int	tun_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt));
int	tun_clone_create __P((struct if_clone *, int));
void	tun_clone_destroy __P((struct ifnet *));

struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0 __P((struct tun_softc *));
static void tuninit __P((struct tun_softc *));
#ifdef ALTQ
static void tunstart __P((struct ifnet *));
#endif
static struct tun_softc *tun_find_unit __P((dev_t));
static struct tun_softc *tun_find_zunit __P((int));

dev_type_open(tunopen);
dev_type_close(tunclose);
dev_type_read(tunread);
dev_type_write(tunwrite);
dev_type_ioctl(tunioctl);
dev_type_poll(tunpoll);
dev_type_kqfilter(tunkqfilter);

/* Character device entry points for /dev/tunN. */
const struct cdevsw tun_cdevsw = {
	tunopen, tunclose, tunread, tunwrite, tunioctl,
	nostop, notty, tunpoll, nommap, tunkqfilter,
};
108
109 void
110 tunattach(unused)
111 int unused;
112 {
113
114 simple_lock_init(&tun_softc_lock);
115 LIST_INIT(&tun_softc_list);
116 LIST_INIT(&tunz_softc_list);
117 if_clone_attach(&tun_cloner);
118 }
119
120 /*
121 * Find driver instance from dev_t.
122 * Call at splnet().
123 * Returns with tp locked (if found).
124 */
125 static struct tun_softc *
126 tun_find_unit(dev)
127 dev_t dev;
128 {
129 struct tun_softc *tp;
130 int unit = minor(dev);
131
132 simple_lock(&tun_softc_lock);
133 LIST_FOREACH(tp, &tun_softc_list, tun_list)
134 if (unit == tp->tun_unit)
135 break;
136 if (tp)
137 simple_lock(&tp->tun_lock);
138 simple_unlock(&tun_softc_lock);
139
140 return (tp);
141 }
142
143 /*
144 * Find zombie driver instance by unit number.
145 * Call at splnet().
146 * Remove tp from list and return it unlocked (if found).
147 */
148 static struct tun_softc *
149 tun_find_zunit(unit)
150 int unit;
151 {
152 struct tun_softc *tp;
153
154 simple_lock(&tun_softc_lock);
155 LIST_FOREACH(tp, &tunz_softc_list, tun_list)
156 if (unit == tp->tun_unit)
157 break;
158 if (tp)
159 LIST_REMOVE(tp, tun_list);
160 simple_unlock(&tun_softc_lock);
161 #ifdef DIAGNOSTIC
162 if (tp != NULL && (tp->tun_flags & (TUN_INITED|TUN_OPEN)) != TUN_OPEN)
163 printf("tun%d: inconsistent flags: %x\n", unit, tp->tun_flags);
164 #endif
165
166 return (tp);
167 }
168
169 int
170 tun_clone_create(ifc, unit)
171 struct if_clone *ifc;
172 int unit;
173 {
174 struct tun_softc *tp;
175
176 if ((tp = tun_find_zunit(unit)) == NULL) {
177 /* Allocate a new instance */
178 tp = malloc(sizeof(struct tun_softc), M_DEVBUF, M_WAITOK);
179 (void)memset(tp, 0, sizeof(struct tun_softc));
180
181 tp->tun_unit = unit;
182 simple_lock_init(&tp->tun_lock);
183 } else {
184 /* Revive tunnel instance; clear ifp part */
185 (void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
186 }
187
188 (void)snprintf(tp->tun_if.if_xname, sizeof(tp->tun_if.if_xname),
189 "%s%d", ifc->ifc_name, unit);
190 tunattach0(tp);
191 tp->tun_flags |= TUN_INITED;
192
193 simple_lock(&tun_softc_lock);
194 LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
195 simple_unlock(&tun_softc_lock);
196
197 return (0);
198 }
199
200 void
201 tunattach0(tp)
202 struct tun_softc *tp;
203 {
204 struct ifnet *ifp;
205
206 ifp = &tp->tun_if;
207 ifp->if_softc = tp;
208 ifp->if_mtu = TUNMTU;
209 ifp->if_ioctl = tun_ioctl;
210 ifp->if_output = tun_output;
211 #ifdef ALTQ
212 ifp->if_start = tunstart;
213 #endif
214 ifp->if_flags = IFF_POINTOPOINT;
215 ifp->if_type = IFT_TUNNEL;
216 ifp->if_snd.ifq_maxlen = ifqmaxlen;
217 ifp->if_collisions = 0;
218 ifp->if_ierrors = 0;
219 ifp->if_oerrors = 0;
220 ifp->if_ipackets = 0;
221 ifp->if_opackets = 0;
222 ifp->if_ibytes = 0;
223 ifp->if_obytes = 0;
224 ifp->if_dlt = DLT_NULL;
225 IFQ_SET_READY(&ifp->if_snd);
226 if_attach(ifp);
227 if_alloc_sadl(ifp);
228 #if NBPFILTER > 0
229 bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
230 #endif
231 }
232
/*
 * Cloner destroy hook: detach the interface and free the softc,
 * unless the device is still open, in which case the softc is moved
 * to the zombie list and freed on last close.
 */
void
tun_clone_destroy(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = (void *)ifp;
	int s, zombie = 0;

	/*
	 * Take the list lock and the unit lock (in that order, matching
	 * tun_find_unit) so nobody can look the unit up mid-teardown.
	 */
	s = splnet();
	simple_lock(&tun_softc_lock);
	simple_lock(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close */
		zombie = 1;
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
	}
	simple_unlock(&tun_softc_lock);

	/* Discard queued packets and stop the interface. */
	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	/* Wake blocked readers so they notice the interface is gone. */
	if (tp->tun_flags & RWAIT_FLAG_PLACEHOLDER) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	/* Post SIGIO/POLL_HUP to the async owner, if one is set. */
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	selwakeup(&tp->tun_rsel);

	simple_unlock(&tp->tun_lock);
	splx(s);

#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	/* Zombies are freed by tunclose() via tun_find_zunit(). */
	if (!zombie)
		free(tp, M_DEVBUF);
}
275
276 /*
277 * tunnel open - must be superuser & the device must be
278 * configured in
279 */
int
tunopen(dev, flag, mode, p)
	dev_t	dev;
	int	flag, mode;
	struct proc *p;
{
	struct ifnet	*ifp;
	struct tun_softc *tp;
	int	s, error;

	/* Opening a tunnel device is restricted to the superuser. */
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if (NTUN < 1)
		return (ENXIO);

	s = splnet();
	tp = tun_find_unit(dev);

	if (tp == NULL) {
		/* Not yet cloned: create the interface on first open. */
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			error = ENXIO;
			goto out_nolock;
		}
	}

	/* Only one opener at a time per unit. */
	if (tp->tun_flags & TUN_OPEN) {
		error = EBUSY;
		goto out;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
out:
	/* tun_find_unit() returned tp locked; release it here. */
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}
322
323 /*
324 * tunclose - close the device - mark i/f down & delete
325 * routing info
326 */
int
tunclose(dev, flag, mode, p)
	dev_t	dev;
	int	flag;
	int	mode;
	struct proc *p;
{
	int	s;
	struct tun_softc *tp;
	struct ifnet	*ifp;

	s = splnet();
	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		free(tp, M_DEVBUF);
		goto out_nolock;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE,
					       tp->tun_flags & TUN_DSTADDR
							? RTF_HOST
							: 0);
				}
#endif
			}
		}
	}
	/* Drop SIGIO ownership and wake anyone still selecting on us. */
	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (0);
}
383
384 /*
385 * Call at splnet() with tp locked.
386 */
387 static void
388 tuninit(tp)
389 struct tun_softc *tp;
390 {
391 struct ifnet *ifp = &tp->tun_if;
392 struct ifaddr *ifa;
393
394 TUNDEBUG("%s: tuninit\n", ifp->if_xname);
395
396 ifp->if_flags |= IFF_UP | IFF_RUNNING;
397
398 tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
399 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
400 #ifdef INET
401 if (ifa->ifa_addr->sa_family == AF_INET) {
402 struct sockaddr_in *sin;
403
404 sin = satosin(ifa->ifa_addr);
405 if (sin && sin->sin_addr.s_addr)
406 tp->tun_flags |= TUN_IASET;
407
408 if (ifp->if_flags & IFF_POINTOPOINT) {
409 sin = satosin(ifa->ifa_dstaddr);
410 if (sin && sin->sin_addr.s_addr)
411 tp->tun_flags |= TUN_DSTADDR;
412 }
413 }
414 #endif
415 }
416
417 return;
418 }
419
420 /*
421 * Process an ioctl request.
422 */
int
tun_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	int		error = 0, s;
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);

	s = splnet();
	simple_lock(&tp->tun_lock);

	switch (cmd) {
	case SIOCSIFADDR:
		tuninit(tp);
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFDSTADDR:
		tuninit(tp);
		TUNDEBUG("%s: destination address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU: {
		struct ifreq *ifr = (struct ifreq *) data;
		/* MTU must stay between the IPv4 minimum (576) and TUNMTU. */
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		/* Multicast membership is accepted for INET only. */
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	}
	case SIOCSIFFLAGS:
		/* Nothing to do; flags are applied by the generic code. */
		break;
	default:
		error = EINVAL;
	}

	simple_unlock(&tp->tun_lock);
	splx(s);
	return (error);
}
485
486 /*
487 * tun_output - queue packets from higher level ready to put out.
488 */
489 int
490 tun_output(ifp, m0, dst, rt)
491 struct ifnet *ifp;
492 struct mbuf *m0;
493 struct sockaddr *dst;
494 struct rtentry *rt;
495 {
496 struct tun_softc *tp = ifp->if_softc;
497 #ifdef INET
498 int s;
499 int error;
500 #endif
501 int mlen;
502 ALTQ_DECL(struct altq_pktattr pktattr;)
503
504 s = splnet();
505 simple_lock(&tp->tun_lock);
506 TUNDEBUG ("%s: tun_output\n", ifp->if_xname);
507
508 if ((tp->tun_flags & TUN_READY) != TUN_READY) {
509 TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
510 tp->tun_flags);
511 m_freem (m0);
512 error = EHOSTDOWN;
513 goto out;
514 }
515
516 /*
517 * if the queueing discipline needs packet classification,
518 * do it before prepending link headers.
519 */
520 IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);
521
522 #if NBPFILTER > 0
523 if (ifp->if_bpf) {
524 /*
525 * We need to prepend the address family as
526 * a four byte field. Cons up a dummy header
527 * to pacify bpf. This is safe because bpf
528 * will only read from the mbuf (i.e., it won't
529 * try to free it or keep a pointer to it).
530 */
531 struct mbuf m;
532 u_int32_t af = dst->sa_family;
533
534 m.m_flags = 0;
535 m.m_next = m0;
536 m.m_len = sizeof(af);
537 m.m_data = (char *)⁡
538
539 bpf_mtap(ifp->if_bpf, &m);
540 }
541 #endif
542
543 switch(dst->sa_family) {
544 #ifdef INET
545 case AF_INET:
546 if (tp->tun_flags & TUN_PREPADDR) {
547 /* Simple link-layer header */
548 M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
549 if (m0 == NULL) {
550 IF_DROP(&ifp->if_snd);
551 error = ENOBUFS;
552 goto out;
553 }
554 bcopy(dst, mtod(m0, char *), dst->sa_len);
555 }
556 /* FALLTHROUGH */
557 case AF_UNSPEC:
558 IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
559 if (error) {
560 ifp->if_collisions++;
561 error = EAFNOSUPPORT;
562 goto out;
563 }
564 mlen = m0->m_pkthdr.len;
565 ifp->if_opackets++;
566 ifp->if_obytes += mlen;
567 break;
568 #endif
569 default:
570 m_freem(m0);
571 error = EAFNOSUPPORT;
572 goto out;
573 }
574
575 if (tp->tun_flags & TUN_RWAIT) {
576 tp->tun_flags &= ~TUN_RWAIT;
577 wakeup((caddr_t)tp);
578 }
579 if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
580 fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
581 NULL);
582
583 selnotify(&tp->tun_rsel, 0);
584 out:
585 simple_unlock(&tp->tun_lock);
586 splx(s);
587 return (0);
588 }
589
590 /*
591 * the cdevsw interface is now pretty minimal.
592 */
int
tunioctl(dev, cmd, data, flag, p)
	dev_t	dev;
	u_long	cmd;
	caddr_t	data;
	int	flag;
	struct proc *p;
{
	struct tun_softc *tp;
	int s, error = 0;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	switch (cmd) {
	case TUNSDEBUG:
		/* Set/get the global (not per-unit) debug flag. */
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		/* Switch between point-to-point and broadcast mode. */
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			/* The mode may not change while the interface is up. */
			if (tp->tun_if.if_flags & IFF_UP) {
				error = EBUSY;
				goto out;
			}
			tp->tun_if.if_flags &=
				~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case TUNSLMODE:
		/* Toggle prepending of a link-layer sockaddr to packets. */
		if (*(int *)data)
			tp->tun_flags |= TUN_PREPADDR;
		else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case FIONBIO:
		/* Non-blocking reads: fail with EWOULDBLOCK when empty. */
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		/* Deliver SIGIO when a packet becomes readable. */
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		/* Report the length of the next queued packet, if any. */
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(p, &tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(p, tp->tun_pgid, cmd, data);
		break;

	default:
		error = ENOTTY;
	}

out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}
688
689 /*
690 * The cdevsw read interface - reads a packet at a time, or at
691 * least as much of a packet as can be read.
692 */
int
tunread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet	*ifp;
	struct mbuf	*m, *m0;
	int		error = 0, len, s, index;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	/*
	 * Remember the interface index so that after sleeping we can
	 * tell whether the unit was destroyed and re-created meanwhile.
	 */
	index = tp->tun_if.if_index;
	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			/* Queue empty: either fail now or sleep for data. */
			if (tp->tun_flags & TUN_NBIO) {
				error = EWOULDBLOCK;
				goto out;
			}
			tp->tun_flags |= TUN_RWAIT;
			/* PNORELOCK: tun_lock is dropped across the sleep. */
			if (ltsleep((caddr_t)tp, PZERO|PCATCH|PNORELOCK,
					"tunread", 0, &tp->tun_lock) != 0) {
				error = EINTR;
				goto out_nolock;
			} else {
				/*
				 * Maybe the interface was destroyed while
				 * we were sleeping, so let's ensure that
				 * we're looking at the same (valid) tun
				 * interface before looping.
				 */
				tp = tun_find_unit(dev);
				if (tp == NULL) {
					error = ENXIO;
					goto out_nolock;
				}
				if (tp->tun_if.if_index != index) {
					error = ENXIO;
					goto out;
				}
			}
		}
	} while (m0 == 0);

	/* Packet in hand: copy it out without holding the lock. */
	simple_unlock(&tp->tun_lock);
	splx(s);

	/* Copy the mbuf chain */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	/* Whatever didn't fit in the caller's buffer is dropped. */
	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;

	return (error);

out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}
784
785 /*
786 * the cdevsw write interface - an atomic write is a packet - or else!
787 */
788 int
789 tunwrite(dev, uio, ioflag)
790 dev_t dev;
791 struct uio *uio;
792 int ioflag;
793 {
794 struct tun_softc *tp;
795 struct ifnet *ifp;
796 struct mbuf *top, **mp, *m;
797 struct ifqueue *ifq;
798 struct sockaddr dst;
799 int isr, error = 0, s, tlen, mlen;
800
801 s = splnet();
802 tp = tun_find_unit(dev);
803
804 /* interface was "destroyed" already */
805 if (tp == NULL) {
806 error = ENXIO;
807 goto out_nolock;
808 }
809
810 /* Unlock until we've got the data */
811 simple_unlock(&tp->tun_lock);
812 splx(s);
813
814 ifp = &tp->tun_if;
815
816 TUNDEBUG("%s: tunwrite\n", ifp->if_xname);
817
818 if (tp->tun_flags & TUN_PREPADDR) {
819 if (uio->uio_resid < sizeof(dst)) {
820 error = EIO;
821 goto out0;
822 }
823 error = uiomove((caddr_t)&dst, sizeof(dst), uio);
824 if (dst.sa_len > sizeof(dst)) {
825 /* Duh.. */
826 char discard;
827 int n = dst.sa_len - sizeof(dst);
828 while (n--)
829 if ((error = uiomove(&discard, 1, uio)) != 0) {
830 goto out0;
831 }
832 }
833 } else {
834 #ifdef INET
835 dst.sa_family = AF_INET;
836 #endif
837 }
838
839 if (uio->uio_resid > TUNMTU) {
840 TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
841 (unsigned long)uio->uio_resid);
842 error = EIO;
843 goto out0;
844 }
845
846 switch (dst.sa_family) {
847 #ifdef INET
848 case AF_INET:
849 ifq = &ipintrq;
850 isr = NETISR_IP;
851 break;
852 #endif
853 default:
854 error = EAFNOSUPPORT;
855 goto out0;
856 }
857
858 tlen = uio->uio_resid;
859
860 /* get a header mbuf */
861 MGETHDR(m, M_DONTWAIT, MT_DATA);
862 if (m == NULL) {
863 error = ENOBUFS;
864 goto out0;
865 }
866 mlen = MHLEN;
867
868 top = NULL;
869 mp = ⊤
870 while (error == 0 && uio->uio_resid > 0) {
871 m->m_len = min(mlen, uio->uio_resid);
872 error = uiomove(mtod(m, caddr_t), m->m_len, uio);
873 *mp = m;
874 mp = &m->m_next;
875 if (error == 0 && uio->uio_resid > 0) {
876 MGET(m, M_DONTWAIT, MT_DATA);
877 if (m == NULL) {
878 error = ENOBUFS;
879 break;
880 }
881 mlen = MLEN;
882 }
883 }
884 if (error) {
885 if (top != NULL)
886 m_freem (top);
887 ifp->if_ierrors++;
888 goto out0;
889 }
890
891 top->m_pkthdr.len = tlen;
892 top->m_pkthdr.rcvif = ifp;
893
894 #if NBPFILTER > 0
895 if (ifp->if_bpf) {
896 /*
897 * We need to prepend the address family as
898 * a four byte field. Cons up a dummy header
899 * to pacify bpf. This is safe because bpf
900 * will only read from the mbuf (i.e., it won't
901 * try to free it or keep a pointer to it).
902 */
903 struct mbuf m;
904 u_int32_t af = AF_INET;
905
906 m.m_flags = 0;
907 m.m_next = top;
908 m.m_len = sizeof(af);
909 m.m_data = (char *)⁡
910
911 bpf_mtap(ifp->if_bpf, &m);
912 }
913 #endif
914
915 s = splnet();
916 simple_lock(&tp->tun_lock);
917 if ((tp->tun_flags & TUN_INITED) == 0) {
918 /* Interface was destroyed */
919 error = ENXIO;
920 goto out;
921 }
922 if (IF_QFULL(ifq)) {
923 IF_DROP(ifq);
924 ifp->if_collisions++;
925 m_freem(top);
926 error = ENOBUFS;
927 goto out;
928 }
929
930 IF_ENQUEUE(ifq, top);
931 ifp->if_ipackets++;
932 ifp->if_ibytes += tlen;
933 schednetisr(isr);
934 out:
935 simple_unlock(&tp->tun_lock);
936 out_nolock:
937 splx(s);
938 out0:
939 return (error);
940 }
941
942 #ifdef ALTQ
943 /*
944 * Start packet transmission on the interface.
945 * when the interface queue is rate-limited by ALTQ or TBR,
946 * if_start is needed to drain packets from the queue in order
947 * to notify readers when outgoing packets become ready.
948 *
949 * Should be called at splnet.
950 */
951 static void
952 tunstart(ifp)
953 struct ifnet *ifp;
954 {
955 struct tun_softc *tp = ifp->if_softc;
956
957 if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
958 return;
959
960 simple_lock(&tp->tun_lock);
961 if (!IF_IS_EMPTY(&ifp->if_snd)) {
962 if (tp->tun_flags & TUN_RWAIT) {
963 tp->tun_flags &= ~TUN_RWAIT;
964 wakeup((caddr_t)tp);
965 }
966 if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
967 fownsignal(tp->tun_pgid, SIGIO, POLL_OUT,
968 POLLOUT|POLLWRNORM, NULL);
969
970 selwakeup(&tp->tun_rsel);
971 }
972 simple_unlock(&tp->tun_lock);
973 }
974 #endif /* ALTQ */
975 /*
976 * tunpoll - the poll interface, this is only useful on reads
977 * really. The write detect always returns true, write never blocks
978 * anyway, it either accepts the packet or drops it.
979 */
980 int
981 tunpoll(dev, events, p)
982 dev_t dev;
983 int events;
984 struct proc *p;
985 {
986 struct tun_softc *tp;
987 struct ifnet *ifp;
988 int s, revents = 0;
989
990 s = splnet();
991 tp = tun_find_unit(dev);
992
993 /* interface was "destroyed" already */
994 if (tp == NULL)
995 goto out_nolock;
996
997 ifp = &tp->tun_if;
998
999 TUNDEBUG("%s: tunpoll\n", ifp->if_xname);
1000
1001 if (events & (POLLIN | POLLRDNORM)) {
1002 if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1003 TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
1004 ifp->if_snd.ifq_len);
1005 revents |= events & (POLLIN | POLLRDNORM);
1006 } else {
1007 TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
1008 selrecord(p, &tp->tun_rsel);
1009 }
1010 }
1011
1012 if (events & (POLLOUT | POLLWRNORM))
1013 revents |= events & (POLLOUT | POLLWRNORM);
1014
1015 simple_unlock(&tp->tun_lock);
1016 out_nolock:
1017 splx(s);
1018 return (revents);
1019 }
1020
/*
 * Knote detach: unhook the note from the unit's read selinfo klist.
 */
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;
	int s;

	s = splnet();
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}
1031
1032 static int
1033 filt_tunread(struct knote *kn, long hint)
1034 {
1035 struct tun_softc *tp = kn->kn_hook;
1036 struct ifnet *ifp = &tp->tun_if;
1037 struct mbuf *m;
1038 int s;
1039
1040 s = splnet();
1041 IF_POLL(&ifp->if_snd, m);
1042 if (m == NULL) {
1043 splx(s);
1044 return (0);
1045 }
1046
1047 for (kn->kn_data = 0; m != NULL; m = m->m_next)
1048 kn->kn_data += m->m_len;
1049
1050 splx(s);
1051 return (1);
1052 }
1053
/* Read filter: fires when a packet is queued for userland. */
static const struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread };

/* Write filter: writes never block, so the stock always-true filter. */
static const struct filterops tun_seltrue_filtops =
	{ 1, NULL, filt_tunrdetach, filt_seltrue };
1059
/*
 * kqueue attach: register a knote on the unit's selinfo klist.
 * Returns 0 on success, 1 for ENXIO-gone or an unsupported filter.
 */
int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	struct klist *klist;
	int rv = 0, s;

	s = splnet();
	tp = tun_find_unit(dev);
	/* Interface was "destroyed" already. */
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		/* Writes can't block; attach the always-true filter. */
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		/* Unsupported filter type. */
		rv = 1;
		goto out;
	}

	kn->kn_hook = tp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (rv);
}
Cache object: 3405d436e153718c1bd9885a5b364061
|