FreeBSD/Linux Kernel Cross Reference
sys/net/if_tun.c
1 /* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */
2
3 /*-
4 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5 * Nottingham University 1987.
6 *
7 * This source may be freely distributed, however I would be interested
8 * in any changes that are made.
9 *
10 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
12 * roots in a similar driver written by Phil Cockcroft (formerly) at
13 * UCL. This driver is based much more on read/write/poll mode of
14 * operation though.
15 *
16 * $FreeBSD$
17 */
18
19 #include "opt_atalk.h"
20 #include "opt_inet.h"
21 #include "opt_inet6.h"
22 #include "opt_ipx.h"
23 #include "opt_mac.h"
24
25 #include <sys/param.h>
26 #include <sys/proc.h>
27 #include <sys/systm.h>
28 #include <sys/mac.h>
29 #include <sys/mbuf.h>
30 #include <sys/module.h>
31 #include <sys/socket.h>
32 #include <sys/filio.h>
33 #include <sys/sockio.h>
34 #include <sys/ttycom.h>
35 #include <sys/poll.h>
36 #include <sys/signalvar.h>
37 #include <sys/filedesc.h>
38 #include <sys/kernel.h>
39 #include <sys/sysctl.h>
40 #include <sys/conf.h>
41 #include <sys/uio.h>
42 #include <sys/vnode.h>
43 #include <sys/malloc.h>
44 #include <sys/random.h>
45
46 #include <net/if.h>
47 #include <net/if_types.h>
48 #include <net/netisr.h>
49 #include <net/route.h>
50 #ifdef INET
51 #include <netinet/in.h>
52 #endif
53 #include <net/bpf.h>
54 #include <net/if_tun.h>
55
56 #include <sys/queue.h>
57
/*
 * tun_list is protected by global tunmtx.  Other mutable fields are
 * protected by tun->tun_mtx, or by their owning subsystem.  tun_dev is
 * static for the duration of a tunnel interface.
 */
struct tun_softc {
	TAILQ_ENTRY(tun_softc) tun_list;	/* linkage on global tunhead */
	struct cdev *tun_dev;			/* backing character device */
	u_short	tun_flags;		/* misc flags */
#define	TUN_OPEN	0x0001		/* device is open */
#define	TUN_INITED	0x0002		/* softc initialized by tuncreate() */
#define	TUN_RCOLL	0x0004		/* NOTE(review): not used in this file */
#define	TUN_IASET	0x0008		/* local IPv4 address configured */
#define	TUN_DSTADDR	0x0010		/* destination IPv4 address configured */
#define	TUN_LMODE	0x0020		/* prepend dst sockaddr on output */
#define	TUN_RWAIT	0x0040		/* a reader sleeps in tunread() */
#define	TUN_ASYNC	0x0080		/* send SIGIO when a packet is queued */
#define	TUN_IFHEAD	0x0100		/* prepend/strip address-family word */

/* Tunnel is usable for traffic only when open AND initialized. */
#define	TUN_READY	(TUN_OPEN | TUN_INITED)

	/*
	 * XXXRW: tun_pid is used to exclusively lock /dev/tun.  Is this
	 * actually needed?  Can we just return EBUSY if already open?
	 * Problem is that this involved inherent races when a tun device
	 * is handed off from one process to another, as opposed to just
	 * being slightly stale informationally.
	 */
	pid_t	tun_pid;		/* owning pid */
	struct ifnet tun_if;		/* the interface */
	struct sigio *tun_sigio;	/* information for async I/O */
	struct selinfo tun_rsel;	/* read select */
	struct mtx tun_mtx;		/* protect mutable softc fields */
};
92
/*
 * TUNDEBUG(ifp, fmt, ...): conditional debug logging through if_printf().
 * Wrapped in do/while (0) so the expansion is a single statement and
 * cannot mis-bind to a dangling else at call sites (the previous
 * object-like "if (tundebug) if_printf" form had that hazard).
 */
#define TUNDEBUG(...) do {						\
	if (tundebug)							\
		if_printf(__VA_ARGS__);					\
} while (0)
#define TUNNAME		"tun"
95
/*
 * All mutable global variables in if_tun are locked using tunmtx, with
 * the exception of tundebug, which is used unlocked, and tunclones,
 * which is static after setup.
 */
static struct mtx tunmtx;		/* protects tunhead */
static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface");
static int tundebug = 0;		/* enables TUNDEBUG() output */
static struct clonedevs *tunclones;	/* clone-device bookkeeping */
static TAILQ_HEAD(,tun_softc)	tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
107
/* Forward declarations for interface and clone handlers. */
static void	tunclone(void *arg, char *name, int namelen, struct cdev **dev);
static void	tuncreate(struct cdev *dev);
static int	tunifioctl(struct ifnet *, u_long, caddr_t);
static int	tuninit(struct ifnet *);
static int	tunmodevent(module_t, int, void *);
static int	tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *rt);
static void	tunstart(struct ifnet *);

/* Character-device entry points. */
static d_open_t		tunopen;
static d_close_t	tunclose;
static d_read_t		tunread;
static d_write_t	tunwrite;
static d_ioctl_t	tunioctl;
static d_poll_t		tunpoll;

/* cdevsw for /dev/tunN; D_NEEDGIANT: handlers still rely on Giant. */
static struct cdevsw tun_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_PSEUDO | D_NEEDGIANT,
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
	.d_name =	TUNNAME,
};
135
136 static void
137 tunclone(void *arg, char *name, int namelen, struct cdev **dev)
138 {
139 int u, i;
140
141 if (*dev != NULL)
142 return;
143
144 if (strcmp(name, TUNNAME) == 0) {
145 u = -1;
146 } else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1)
147 return; /* Don't recognise the name */
148 if (u != -1 && u > IF_MAXUNIT)
149 return; /* Unit number too high */
150
151 /* find any existing device, or allocate new unit number */
152 i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0);
153 if (i) {
154 /* No preexisting struct cdev *, create one */
155 *dev = make_dev(&tun_cdevsw, unit2minor(u),
156 UID_UUCP, GID_DIALER, 0600, "tun%d", u);
157 if (*dev != NULL)
158 (*dev)->si_flags |= SI_CHEAPCLONE;
159 }
160 }
161
/*
 * Tear down one tunnel: detach the interface from bpf and the network
 * stack, destroy the backing character device, and free the softc.
 * The caller has already unlinked tp from tunhead and dropped tunmtx
 * (see tunmodevent()).
 */
static void
tun_destroy(struct tun_softc *tp)
{
	struct cdev *dev;

	/* Unlocked read. */
	KASSERT((tp->tun_flags & TUN_OPEN) == 0,
	    ("tununits is out of sync - unit %d", tp->tun_if.if_dunit));

	dev = tp->tun_dev;
	bpfdetach(&tp->tun_if);
	if_detach(&tp->tun_if);
	destroy_dev(dev);
	mtx_destroy(&tp->tun_mtx);
	free(tp, M_TUN);
}
178
179 static int
180 tunmodevent(module_t mod, int type, void *data)
181 {
182 static eventhandler_tag tag;
183 struct tun_softc *tp;
184
185 switch (type) {
186 case MOD_LOAD:
187 mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
188 clone_setup(&tunclones);
189 tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000);
190 if (tag == NULL)
191 return (ENOMEM);
192 break;
193 case MOD_UNLOAD:
194 EVENTHANDLER_DEREGISTER(dev_clone, tag);
195
196 mtx_lock(&tunmtx);
197 while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
198 TAILQ_REMOVE(&tunhead, tp, tun_list);
199 mtx_unlock(&tunmtx);
200 tun_destroy(tp);
201 mtx_lock(&tunmtx);
202 }
203 mtx_unlock(&tunmtx);
204 clone_cleanup(&tunclones);
205 mtx_destroy(&tunmtx);
206 break;
207 default:
208 return EOPNOTSUPP;
209 }
210 return 0;
211 }
212
/* Module glue: run tunmodevent() on load/unload at pseudo-device time. */
static moduledata_t tun_mod = {
	"if_tun",
	tunmodevent,
	0
};

DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
220
/*
 * if_start handler: invoked by the network stack when a packet has
 * been placed on if_snd.  Nothing is transmitted here; instead any
 * process waiting to read the packet is notified -- wakeup() for a
 * blocked tunread(), SIGIO in async mode, and a select/poll wakeup.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;

	/* When ALTQ is enabled, peek first and bail if the queue is empty. */
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m == NULL) {
			IFQ_UNLOCK(&ifp->if_snd);
			return;
		}
		IFQ_UNLOCK(&ifp->if_snd);
	}

	mtx_lock(&tp->tun_mtx);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup(tp);
	}
	/* tun_mtx is released on both paths before signalling. */
	if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) {
		mtx_unlock(&tp->tun_mtx);
		pgsigio(&tp->tun_sigio, SIGIO, 0);
	} else
		mtx_unlock(&tp->tun_mtx);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
}
249
250 static void
251 tuncreate(struct cdev *dev)
252 {
253 struct tun_softc *sc;
254 struct ifnet *ifp;
255
256 dev->si_flags &= ~SI_CHEAPCLONE;
257
258 MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
259 mtx_init(&sc->tun_mtx, "tun_mtx", NULL, MTX_DEF);
260 sc->tun_flags = TUN_INITED;
261 sc->tun_dev = dev;
262 mtx_lock(&tunmtx);
263 TAILQ_INSERT_TAIL(&tunhead, sc, tun_list);
264 mtx_unlock(&tunmtx);
265
266 ifp = &sc->tun_if;
267 if_initname(ifp, TUNNAME, dev2unit(dev));
268 ifp->if_mtu = TUNMTU;
269 ifp->if_ioctl = tunifioctl;
270 ifp->if_output = tunoutput;
271 ifp->if_start = tunstart;
272 ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
273 ifp->if_type = IFT_PPP;
274 ifp->if_softc = sc;
275 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
276 ifp->if_snd.ifq_drv_maxlen = 0;
277 IFQ_SET_READY(&ifp->if_snd);
278
279 if_attach(ifp);
280 bpfattach(ifp, DLT_NULL, sizeof(u_int));
281 dev->si_drv1 = sc;
282 }
283
/*
 * Open the character device.  Creates the interface on first open
 * (si_drv1 not yet set), then claims the tunnel for the opening
 * process via tun_pid and marks it TUN_OPEN.  Returns EBUSY if a
 * different process already owns the tunnel, otherwise 0.
 */
static int
tunopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
	struct ifnet *ifp;
	struct tun_softc *tp;

	/*
	 * XXXRW: Non-atomic test and set of dev->si_drv1 requires
	 * synchronization.
	 */
	tp = dev->si_drv1;
	if (!tp) {
		tuncreate(dev);
		tp = dev->si_drv1;
	}

	/*
	 * XXXRW: This use of tun_pid is subject to error due to the
	 * fact that a reference to the tunnel can live beyond the
	 * death of the process that created it.  Can we replace this
	 * with a simple busy flag?
	 */
	mtx_lock(&tp->tun_mtx);
	if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid) {
		mtx_unlock(&tp->tun_mtx);
		return (EBUSY);
	}
	tp->tun_pid = td->td_proc->p_pid;

	tp->tun_flags |= TUN_OPEN;
	mtx_unlock(&tp->tun_mtx);
	ifp = &tp->tun_if;
	TUNDEBUG(ifp, "open\n");

	return (0);
}
320
/*
 * tunclose - close the device - mark i/f down & delete
 * routing info.
 *
 * Releases ownership (tun_pid), discards queued output, brings the
 * interface down, deletes IPv4 routes pointing at it, and wakes any
 * process blocked in poll/select.  Always returns 0.
 */
static int
tunclose(struct cdev *dev, int foo, int bar, struct thread *td)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	mtx_lock(&tp->tun_mtx);
	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/*
	 * junk all pending output
	 */
	s = splimp();
	IFQ_PURGE(&ifp->if_snd);
	splx(s);
	mtx_unlock(&tp->tun_mtx);

	if (ifp->if_flags & IFF_UP) {
		s = splimp();
		if_down(ifp);
		splx(s);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		struct ifaddr *ifa;

		s = splimp();
		/* find internet addresses and delete routes */
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
			if (ifa->ifa_addr->sa_family == AF_INET)
				/* Unlocked read. */
				rtinit(ifa, (int)RTM_DELETE,
				    tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
		ifp->if_flags &= ~IFF_RUNNING;
		splx(s);
	}

	/* Clear async ownership and wake select/poll sleepers. */
	funsetown(&tp->tun_sigio);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
	TUNDEBUG (ifp, "closed\n");
	return (0);
}
372
373 static int
374 tuninit(struct ifnet *ifp)
375 {
376 struct tun_softc *tp = ifp->if_softc;
377 struct ifaddr *ifa;
378 int error = 0;
379
380 TUNDEBUG(ifp, "tuninit\n");
381
382 ifp->if_flags |= IFF_UP | IFF_RUNNING;
383 getmicrotime(&ifp->if_lastchange);
384
385 for (ifa = TAILQ_FIRST(&ifp->if_addrhead); ifa;
386 ifa = TAILQ_NEXT(ifa, ifa_link)) {
387 if (ifa->ifa_addr == NULL)
388 error = EFAULT;
389 /* XXX: Should maybe return straight off? */
390 else {
391 #ifdef INET
392 if (ifa->ifa_addr->sa_family == AF_INET) {
393 struct sockaddr_in *si;
394
395 si = (struct sockaddr_in *)ifa->ifa_addr;
396 mtx_lock(&tp->tun_mtx);
397 if (si->sin_addr.s_addr)
398 tp->tun_flags |= TUN_IASET;
399
400 si = (struct sockaddr_in *)ifa->ifa_dstaddr;
401 if (si && si->sin_addr.s_addr)
402 tp->tun_flags |= TUN_DSTADDR;
403 mtx_unlock(&tp->tun_mtx);
404 }
405 #endif
406 }
407 }
408 return (error);
409 }
410
/*
 * Process an ioctl request.
 *
 * Network-stack-side handler for the tun ifnet: reports the owning
 * PID for SIOCGIFSTATUS, (re)initializes on address changes, and sets
 * the MTU.  Unknown requests return EINVAL.
 */
static int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0, s;

	s = splimp();
	switch(cmd) {
	case SIOCGIFSTATUS:
		/* Append the owner PID to the status text, if claimed. */
		ifs = (struct ifstat *)data;
		mtx_lock(&tp->tun_mtx);
		if (tp->tun_pid)
			sprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		mtx_unlock(&tp->tun_mtx);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Accepted; no action needed. */
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}
454
455 /*
456 * tunoutput - queue packets from higher level ready to put out.
457 */
458 static int
459 tunoutput(
460 struct ifnet *ifp,
461 struct mbuf *m0,
462 struct sockaddr *dst,
463 struct rtentry *rt)
464 {
465 struct tun_softc *tp = ifp->if_softc;
466 u_short cached_tun_flags;
467 int error;
468
469 TUNDEBUG (ifp, "tunoutput\n");
470
471 #ifdef MAC
472 error = mac_check_ifnet_transmit(ifp, m0);
473 if (error) {
474 m_freem(m0);
475 return (error);
476 }
477 #endif
478
479 /* Could be unlocked read? */
480 mtx_lock(&tp->tun_mtx);
481 cached_tun_flags = tp->tun_flags;
482 mtx_unlock(&tp->tun_mtx);
483 if ((cached_tun_flags & TUN_READY) != TUN_READY) {
484 TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
485 m_freem (m0);
486 return (EHOSTDOWN);
487 }
488
489 if ((ifp->if_flags & IFF_UP) != IFF_UP) {
490 m_freem (m0);
491 return (EHOSTDOWN);
492 }
493
494 /* BPF write needs to be handled specially */
495 if (dst->sa_family == AF_UNSPEC) {
496 dst->sa_family = *(mtod(m0, int *));
497 m0->m_len -= sizeof(int);
498 m0->m_pkthdr.len -= sizeof(int);
499 m0->m_data += sizeof(int);
500 }
501
502 if (ifp->if_bpf) {
503 uint32_t af = dst->sa_family;
504 bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0);
505 }
506
507 /* prepend sockaddr? this may abort if the mbuf allocation fails */
508 if (cached_tun_flags & TUN_LMODE) {
509 /* allocate space for sockaddr */
510 M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
511
512 /* if allocation failed drop packet */
513 if (m0 == NULL) {
514 ifp->if_iqdrops++;
515 ifp->if_oerrors++;
516 return (ENOBUFS);
517 } else {
518 bcopy(dst, m0->m_data, dst->sa_len);
519 }
520 }
521
522 if (cached_tun_flags & TUN_IFHEAD) {
523 /* Prepend the address family */
524 M_PREPEND(m0, 4, M_DONTWAIT);
525
526 /* if allocation failed drop packet */
527 if (m0 == NULL) {
528 ifp->if_iqdrops++;
529 ifp->if_oerrors++;
530 return (ENOBUFS);
531 } else
532 *(u_int32_t *)m0->m_data = htonl(dst->sa_family);
533 } else {
534 #ifdef INET
535 if (dst->sa_family != AF_INET)
536 #endif
537 {
538 m_freem(m0);
539 return (EAFNOSUPPORT);
540 }
541 }
542
543 IFQ_HANDOFF(ifp, m0, error);
544 if (error) {
545 ifp->if_collisions++;
546 return (ENOBUFS);
547 }
548 ifp->if_opackets++;
549 return (0);
550 }
551
/*
 * the cdevsw interface is now pretty minimal.
 *
 * Device-side ioctl handler: tuninfo get/set, debug toggles,
 * link-mode and if-head framing selection, ownership transfer, and
 * the generic async-I/O controls (FIONBIO, FIOASYNC, FIONREAD,
 * FIOSETOWN/FIOGETOWN plus the deprecated TIOCSPGRP/TIOCGPGRP).
 * Returns 0 on success or an errno; ENOTTY for unknown commands.
 */
static int
tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	int		s;
	int		error;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (cmd) {
	case TUNSIFINFO:
		/* Set MTU/type/baudrate; changing the MTU needs privilege. */
		tunp = (struct tuninfo *)data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		if (tp->tun_if.if_mtu != tunp->mtu
		    && (error = suser(td)) != 0)
			return (error);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;
	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;
	case TUNSLMODE:
		/* Link mode and if-head framing are mutually exclusive. */
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNSIFHEAD:
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNGIFHEAD:
		/* Could be unlocked read? */
		mtx_lock(&tp->tun_mtx);
		*(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		switch (*(int *)data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		/* Transfer ownership of the tunnel to the calling process. */
		mtx_lock(&tp->tun_mtx);
		tp->tun_pid = curthread->td_proc->p_pid;
		mtx_unlock(&tp->tun_mtx);
		break;
	case FIONBIO:
		break;
	case FIOASYNC:
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		mtx_unlock(&tp->tun_mtx);
		break;
	case FIONREAD:
		/* Report the byte length of the next queued packet, or 0. */
		s = splimp();
		if (!IFQ_IS_EMPTY(&tp->tun_if.if_snd)) {
			struct mbuf *mb;
			IFQ_LOCK(&tp->tun_if.if_snd);
			IFQ_POLL_NOLOCK(&tp->tun_if.if_snd, mb);
			for( *(int *)data = 0; mb != 0; mb = mb->m_next)
				*(int *)data += mb->m_len;
			IFQ_UNLOCK(&tp->tun_if.if_snd);
		} else
			*(int *)data = 0;
		splx(s);
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}
676
/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 *
 * Dequeues one packet from if_snd and copies it out to the caller,
 * sleeping (unless IO_NDELAY is set) until tunstart() signals that a
 * packet is available.  Any part of the packet that does not fit in
 * the caller's buffer is dropped.
 */
static int
tunread(struct cdev *dev, struct uio *uio, int flag)
{
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int error=0, len, s;

	TUNDEBUG (ifp, "read\n");
	mtx_lock(&tp->tun_mtx);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		mtx_unlock(&tp->tun_mtx);
		TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
		return (EHOSTDOWN);
	}

	/* Clear any stale read-wait marker before examining the queue. */
	tp->tun_flags &= ~TUN_RWAIT;
	mtx_unlock(&tp->tun_mtx);

	s = splimp();
	do {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* Queue empty: fail fast, or sleep for tunstart(). */
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			mtx_lock(&tp->tun_mtx);
			tp->tun_flags |= TUN_RWAIT;
			mtx_unlock(&tp->tun_mtx);
			if((error = tsleep(tp, PCATCH | (PZERO + 1),
					"tunread", 0)) != 0) {
				splx(s);
				return (error);
			}
		}
	} while (m == NULL);
	splx(s);

	/* Copy out as much of the packet as fits the user's buffer. */
	while (m && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m->m_len);
		if (len != 0)
			error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

	if (m) {
		/* Remainder did not fit; the partial packet is dropped. */
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m);
	}
	return (error);
}
733
734 /*
735 * the cdevsw write interface - an atomic write is a packet - or else!
736 */
737 static int
738 tunwrite(struct cdev *dev, struct uio *uio, int flag)
739 {
740 struct tun_softc *tp = dev->si_drv1;
741 struct ifnet *ifp = &tp->tun_if;
742 struct mbuf *m;
743 int error = 0;
744 uint32_t family;
745 int isr;
746
747 TUNDEBUG(ifp, "tunwrite\n");
748
749 if ((ifp->if_flags & IFF_UP) != IFF_UP)
750 /* ignore silently */
751 return (0);
752
753 if (uio->uio_resid == 0)
754 return (0);
755
756 if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
757 TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);
758 return (EIO);
759 }
760
761 if ((m = m_uiotombuf(uio, M_DONTWAIT, 0, 0)) == NULL) {
762 ifp->if_ierrors++;
763 return (error);
764 }
765
766 m->m_pkthdr.rcvif = ifp;
767 #ifdef MAC
768 mac_create_mbuf_from_ifnet(ifp, m);
769 #endif
770
771 /* Could be unlocked read? */
772 mtx_lock(&tp->tun_mtx);
773 if (tp->tun_flags & TUN_IFHEAD) {
774 mtx_unlock(&tp->tun_mtx);
775 if (m->m_len < sizeof(family) &&
776 (m = m_pullup(m, sizeof(family))) == NULL)
777 return (ENOBUFS);
778 family = ntohl(*mtod(m, u_int32_t *));
779 m_adj(m, sizeof(family));
780 } else {
781 mtx_unlock(&tp->tun_mtx);
782 family = AF_INET;
783 }
784
785 BPF_MTAP2(ifp, &family, sizeof(family), m);
786
787 switch (family) {
788 #ifdef INET
789 case AF_INET:
790 isr = NETISR_IP;
791 break;
792 #endif
793 #ifdef INET6
794 case AF_INET6:
795 isr = NETISR_IPV6;
796 break;
797 #endif
798 #ifdef IPX
799 case AF_IPX:
800 isr = NETISR_IPX;
801 break;
802 #endif
803 #ifdef NETATALK
804 case AF_APPLETALK:
805 isr = NETISR_ATALK2;
806 break;
807 #endif
808 default:
809 m_freem(m);
810 return (EAFNOSUPPORT);
811 }
812 /* First chunk of an mbuf contains good junk */
813 if (harvest.point_to_point)
814 random_harvest(m, 16, 3, 0, RANDOM_NET);
815 ifp->if_ibytes += m->m_pkthdr.len;
816 ifp->if_ipackets++;
817 netisr_dispatch(isr, m);
818 return (0);
819 }
820
821 /*
822 * tunpoll - the poll interface, this is only useful on reads
823 * really. The write detect always returns true, write never blocks
824 * anyway, it either accepts the packet or drops it.
825 */
826 static int
827 tunpoll(struct cdev *dev, int events, struct thread *td)
828 {
829 int s;
830 struct tun_softc *tp = dev->si_drv1;
831 struct ifnet *ifp = &tp->tun_if;
832 int revents = 0;
833 struct mbuf *m;
834
835 s = splimp();
836 TUNDEBUG(ifp, "tunpoll\n");
837
838 if (events & (POLLIN | POLLRDNORM)) {
839 IFQ_LOCK(&ifp->if_snd);
840 IFQ_POLL_NOLOCK(&ifp->if_snd, m);
841 if (m != NULL) {
842 TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
843 revents |= events & (POLLIN | POLLRDNORM);
844 } else {
845 TUNDEBUG(ifp, "tunpoll waiting\n");
846 selrecord(td, &tp->tun_rsel);
847 }
848 IFQ_UNLOCK(&ifp->if_snd);
849 }
850 if (events & (POLLOUT | POLLWRNORM))
851 revents |= events & (POLLOUT | POLLWRNORM);
852
853 splx(s);
854 return (revents);
855 }
Cache object: f3d0af01c401ad93d4118a5e44414634
|