FreeBSD/Linux Kernel Cross Reference
sys/net/if_tun.c


    1 /*      $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $  */
    2 
    3 /*-
    4  * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
    5  * Nottingham University 1987.
    6  *
    7  * This source may be freely distributed, however I would be interested
    8  * in any changes that are made.
    9  *
   10  * This driver takes packets off the IP i/f and hands them up to a
   11  * user process to have its wicked way with. This driver has its
   12  * roots in a similar driver written by Phil Cockcroft (formerly) at
   13  * UCL. This driver is based much more on read/write/poll mode of
   14  * operation though.
   15  *
   16  * $FreeBSD: releng/8.3/sys/net/if_tun.c 231380 2012-02-10 13:15:11Z bz $
   17  */
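
/*
 * Userland framing, as implemented below: by default each read() or
 * write() on the control device carries one raw IPv4 packet.  With
 * TUNSIFHEAD set, every packet is preceded by a four-byte address
 * family in network byte order (see tunoutput() and tunwrite());
 * with TUNSLMODE set, packets delivered to readers are instead
 * prefixed with the destination sockaddr.  The two modes are
 * mutually exclusive (see the TUNSLMODE/TUNSIFHEAD cases in
 * tunioctl()).
 */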
   18 
   19 #include "opt_atalk.h"
   20 #include "opt_inet.h"
   21 #include "opt_inet6.h"
   22 #include "opt_ipx.h"
   23 
   24 #include <sys/param.h>
   25 #include <sys/priv.h>
   26 #include <sys/proc.h>
   27 #include <sys/systm.h>
   28 #include <sys/jail.h>
   29 #include <sys/mbuf.h>
   30 #include <sys/module.h>
   31 #include <sys/socket.h>
   32 #include <sys/fcntl.h>
   33 #include <sys/filio.h>
   34 #include <sys/sockio.h>
   35 #include <sys/ttycom.h>
   36 #include <sys/poll.h>
   37 #include <sys/selinfo.h>
   38 #include <sys/signalvar.h>
   39 #include <sys/filedesc.h>
   40 #include <sys/kernel.h>
   41 #include <sys/sysctl.h>
   42 #include <sys/conf.h>
   43 #include <sys/uio.h>
   44 #include <sys/malloc.h>
   45 #include <sys/random.h>
   46 
   47 #include <net/if.h>
   48 #include <net/if_clone.h>
   49 #include <net/if_types.h>
   50 #include <net/netisr.h>
   51 #include <net/route.h>
   52 #include <net/vnet.h>
   53 #ifdef INET
   54 #include <netinet/in.h>
   55 #endif
   56 #include <net/bpf.h>
   57 #include <net/if_tun.h>
   58 
   59 #include <sys/queue.h>
   60 #include <sys/condvar.h>
   61 
   62 #include <security/mac/mac_framework.h>
   63 
   64 /*
   65  * tun_list is protected by global tunmtx.  Other mutable fields are
   66  * protected by tun->tun_mtx, or by their owning subsystem.  tun_dev is
   67  * static for the duration of a tunnel interface.
   68  */
   69 struct tun_softc {
   70         TAILQ_ENTRY(tun_softc)  tun_list;
   71         struct cdev *tun_dev;
   72         u_short tun_flags;              /* misc flags */
   73 #define TUN_OPEN        0x0001          /* device has been opened */
   74 #define TUN_INITED      0x0002          /* softc fully initialized */
   75 #define TUN_RCOLL       0x0004          /* read collision (unused here) */
   76 #define TUN_IASET       0x0008          /* interface address has been set */
   77 #define TUN_DSTADDR     0x0010          /* p2p destination address set */
   78 #define TUN_LMODE       0x0020          /* prefix reads with dst sockaddr */
   79 #define TUN_RWAIT       0x0040          /* a reader is blocked in tunread() */
   80 #define TUN_ASYNC       0x0080          /* deliver SIGIO on packet arrival */
   81 #define TUN_IFHEAD      0x0100          /* prefix packets with address family */
   82 
   83 #define TUN_READY       (TUN_OPEN | TUN_INITED)
   84 
   85         /*
   86          * XXXRW: tun_pid is used to exclusively lock /dev/tun.  Is this
   87          * actually needed?  Can we just return EBUSY if already open?
   88          * The problem is that this involves inherent races when a tun
   89          * device is handed off from one process to another, rather than
   90          * the recorded pid merely being slightly stale.
   91          */
   92         pid_t   tun_pid;                /* owning pid */
   93         struct  ifnet *tun_ifp;         /* the interface */
   94         struct  sigio *tun_sigio;       /* information for async I/O */
   95         struct  selinfo tun_rsel;       /* read select */
   96         struct mtx      tun_mtx;        /* protect mutable softc fields */
   97         struct cv       tun_cv;         /* protect against ref'd dev destroy */
   98 };
   99 #define TUN2IFP(sc)     ((sc)->tun_ifp)
  100 
  101 #define TUNDEBUG        if (tundebug) if_printf
  102 #define TUNNAME         "tun"
  103 
  104 /*
  105  * All mutable global variables in if_tun are locked using tunmtx, with
  106  * the exception of tundebug, which is used unlocked, and tunclones,
  107  * which is static after setup.
  108  */
  109 static struct mtx tunmtx;
  110 static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface");
  111 static int tundebug = 0;
  112 static int tundclone = 1;
  113 static struct clonedevs *tunclones;
  114 static TAILQ_HEAD(,tun_softc)   tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
  115 SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
  116 
  117 SYSCTL_DECL(_net_link);
  118 SYSCTL_NODE(_net_link, OID_AUTO, tun, CTLFLAG_RW, 0,
  119     "IP tunnel software network interface.");
  120 SYSCTL_INT(_net_link_tun, OID_AUTO, devfs_cloning, CTLFLAG_RW, &tundclone, 0,
  121     "Enable legacy devfs interface creation.");
  122 
  123 TUNABLE_INT("net.link.tun.devfs_cloning", &tundclone);
  124 
  125 static void     tunclone(void *arg, struct ucred *cred, char *name,
  126                     int namelen, struct cdev **dev);
  127 static void     tuncreate(const char *name, struct cdev *dev);
  128 static int      tunifioctl(struct ifnet *, u_long, caddr_t);
  129 static void     tuninit(struct ifnet *);
  130 static int      tunmodevent(module_t, int, void *);
  131 static int      tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
  132                     struct route *ro);
  133 static void     tunstart(struct ifnet *);
  134 
  135 static int      tun_clone_create(struct if_clone *, int, caddr_t);
  136 static void     tun_clone_destroy(struct ifnet *);
  137 
  138 IFC_SIMPLE_DECLARE(tun, 0);
  139 
  140 static d_open_t         tunopen;
  141 static d_close_t        tunclose;
  142 static d_read_t         tunread;
  143 static d_write_t        tunwrite;
  144 static d_ioctl_t        tunioctl;
  145 static d_poll_t         tunpoll;
  146 static d_kqfilter_t     tunkqfilter;
  147 
  148 static int              tunkqread(struct knote *, long);
  149 static int              tunkqwrite(struct knote *, long);
  150 static void             tunkqdetach(struct knote *);
  151 
  152 static struct filterops tun_read_filterops = {
  153         .f_isfd =       1,
  154         .f_attach =     NULL,
  155         .f_detach =     tunkqdetach,
  156         .f_event =      tunkqread,
  157 };
  158 
  159 static struct filterops tun_write_filterops = {
  160         .f_isfd =       1,
  161         .f_attach =     NULL,
  162         .f_detach =     tunkqdetach,
  163         .f_event =      tunkqwrite,
  164 };
  165 
  166 static struct cdevsw tun_cdevsw = {
  167         .d_version =    D_VERSION,
  168         .d_flags =      D_PSEUDO | D_NEEDMINOR,
  169         .d_open =       tunopen,
  170         .d_close =      tunclose,
  171         .d_read =       tunread,
  172         .d_write =      tunwrite,
  173         .d_ioctl =      tunioctl,
  174         .d_poll =       tunpoll,
  175         .d_kqfilter =   tunkqfilter,
  176         .d_name =       TUNNAME,
  177 };
  178 
  179 static int
  180 tun_clone_create(struct if_clone *ifc, int unit, caddr_t params)
  181 {
  182         struct cdev *dev;
  183         int i;
  184 
  185         /* find any existing device, or allocate new unit number */
  186         i = clone_create(&tunclones, &tun_cdevsw, &unit, &dev, 0);
  187         if (i) {
  188                 /* No preexisting struct cdev *, create one */
  189                 dev = make_dev(&tun_cdevsw, unit,
  190                     UID_UUCP, GID_DIALER, 0600, "%s%d", ifc->ifc_name, unit);
  191         }
  192         tuncreate(ifc->ifc_name, dev);
  193 
  194         return (0);
  195 }
  196 
  197 static void
  198 tunclone(void *arg, struct ucred *cred, char *name, int namelen,
  199     struct cdev **dev)
  200 {
  201         char devname[SPECNAMELEN + 1];
  202         int u, i, append_unit;
  203 
  204         if (*dev != NULL)
  205                 return;
  206 
  207         /*
  208          * If tun cloning is enabled, only the superuser can create an
  209          * interface.
  210          */
  211         if (!tundclone || priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0)
  212                 return;
  213 
  214         if (strcmp(name, TUNNAME) == 0) {
  215                 u = -1;
  216         } else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1)
  217                 return; /* Don't recognise the name */
  218         if (u != -1 && u > IF_MAXUNIT)
  219                 return; /* Unit number too high */
  220 
  221         if (u == -1)
  222                 append_unit = 1;
  223         else
  224                 append_unit = 0;
  225 
  226         CURVNET_SET(CRED_TO_VNET(cred));
  227         /* find any existing device, or allocate new unit number */
  228         i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0);
  229         if (i) {
  230                 if (append_unit) {
  231                         namelen = snprintf(devname, sizeof(devname), "%s%d",
  232                             name, u);
  233                         name = devname;
  234                 }
  235                 /* No preexisting struct cdev *, create one */
  236                 *dev = make_dev_credf(MAKEDEV_REF, &tun_cdevsw, u, cred,
  237                     UID_UUCP, GID_DIALER, 0600, "%s", name);
  238         }
  239 
  240         if_clone_create(name, namelen, NULL);
  241         CURVNET_RESTORE();
  242 }
  243 
  244 static void
  245 tun_destroy(struct tun_softc *tp)
  246 {
  247         struct cdev *dev;
  248 
  249         /* Unlocked read. */
  250         mtx_lock(&tp->tun_mtx);
  251         if ((tp->tun_flags & TUN_OPEN) != 0)
  252                 cv_wait_unlock(&tp->tun_cv, &tp->tun_mtx);
  253         else
  254                 mtx_unlock(&tp->tun_mtx);
  255 
  256         CURVNET_SET(TUN2IFP(tp)->if_vnet);
  257         dev = tp->tun_dev;
  258         bpfdetach(TUN2IFP(tp));
  259         if_detach(TUN2IFP(tp));
  260         if_free(TUN2IFP(tp));
  261         destroy_dev(dev);
  262         seldrain(&tp->tun_rsel);
  263         knlist_destroy(&tp->tun_rsel.si_note);
  264         mtx_destroy(&tp->tun_mtx);
  265         cv_destroy(&tp->tun_cv);
  266         free(tp, M_TUN);
  267         CURVNET_RESTORE();
  268 }
  269 
  270 static void
  271 tun_clone_destroy(struct ifnet *ifp)
  272 {
  273         struct tun_softc *tp = ifp->if_softc;
  274 
  275         mtx_lock(&tunmtx);
  276         TAILQ_REMOVE(&tunhead, tp, tun_list);
  277         mtx_unlock(&tunmtx);
  278         tun_destroy(tp);
  279 }
  280 
  281 static int
  282 tunmodevent(module_t mod, int type, void *data)
  283 {
  284         static eventhandler_tag tag;
  285         struct tun_softc *tp;
  286 
  287         switch (type) {
  288         case MOD_LOAD:
  289                 mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
  290                 clone_setup(&tunclones);
  291                 tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000);
  292                 if (tag == NULL)
  293                         return (ENOMEM);
  294                 if_clone_attach(&tun_cloner);
  295                 break;
  296         case MOD_UNLOAD:
  297                 if_clone_detach(&tun_cloner);
  298                 EVENTHANDLER_DEREGISTER(dev_clone, tag);
  299                 drain_dev_clone_events();
  300 
  301                 mtx_lock(&tunmtx);
  302                 while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
  303                         TAILQ_REMOVE(&tunhead, tp, tun_list);
  304                         mtx_unlock(&tunmtx);
  305                         tun_destroy(tp);
  306                         mtx_lock(&tunmtx);
  307                 }
  308                 mtx_unlock(&tunmtx);
  309                 clone_cleanup(&tunclones);
  310                 mtx_destroy(&tunmtx);
  311                 break;
  312         default:
  313                 return EOPNOTSUPP;
  314         }
  315         return 0;
  316 }
  317 
  318 static moduledata_t tun_mod = {
  319         "if_tun",
  320         tunmodevent,
  321         0
  322 };
  323 
  324 DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
  325 
  326 static void
  327 tunstart(struct ifnet *ifp)
  328 {
  329         struct tun_softc *tp = ifp->if_softc;
  330         struct mbuf *m;
  331 
  332         TUNDEBUG(ifp,"%s starting\n", ifp->if_xname);
  333         if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
  334                 IFQ_LOCK(&ifp->if_snd);
  335                 IFQ_POLL_NOLOCK(&ifp->if_snd, m);
  336                 if (m == NULL) {
  337                         IFQ_UNLOCK(&ifp->if_snd);
  338                         return;
  339                 }
  340                 IFQ_UNLOCK(&ifp->if_snd);
  341         }
  342 
  343         mtx_lock(&tp->tun_mtx);
  344         if (tp->tun_flags & TUN_RWAIT) {
  345                 tp->tun_flags &= ~TUN_RWAIT;
  346                 wakeup(tp);
  347         }
  348         selwakeuppri(&tp->tun_rsel, PZERO + 1);
  349         KNOTE_LOCKED(&tp->tun_rsel.si_note, 0);
  350         if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) {
  351                 mtx_unlock(&tp->tun_mtx);
  352                 pgsigio(&tp->tun_sigio, SIGIO, 0);
  353         } else
  354                 mtx_unlock(&tp->tun_mtx);
  355 }
  356 
  357 /* XXX: should return an error code so it can fail. */
  358 static void
  359 tuncreate(const char *name, struct cdev *dev)
  360 {
  361         struct tun_softc *sc;
  362         struct ifnet *ifp;
  363 
  364         dev->si_flags &= ~SI_CHEAPCLONE;
  365 
  366         sc = malloc(sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
  367         mtx_init(&sc->tun_mtx, "tun_mtx", NULL, MTX_DEF);
  368         cv_init(&sc->tun_cv, "tun_condvar");
  369         sc->tun_flags = TUN_INITED;
  370         sc->tun_dev = dev;
  371         mtx_lock(&tunmtx);
  372         TAILQ_INSERT_TAIL(&tunhead, sc, tun_list);
  373         mtx_unlock(&tunmtx);
  374 
  375         ifp = sc->tun_ifp = if_alloc(IFT_PPP);
  376         if (ifp == NULL)
  377                 panic("%s%d: failed to if_alloc() interface.\n",
  378                     name, dev2unit(dev));
  379         if_initname(ifp, name, dev2unit(dev));
  380         ifp->if_mtu = TUNMTU;
  381         ifp->if_ioctl = tunifioctl;
  382         ifp->if_output = tunoutput;
  383         ifp->if_start = tunstart;
  384         ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
  385         ifp->if_softc = sc;
  386         IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
  387         ifp->if_snd.ifq_drv_maxlen = 0;
  388         IFQ_SET_READY(&ifp->if_snd);
  389         knlist_init_mtx(&sc->tun_rsel.si_note, &sc->tun_mtx);
  390         ifp->if_capabilities |= IFCAP_LINKSTATE;
  391         ifp->if_capenable |= IFCAP_LINKSTATE;
  392 
  393         if_attach(ifp);
  394         bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
  395         dev->si_drv1 = sc;
  396         TUNDEBUG(ifp, "interface %s is created, minor = %#x\n",
  397             ifp->if_xname, dev2unit(dev));
  398 }
  399 
  400 static int
  401 tunopen(struct cdev *dev, int flag, int mode, struct thread *td)
  402 {
  403         struct ifnet    *ifp;
  404         struct tun_softc *tp;
  405 
  406         /*
  407          * XXXRW: Non-atomic test and set of dev->si_drv1 requires
  408          * synchronization.
  409          */
  410         tp = dev->si_drv1;
  411         if (!tp) {
  412                 tuncreate(TUNNAME, dev);
  413                 tp = dev->si_drv1;
  414         }
  415 
  416         /*
  417          * XXXRW: This use of tun_pid is subject to error due to the
  418          * fact that a reference to the tunnel can live beyond the
  419          * death of the process that created it.  Can we replace this
  420          * with a simple busy flag?
  421          */
  422         mtx_lock(&tp->tun_mtx);
  423         if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid) {
  424                 mtx_unlock(&tp->tun_mtx);
  425                 return (EBUSY);
  426         }
  427         tp->tun_pid = td->td_proc->p_pid;
  428 
  429         tp->tun_flags |= TUN_OPEN;
  430         ifp = TUN2IFP(tp);
  431         if_link_state_change(ifp, LINK_STATE_UP);
  432         TUNDEBUG(ifp, "open\n");
  433         mtx_unlock(&tp->tun_mtx);
  434 
  435         return (0);
  436 }
  437 
  438 /*
  439  * tunclose - close the device - mark i/f down & delete
  440  * routing info
  441  */
  442 static  int
  443 tunclose(struct cdev *dev, int foo, int bar, struct thread *td)
  444 {
  445         struct tun_softc *tp;
  446         struct ifnet *ifp;
  447 
  448         tp = dev->si_drv1;
  449         ifp = TUN2IFP(tp);
  450 
  451         mtx_lock(&tp->tun_mtx);
  452         tp->tun_flags &= ~TUN_OPEN;
  453         tp->tun_pid = 0;
  454 
  455         /*
  456          * junk all pending output
  457          */
  458         CURVNET_SET(ifp->if_vnet);
  459         IFQ_PURGE(&ifp->if_snd);
  460 
  461         if (ifp->if_flags & IFF_UP) {
  462                 mtx_unlock(&tp->tun_mtx);
  463                 if_down(ifp);
  464                 mtx_lock(&tp->tun_mtx);
  465         }
  466 
  467         /* Delete all addresses and routes which reference this interface. */
  468         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  469                 struct ifaddr *ifa;
  470 
  471                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  472                 mtx_unlock(&tp->tun_mtx);
  473                 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
  474                         /* deal w/IPv4 PtP destination; unlocked read */
  475                         if (ifa->ifa_addr->sa_family == AF_INET) {
  476                                 rtinit(ifa, (int)RTM_DELETE,
  477                                     tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
  478                         } else {
  479                                 rtinit(ifa, (int)RTM_DELETE, 0);
  480                         }
  481                 }
  482                 if_purgeaddrs(ifp);
  483                 mtx_lock(&tp->tun_mtx);
  484         }
  485         if_link_state_change(ifp, LINK_STATE_DOWN);
  486         CURVNET_RESTORE();
  487 
  488         funsetown(&tp->tun_sigio);
  489         selwakeuppri(&tp->tun_rsel, PZERO + 1);
  490         KNOTE_LOCKED(&tp->tun_rsel.si_note, 0);
  491         TUNDEBUG (ifp, "closed\n");
  492 
  493         cv_broadcast(&tp->tun_cv);
  494         mtx_unlock(&tp->tun_mtx);
  495         return (0);
  496 }
  497 
  498 static void
  499 tuninit(struct ifnet *ifp)
  500 {
  501         struct tun_softc *tp = ifp->if_softc;
  502 #ifdef INET
  503         struct ifaddr *ifa;
  504 #endif
  505 
  506         TUNDEBUG(ifp, "tuninit\n");
  507 
  508         mtx_lock(&tp->tun_mtx);
  509         ifp->if_flags |= IFF_UP;
  510         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  511         getmicrotime(&ifp->if_lastchange);
  512 
  513 #ifdef INET
  514         if_addr_rlock(ifp);
  515         TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
  516                 if (ifa->ifa_addr->sa_family == AF_INET) {
  517                         struct sockaddr_in *si;
  518 
  519                         si = (struct sockaddr_in *)ifa->ifa_addr;
  520                         if (si->sin_addr.s_addr)
  521                                 tp->tun_flags |= TUN_IASET;
  522 
  523                         si = (struct sockaddr_in *)ifa->ifa_dstaddr;
  524                         if (si && si->sin_addr.s_addr)
  525                                 tp->tun_flags |= TUN_DSTADDR;
  526                 }
  527         }
  528         if_addr_runlock(ifp);
  529 #endif
  530         mtx_unlock(&tp->tun_mtx);
  531 }
  532 
  533 /*
  534  * Process an ioctl request.
  535  */
  536 static int
  537 tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  538 {
  539         struct ifreq *ifr = (struct ifreq *)data;
  540         struct tun_softc *tp = ifp->if_softc;
  541         struct ifstat *ifs;
  542         int             error = 0;
  543 
  544         switch(cmd) {
  545         case SIOCGIFSTATUS:
  546                 ifs = (struct ifstat *)data;
  547                 mtx_lock(&tp->tun_mtx);
  548                 if (tp->tun_pid)
  549                         sprintf(ifs->ascii + strlen(ifs->ascii),
  550                             "\tOpened by PID %d\n", tp->tun_pid);
  551                 mtx_unlock(&tp->tun_mtx);
  552                 break;
  553         case SIOCSIFADDR:
  554                 tuninit(ifp);
  555                 TUNDEBUG(ifp, "address set\n");
  556                 break;
  557         case SIOCSIFDSTADDR:
  558                 tuninit(ifp);
  559                 TUNDEBUG(ifp, "destination address set\n");
  560                 break;
  561         case SIOCSIFMTU:
  562                 ifp->if_mtu = ifr->ifr_mtu;
  563                 TUNDEBUG(ifp, "mtu set\n");
  564                 break;
  565         case SIOCSIFFLAGS:
  566         case SIOCADDMULTI:
  567         case SIOCDELMULTI:
  568                 break;
  569         default:
  570                 error = EINVAL;
  571         }
  572         return (error);
  573 }
  574 
  575 /*
  576  * tunoutput - queue packets from higher level ready to put out.
  577  */
  578 static int
  579 tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
  580     struct route *ro)
  581 {
  582         struct tun_softc *tp = ifp->if_softc;
  583         u_short cached_tun_flags;
  584         int error;
  585         u_int32_t af;
  586 
  587         TUNDEBUG (ifp, "tunoutput\n");
  588 
  589 #ifdef MAC
  590         error = mac_ifnet_check_transmit(ifp, m0);
  591         if (error) {
  592                 m_freem(m0);
  593                 return (error);
  594         }
  595 #endif
  596 
  597         /* Could be unlocked read? */
  598         mtx_lock(&tp->tun_mtx);
  599         cached_tun_flags = tp->tun_flags;
  600         mtx_unlock(&tp->tun_mtx);
  601         if ((cached_tun_flags & TUN_READY) != TUN_READY) {
  602                 TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
  603                 m_freem (m0);
  604                 return (EHOSTDOWN);
  605         }
  606 
  607         if ((ifp->if_flags & IFF_UP) != IFF_UP) {
  608                 m_freem (m0);
  609                 return (EHOSTDOWN);
  610         }
  611 
  612         /* BPF writes need to be handled specially. */
  613         if (dst->sa_family == AF_UNSPEC) {
  614                 bcopy(dst->sa_data, &af, sizeof(af));
  615                 dst->sa_family = af; 
  616         }
  617 
  618         if (bpf_peers_present(ifp->if_bpf)) {
  619                 af = dst->sa_family;
  620                 bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0);
  621         }
  622 
  623         /* prepend sockaddr? this may abort if the mbuf allocation fails */
  624         if (cached_tun_flags & TUN_LMODE) {
  625                 /* allocate space for sockaddr */
  626                 M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
  627 
  628                 /* if allocation failed drop packet */
  629                 if (m0 == NULL) {
  630                         ifp->if_iqdrops++;
  631                         ifp->if_oerrors++;
  632                         return (ENOBUFS);
  633                 } else {
  634                         bcopy(dst, m0->m_data, dst->sa_len);
  635                 }
  636         }
  637 
  638         if (cached_tun_flags & TUN_IFHEAD) {
  639                 /* Prepend the address family */
  640                 M_PREPEND(m0, 4, M_DONTWAIT);
  641 
  642                 /* if allocation failed drop packet */
  643                 if (m0 == NULL) {
  644                         ifp->if_iqdrops++;
  645                         ifp->if_oerrors++;
  646                         return (ENOBUFS);
  647                 } else
  648                         *(u_int32_t *)m0->m_data = htonl(dst->sa_family);
  649         } else {
  650 #ifdef INET
  651                 if (dst->sa_family != AF_INET)
  652 #endif
  653                 {
  654                         m_freem(m0);
  655                         return (EAFNOSUPPORT);
  656                 }
  657         }
  658 
  659         error = (ifp->if_transmit)(ifp, m0);
  660         if (error)
  661                 return (ENOBUFS);
  662         ifp->if_opackets++;
  663         return (0);
  664 }
  665 
  666 /*
  667  * the cdevsw interface is now pretty minimal.
  668  */
  669 static  int
  670 tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
  671     struct thread *td)
  672 {
  673         int             error;
  674         struct tun_softc *tp = dev->si_drv1;
  675         struct tuninfo *tunp;
  676 
  677         switch (cmd) {
  678         case TUNSIFINFO:
  679                 tunp = (struct tuninfo *)data;
  680                 if (tunp->mtu < IF_MINMTU)
  681                         return (EINVAL);
  682                 if (TUN2IFP(tp)->if_mtu != tunp->mtu) {
  683                         error = priv_check(td, PRIV_NET_SETIFMTU);
  684                         if (error)
  685                                 return (error);
  686                 }
  687                 mtx_lock(&tp->tun_mtx);
  688                 TUN2IFP(tp)->if_mtu = tunp->mtu;
  689                 TUN2IFP(tp)->if_type = tunp->type;
  690                 TUN2IFP(tp)->if_baudrate = tunp->baudrate;
  691                 mtx_unlock(&tp->tun_mtx);
  692                 break;
  693         case TUNGIFINFO:
  694                 tunp = (struct tuninfo *)data;
  695                 mtx_lock(&tp->tun_mtx);
  696                 tunp->mtu = TUN2IFP(tp)->if_mtu;
  697                 tunp->type = TUN2IFP(tp)->if_type;
  698                 tunp->baudrate = TUN2IFP(tp)->if_baudrate;
  699                 mtx_unlock(&tp->tun_mtx);
  700                 break;
  701         case TUNSDEBUG:
  702                 tundebug = *(int *)data;
  703                 break;
  704         case TUNGDEBUG:
  705                 *(int *)data = tundebug;
  706                 break;
  707         case TUNSLMODE:
  708                 mtx_lock(&tp->tun_mtx);
  709                 if (*(int *)data) {
  710                         tp->tun_flags |= TUN_LMODE;
  711                         tp->tun_flags &= ~TUN_IFHEAD;
  712                 } else
  713                         tp->tun_flags &= ~TUN_LMODE;
  714                 mtx_unlock(&tp->tun_mtx);
  715                 break;
  716         case TUNSIFHEAD:
  717                 mtx_lock(&tp->tun_mtx);
  718                 if (*(int *)data) {
  719                         tp->tun_flags |= TUN_IFHEAD;
  720                         tp->tun_flags &= ~TUN_LMODE;
  721                 } else
  722                         tp->tun_flags &= ~TUN_IFHEAD;
  723                 mtx_unlock(&tp->tun_mtx);
  724                 break;
  725         case TUNGIFHEAD:
  726                 mtx_lock(&tp->tun_mtx);
  727                 *(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
  728                 mtx_unlock(&tp->tun_mtx);
  729                 break;
  730         case TUNSIFMODE:
  731                 /* deny this if UP */
  732                 if (TUN2IFP(tp)->if_flags & IFF_UP)
  733                         return(EBUSY);
  734 
  735                 switch (*(int *)data & ~IFF_MULTICAST) {
  736                 case IFF_POINTOPOINT:
  737                 case IFF_BROADCAST:
  738                         mtx_lock(&tp->tun_mtx);
  739                         TUN2IFP(tp)->if_flags &=
  740                             ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
  741                         TUN2IFP(tp)->if_flags |= *(int *)data;
  742                         mtx_unlock(&tp->tun_mtx);
  743                         break;
  744                 default:
  745                         return(EINVAL);
  746                 }
  747                 break;
  748         case TUNSIFPID:
  749                 mtx_lock(&tp->tun_mtx);
  750                 tp->tun_pid = curthread->td_proc->p_pid;
  751                 mtx_unlock(&tp->tun_mtx);
  752                 break;
  753         case FIONBIO:
  754                 break;
  755         case FIOASYNC:
  756                 mtx_lock(&tp->tun_mtx);
  757                 if (*(int *)data)
  758                         tp->tun_flags |= TUN_ASYNC;
  759                 else
  760                         tp->tun_flags &= ~TUN_ASYNC;
  761                 mtx_unlock(&tp->tun_mtx);
  762                 break;
  763         case FIONREAD:
  764                 if (!IFQ_IS_EMPTY(&TUN2IFP(tp)->if_snd)) {
  765                         struct mbuf *mb;
  766                         IFQ_LOCK(&TUN2IFP(tp)->if_snd);
  767                         IFQ_POLL_NOLOCK(&TUN2IFP(tp)->if_snd, mb);
  768                         for (*(int *)data = 0; mb != NULL; mb = mb->m_next)
  769                                 *(int *)data += mb->m_len;
  770                         IFQ_UNLOCK(&TUN2IFP(tp)->if_snd);
  771                 } else
  772                         *(int *)data = 0;
  773                 break;
  774         case FIOSETOWN:
  775                 return (fsetown(*(int *)data, &tp->tun_sigio));
  776 
  777         case FIOGETOWN:
  778                 *(int *)data = fgetown(&tp->tun_sigio);
  779                 return (0);
  780 
  781         /* This is deprecated, FIOSETOWN should be used instead. */
  782         case TIOCSPGRP:
  783                 return (fsetown(-(*(int *)data), &tp->tun_sigio));
  784 
  785         /* This is deprecated, FIOGETOWN should be used instead. */
  786         case TIOCGPGRP:
  787                 *(int *)data = -fgetown(&tp->tun_sigio);
  788                 return (0);
  789 
  790         default:
  791                 return (ENOTTY);
  792         }
  793         return (0);
  794 }
  795 
  796 /*
  797  * The cdevsw read interface - reads a packet at a time, or at
  798  * least as much of a packet as can be read.
  799  */
  800 static  int
  801 tunread(struct cdev *dev, struct uio *uio, int flag)
  802 {
  803         struct tun_softc *tp = dev->si_drv1;
  804         struct ifnet    *ifp = TUN2IFP(tp);
  805         struct mbuf     *m;
  806         int             error=0, len;
  807 
  808         TUNDEBUG (ifp, "read\n");
  809         mtx_lock(&tp->tun_mtx);
  810         if ((tp->tun_flags & TUN_READY) != TUN_READY) {
  811                 mtx_unlock(&tp->tun_mtx);
  812                 TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
  813                 return (EHOSTDOWN);
  814         }
  815 
  816         tp->tun_flags &= ~TUN_RWAIT;
  817 
  818         do {
  819                 IFQ_DEQUEUE(&ifp->if_snd, m);
  820                 if (m == NULL) {
  821                         if (flag & O_NONBLOCK) {
  822                                 mtx_unlock(&tp->tun_mtx);
  823                                 return (EWOULDBLOCK);
  824                         }
  825                         tp->tun_flags |= TUN_RWAIT;
  826                         error = mtx_sleep(tp, &tp->tun_mtx, PCATCH | (PZERO + 1),
  827                             "tunread", 0);
  828                         if (error != 0) {
  829                                 mtx_unlock(&tp->tun_mtx);
  830                                 return (error);
  831                         }
  832                 }
  833         } while (m == NULL);
  834         mtx_unlock(&tp->tun_mtx);
  835 
  836         while (m && uio->uio_resid > 0 && error == 0) {
  837                 len = min(uio->uio_resid, m->m_len);
  838                 if (len != 0)
  839                         error = uiomove(mtod(m, void *), len, uio);
  840                 m = m_free(m);
  841         }
  842 
  843         if (m) {
  844                 TUNDEBUG(ifp, "Dropping mbuf\n");
  845                 m_freem(m);
  846         }
  847         return (error);
  848 }
  849 
  850 /*
  851  * the cdevsw write interface - an atomic write is a packet - or else!
  852  */
  853 static  int
  854 tunwrite(struct cdev *dev, struct uio *uio, int flag)
  855 {
  856         struct tun_softc *tp = dev->si_drv1;
  857         struct ifnet    *ifp = TUN2IFP(tp);
  858         struct mbuf     *m;
  859         uint32_t        family;
  860         int             isr;
  861 
  862         TUNDEBUG(ifp, "tunwrite\n");
  863 
  864         if ((ifp->if_flags & IFF_UP) != IFF_UP)
  865                 /* ignore silently */
  866                 return (0);
  867 
  868         if (uio->uio_resid == 0)
  869                 return (0);
  870 
  871         if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
  872                 TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
  873                 return (EIO);
  874         }
  875 
  876         if ((m = m_uiotombuf(uio, M_DONTWAIT, 0, 0, M_PKTHDR)) == NULL) {
  877                 ifp->if_ierrors++;
  878                 return (ENOBUFS);
  879         }
  880 
  881         m->m_pkthdr.rcvif = ifp;
  882 #ifdef MAC
  883         mac_ifnet_create_mbuf(ifp, m);
  884 #endif
  885 
  886         /* Could be unlocked read? */
  887         mtx_lock(&tp->tun_mtx);
  888         if (tp->tun_flags & TUN_IFHEAD) {
  889                 mtx_unlock(&tp->tun_mtx);
  890                 if (m->m_len < sizeof(family) &&
  891                     (m = m_pullup(m, sizeof(family))) == NULL)
  892                         return (ENOBUFS);
  893                 family = ntohl(*mtod(m, u_int32_t *));
  894                 m_adj(m, sizeof(family));
  895         } else {
  896                 mtx_unlock(&tp->tun_mtx);
  897                 family = AF_INET;
  898         }
  899 
  900         BPF_MTAP2(ifp, &family, sizeof(family), m);
  901 
  902         switch (family) {
  903 #ifdef INET
  904         case AF_INET:
  905                 isr = NETISR_IP;
  906                 break;
  907 #endif
  908 #ifdef INET6
  909         case AF_INET6:
  910                 isr = NETISR_IPV6;
  911                 break;
  912 #endif
  913 #ifdef IPX
  914         case AF_IPX:
  915                 isr = NETISR_IPX;
  916                 break;
  917 #endif
  918 #ifdef NETATALK
  919         case AF_APPLETALK:
  920                 isr = NETISR_ATALK2;
  921                 break;
  922 #endif
  923         default:
  924                 m_freem(m);
  925                 return (EAFNOSUPPORT);
  926         }
  927         /* First chunk of an mbuf contains good junk */
  928         if (harvest.point_to_point)
  929                 random_harvest(m, 16, 3, 0, RANDOM_NET);
  930         ifp->if_ibytes += m->m_pkthdr.len;
  931         ifp->if_ipackets++;
  932         CURVNET_SET(ifp->if_vnet);
  933         M_SETFIB(m, ifp->if_fib);
  934         netisr_dispatch(isr, m);
  935         CURVNET_RESTORE();
  936         return (0);
  937 }
  938 
  939 /*
  940  * tunpoll - the poll interface; this is really only useful for reads.
  941  * The write check always returns true: a write never blocks, it either
  942  * accepts the packet or drops it.
  943  */
  944 static  int
  945 tunpoll(struct cdev *dev, int events, struct thread *td)
  946 {
  947         struct tun_softc *tp = dev->si_drv1;
  948         struct ifnet    *ifp = TUN2IFP(tp);
  949         int             revents = 0;
  950         struct mbuf     *m;
  951 
  952         TUNDEBUG(ifp, "tunpoll\n");
  953 
  954         if (events & (POLLIN | POLLRDNORM)) {
  955                 IFQ_LOCK(&ifp->if_snd);
  956                 IFQ_POLL_NOLOCK(&ifp->if_snd, m);
  957                 if (m != NULL) {
  958                         TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
  959                         revents |= events & (POLLIN | POLLRDNORM);
  960                 } else {
  961                         TUNDEBUG(ifp, "tunpoll waiting\n");
  962                         selrecord(td, &tp->tun_rsel);
  963                 }
  964                 IFQ_UNLOCK(&ifp->if_snd);
  965         }
  966         if (events & (POLLOUT | POLLWRNORM))
  967                 revents |= events & (POLLOUT | POLLWRNORM);
  968 
  969         return (revents);
  970 }
  971 
  972 /*
  973  * tunkqfilter - support for the kevent() system call.
  974  */
  975 static int
  976 tunkqfilter(struct cdev *dev, struct knote *kn)
  977 {
  978         struct tun_softc        *tp = dev->si_drv1;
  979         struct ifnet    *ifp = TUN2IFP(tp);
  980 
  981         switch(kn->kn_filter) {
  982         case EVFILT_READ:
  983                 TUNDEBUG(ifp, "%s kqfilter: EVFILT_READ, minor = %#x\n",
  984                     ifp->if_xname, dev2unit(dev));
  985                 kn->kn_fop = &tun_read_filterops;
  986                 break;
  987 
  988         case EVFILT_WRITE:
  989                 TUNDEBUG(ifp, "%s kqfilter: EVFILT_WRITE, minor = %#x\n",
  990                     ifp->if_xname, dev2unit(dev));
  991                 kn->kn_fop = &tun_write_filterops;
  992                 break;
  993 
  994         default:
  995                 TUNDEBUG(ifp, "%s kqfilter: invalid filter, minor = %#x\n",
  996                     ifp->if_xname, dev2unit(dev));
  997                 return(EINVAL);
  998         }
  999 
 1000         kn->kn_hook = tp;
 1001         knlist_add(&tp->tun_rsel.si_note, kn, 0);
 1002 
 1003         return (0);
 1004 }
 1005 
 1006 /*
  1007  * Return true if there is data in the interface queue.
 1008  */
 1009 static int
 1010 tunkqread(struct knote *kn, long hint)
 1011 {
 1012         int                     ret;
 1013         struct tun_softc        *tp = kn->kn_hook;
 1014         struct cdev             *dev = tp->tun_dev;
 1015         struct ifnet    *ifp = TUN2IFP(tp);
 1016 
 1017         if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) {
 1018                 TUNDEBUG(ifp,
 1019                     "%s have data in the queue.  Len = %d, minor = %#x\n",
 1020                     ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev));
 1021                 ret = 1;
 1022         } else {
 1023                 TUNDEBUG(ifp,
 1024                     "%s waiting for data, minor = %#x\n", ifp->if_xname,
 1025                     dev2unit(dev));
 1026                 ret = 0;
 1027         }
 1028 
 1029         return (ret);
 1030 }
 1031 
 1032 /*
  1033  * Writes are always possible; report the interface MTU in kn->kn_data.
 1034  */
 1035 static int
 1036 tunkqwrite(struct knote *kn, long hint)
 1037 {
 1038         struct tun_softc        *tp = kn->kn_hook;
 1039         struct ifnet    *ifp = TUN2IFP(tp);
 1040 
 1041         kn->kn_data = ifp->if_mtu;
 1042 
 1043         return (1);
 1044 }
 1045 
 1046 static void
 1047 tunkqdetach(struct knote *kn)
 1048 {
 1049         struct tun_softc        *tp = kn->kn_hook;
 1050 
 1051         knlist_remove(&tp->tun_rsel.si_note, kn, 0);
 1052 }
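
Below is a minimal userland sketch (not part of if_tun.c) of how the
character device implemented above is typically driven: open the clone
node, request the four-byte address-family header with TUNSIFHEAD, and
read one packet.  The device path /dev/tun0, the absence of interface
configuration (ifconfig(8) must still assign addresses and bring the
interface up), and the privilege requirements are assumptions of the
example, not something this file dictates.

#include <sys/types.h>
#include <sys/ioctl.h>

#include <net/if_tun.h>

#include <arpa/inet.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        unsigned char buf[TUNMRU];
        uint32_t af;
        ssize_t n;
        int fd, on = 1;

        /* Opening the device node creates/attaches the tun0 interface. */
        fd = open("/dev/tun0", O_RDWR);
        if (fd == -1)
                err(1, "open /dev/tun0");

        /* Ask for the 4-byte address-family header (sets TUN_IFHEAD). */
        if (ioctl(fd, TUNSIFHEAD, &on) == -1)
                err(1, "TUNSIFHEAD");

        /* Each read() returns exactly one queued packet (see tunread()). */
        n = read(fd, buf, sizeof(buf));
        if (n == -1)
                err(1, "read");
        if (n >= (ssize_t)sizeof(af)) {
                memcpy(&af, buf, sizeof(af));
                printf("read %zd payload bytes, address family %u\n",
                    n - (ssize_t)sizeof(af), (unsigned)ntohl(af));
        }
        close(fd);
        return (0);
}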
