FreeBSD/Linux Kernel Cross Reference
sys/geom/gate/g_gate.c

/*-
 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

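/*
 * GEOM Gate turns a provider into a conduit to userland: every bio the
 * kernel queues on a device's input queue is handed to a userland
 * process through the control device (G_GATE_CTL_NAME), serviced
 * there, and completed via another ioctl.  The ggatel(8) and
 * ggated(8)/ggatec(8) utilities are the usual consumers.
 */
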
static MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

static int g_gate_destroy_geom(struct gctl_req *, struct g_class *,
    struct g_geom *);
struct g_class g_gate_class = {
        .name = G_GATE_CLASS_NAME,
        .version = G_VERSION,
        .destroy_geom = g_gate_destroy_geom
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
        .d_version =    D_VERSION,
        .d_ioctl =      g_gate_ioctl,
        .d_name =       G_GATE_CTL_NAME
};

static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(&g_gate_list);
static struct mtx g_gate_list_mtx;

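/*
 * Mark the device for destruction; everything that inspects sc_flags
 * treats G_GATE_FLAG_DESTROY as "device is going away".
 */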
static void
g_gate_wither(struct g_gate_softc *sc)
{

        atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY);
}

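/*
 * Tear down a device.  Called with the topology lock and
 * g_gate_list_mtx held; g_gate_list_mtx is dropped before returning.
 * Unless 'force' is set, an open provider fails the destroy with
 * EBUSY.  If references are still held, only the DESTROY flag is set
 * and the final teardown is finished later by g_gate_release();
 * otherwise all queued bios are completed with ENXIO, both queue
 * mutexes are destroyed and the geom is withered.
 */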
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
        struct g_provider *pp;
        struct bio *bp;

        g_topology_assert();
        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        pp = sc->sc_provider;
        if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
                mtx_unlock(&g_gate_list_mtx);
                return (EBUSY);
        }
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                g_gate_wither(sc);
                LIST_REMOVE(sc, sc_next);
        }
        mtx_unlock(&g_gate_list_mtx);
        mtx_lock(&sc->sc_inqueue_mtx);
        wakeup(sc);
        mtx_unlock(&sc->sc_inqueue_mtx);
        if (sc->sc_ref > 0) {
                G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name);
                return (0);
        }
        callout_drain(&sc->sc_callout);
        mtx_lock(&sc->sc_inqueue_mtx);
        for (;;) {
                bp = bioq_first(&sc->sc_inqueue);
                if (bp != NULL) {
                        bioq_remove(&sc->sc_inqueue, bp);
                        atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                        G_GATE_LOGREQ(1, bp, "Request canceled.");
                        g_io_deliver(bp, ENXIO);
                } else {
                        break;
                }
        }
        mtx_destroy(&sc->sc_inqueue_mtx);
        mtx_lock(&sc->sc_outqueue_mtx);
        for (;;) {
                bp = bioq_first(&sc->sc_outqueue);
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                        G_GATE_LOGREQ(1, bp, "Request canceled.");
                        g_io_deliver(bp, ENXIO);
                } else {
                        break;
                }
        }
        mtx_destroy(&sc->sc_outqueue_mtx);
        G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name);
        pp->geom->softc = NULL;
        g_wither_geom(pp->geom, ENXIO);
        sc->sc_provider = NULL;
        free(sc, M_GATE);
        return (0);
}

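/*
 * Event-queue callback used by g_gate_release() to finish a deferred
 * destroy once the last reference is gone.
 */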
static void
g_gate_destroy_it(void *arg, int flag __unused)
{
        struct g_gate_softc *sc;

        g_topology_assert();
        sc = arg;
        mtx_lock(&g_gate_list_mtx);
        g_gate_destroy(sc, 1);
}

static int
g_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

        g_topology_assert();
        mtx_lock(&g_gate_list_mtx);
        return (g_gate_destroy(gp->softc, 0));
}

static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
        struct g_gate_softc *sc;

        if (dr <= 0 && dw <= 0 && de <= 0)
                return (0);
        sc = pp->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                return (ENXIO);
        /* XXX: Hack to allow read-only mounts. */
#if 0
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
                return (EPERM);
#endif
        if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
                return (EPERM);
        return (0);
}

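/*
 * GEOM start method.  Only BIO_READ, BIO_WRITE and BIO_DELETE are
 * passed through (writes are refused on read-only devices).  A full
 * queue fails the request with EIO; otherwise the bio is tagged with a
 * sequence number, disksorted onto the input queue and the userland
 * daemon sleeping in G_GATE_CMD_START is woken up.
 */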
static void
g_gate_start(struct bio *bp)
{
        struct g_gate_softc *sc;
        uint32_t qcount;

        sc = bp->bio_to->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                g_io_deliver(bp, ENXIO);
                return;
        }
        G_GATE_LOGREQ(2, bp, "Request received.");
        switch (bp->bio_cmd) {
        case BIO_READ:
                break;
        case BIO_DELETE:
        case BIO_WRITE:
                /* XXX: Hack to allow read-only mounts. */
                if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                        g_io_deliver(bp, EPERM);
                        return;
                }
                break;
        case BIO_GETATTR:
        default:
                G_GATE_LOGREQ(2, bp, "Ignoring request.");
                g_io_deliver(bp, EOPNOTSUPP);
                return;
        }

        atomic_store_rel_32(&qcount, sc->sc_queue_count);
        if (qcount > sc->sc_queue_size) {
                G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
                g_io_deliver(bp, EIO);
                return;
        }
        atomic_add_acq_32(&sc->sc_queue_count, 1);
        bp->bio_driver1 = (void *)sc->sc_seq;
        sc->sc_seq++;

        mtx_lock(&sc->sc_inqueue_mtx);
        bioq_disksort(&sc->sc_inqueue, bp);
        wakeup(sc);
        mtx_unlock(&sc->sc_inqueue_mtx);
}

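/*
 * Reference management.  g_gate_find() does a plain list lookup,
 * g_gate_hold() additionally takes a reference (and refuses devices
 * already marked for destruction), and g_gate_release() drops one,
 * triggering the deferred destroy when the last reference disappears.
 */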
static struct g_gate_softc *
g_gate_find(u_int unit)
{
        struct g_gate_softc *sc;

        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        LIST_FOREACH(sc, &g_gate_list, sc_next) {
                if (sc->sc_unit == unit)
                        break;
        }
        return (sc);
}

static struct g_gate_softc *
g_gate_hold(u_int unit)
{
        struct g_gate_softc *sc;

        mtx_lock(&g_gate_list_mtx);
        sc = g_gate_find(unit);
        if (sc != NULL) {
                if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                        sc = NULL;
                else
                        sc->sc_ref++;
        }
        mtx_unlock(&g_gate_list_mtx);
        return (sc);
}

static void
g_gate_release(struct g_gate_softc *sc)
{

        g_topology_assert_not();
        mtx_lock(&g_gate_list_mtx);
        sc->sc_ref--;
        KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
        if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                mtx_unlock(&g_gate_list_mtx);
                g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL);
        } else {
                mtx_unlock(&g_gate_list_mtx);
        }
}

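/*
 * Unit number allocation.  A non-negative request succeeds only if the
 * unit is unused; -1 asks for the lowest free unit, with 666 as an
 * arbitrary upper bound.  Returns -1 on failure.
 */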
static int
g_gate_getunit(int unit)
{
        struct g_gate_softc *sc;

        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        if (unit >= 0) {
                LIST_FOREACH(sc, &g_gate_list, sc_next) {
                        if (sc->sc_unit == unit)
                                return (-1);
                }
        } else {
                unit = 0;
once_again:
                LIST_FOREACH(sc, &g_gate_list, sc_next) {
                        if (sc->sc_unit == unit) {
                                if (++unit > 666)
                                        return (-1);
                                goto once_again;
                        }
                }
        }
        return (unit);
}

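/*
 * Watchdog callout: any request that has been sitting on a queue for
 * five seconds or more is completed with EIO, and the callout re-arms
 * itself every sc_timeout seconds until the device is destroyed.
 */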
static void
g_gate_guard(void *arg)
{
        struct g_gate_softc *sc;
        struct bintime curtime;
        struct bio *bp, *bp2;

        sc = arg;
        binuptime(&curtime);
        g_gate_hold(sc->sc_unit);
        mtx_lock(&sc->sc_inqueue_mtx);
        TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_inqueue, bp);
                atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        mtx_unlock(&sc->sc_inqueue_mtx);
        mtx_lock(&sc->sc_outqueue_mtx);
        TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_outqueue, bp);
                atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        mtx_unlock(&sc->sc_outqueue_mtx);
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        g_gate_release(sc);
}

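/*
 * Export per-device state (access mode, timeout, info string, queue
 * fill and reference count) into the GEOM configuration XML.
 */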
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
        struct g_gate_softc *sc;

        sc = gp->softc;
        if (sc == NULL || pp != NULL || cp != NULL)
                return;
        g_gate_hold(sc->sc_unit);
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
        } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "write-only");
        } else {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "read-write");
        }
        sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
        sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
        sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
            sc->sc_queue_count);
        sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
            sc->sc_queue_size);
        sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
        g_topology_unlock();
        g_gate_release(sc);
        g_topology_lock();
}

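/*
 * Handle G_GATE_CMD_CREATE: validate the user-supplied geometry and
 * flags, set up the softc and its queues, pick a unit number and
 * create the geom and provider; the watchdog is armed only when a
 * non-zero timeout was requested.
 */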
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
        struct g_gate_softc *sc;
        struct g_geom *gp;
        struct g_provider *pp;

        if (ggio->gctl_mediasize == 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        /* A zero sector size would trap in the modulo check below. */
        if (ggio->gctl_sectorsize <= 0 || !powerof2(ggio->gctl_sectorsize)) {
                G_GATE_DEBUG(1, "Invalid sector size.");
                return (EINVAL);
        }
        if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
            (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                G_GATE_DEBUG(1, "Invalid flags.");
                return (EINVAL);
        }
        if (ggio->gctl_unit < -1) {
                G_GATE_DEBUG(1, "Invalid unit number.");
                return (EINVAL);
        }

        sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
        sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
        strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
        sc->sc_seq = 0;
        bioq_init(&sc->sc_inqueue);
        mtx_init(&sc->sc_inqueue_mtx, "gg:inqueue", NULL, MTX_DEF);
        bioq_init(&sc->sc_outqueue);
        mtx_init(&sc->sc_outqueue_mtx, "gg:outqueue", NULL, MTX_DEF);
        sc->sc_queue_count = 0;
        sc->sc_queue_size = ggio->gctl_maxcount;
        if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
                sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
        sc->sc_timeout = ggio->gctl_timeout;
        callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
        mtx_lock(&g_gate_list_mtx);
        ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
        if (ggio->gctl_unit == -1) {
                mtx_unlock(&g_gate_list_mtx);
                mtx_destroy(&sc->sc_inqueue_mtx);
                mtx_destroy(&sc->sc_outqueue_mtx);
                free(sc, M_GATE);
                return (EBUSY);
        }
        sc->sc_unit = ggio->gctl_unit;
        LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
        mtx_unlock(&g_gate_list_mtx);

        g_topology_lock();
        gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
            sc->sc_unit);
        gp->start = g_gate_start;
        gp->access = g_gate_access;
        gp->dumpconf = g_gate_dumpconf;
        gp->softc = sc;
        pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
        pp->mediasize = ggio->gctl_mediasize;
        pp->sectorsize = ggio->gctl_sectorsize;
        sc->sc_provider = pp;
        g_error_provider(pp, 0);
        g_topology_unlock();

        if (sc->sc_timeout > 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        return (0);
}

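/*
 * Control-device ioctl handler; the userland daemon drives the whole
 * protocol through it.  G_GATE_CMD_CREATE and G_GATE_CMD_DESTROY
 * manage devices, while the service loop alternates G_GATE_CMD_START
 * (fetch the next pending bio, sleeping until one arrives) with
 * G_GATE_CMD_DONE (report the outcome of a previously fetched bio).
 */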
#define G_GATE_CHECK_VERSION(ggio)      do {                            \
        if ((ggio)->gctl_version != G_GATE_VERSION)                     \
                return (EINVAL);                                        \
} while (0)
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
        struct g_gate_softc *sc;
        struct bio *bp;
        int error = 0;

        G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd,
            addr, flags, td);

        switch (cmd) {
        case G_GATE_CMD_CREATE:
            {
                struct g_gate_ctl_create *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                DROP_GIANT();
                error = g_gate_create(ggio);
                PICKUP_GIANT();
                /*
                 * Reset the TDP_GEOM flag.
                 * There are pending events for sure, because we just created
                 * a new provider and other classes want to taste it, but we
                 * cannot answer their I/O requests until we return to
                 * userland.
                 */
                td->td_pflags &= ~TDP_GEOM;
                return (error);
            }
        case G_GATE_CMD_DESTROY:
            {
                struct g_gate_ctl_destroy *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENXIO);
                DROP_GIANT();
                g_topology_lock();
                mtx_lock(&g_gate_list_mtx);
                error = g_gate_destroy(sc, ggio->gctl_force);
                if (error == 0)
                        g_gate_wither(sc);
                g_topology_unlock();
                PICKUP_GIANT();
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_START:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENXIO);
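                /*
                 * Wait for a request on the input queue.  The loop is
                 * left via break with sc_inqueue_mtx held and bp
                 * pointing at the first bio; msleep() drops the mutex
                 * (PDROP), so the cancel paths return without it.
                 */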
                for (;;) {
                        mtx_lock(&sc->sc_inqueue_mtx);
                        bp = bioq_first(&sc->sc_inqueue);
                        if (bp != NULL)
                                break;
                        if (msleep(sc, &sc->sc_inqueue_mtx,
                            PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
                                g_gate_release(sc);
                                ggio->gctl_error = ECANCELED;
                                return (0);
                        }
                        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                                g_gate_release(sc);
                                ggio->gctl_error = ECANCELED;
                                return (0);
                        }
                }
                ggio->gctl_cmd = bp->bio_cmd;
                if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
                    bp->bio_length > ggio->gctl_length) {
                        mtx_unlock(&sc->sc_inqueue_mtx);
                        g_gate_release(sc);
                        ggio->gctl_length = bp->bio_length;
                        ggio->gctl_error = ENOMEM;
                        return (0);
                }
                bioq_remove(&sc->sc_inqueue, bp);
                atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                mtx_unlock(&sc->sc_inqueue_mtx);
                ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
                ggio->gctl_offset = bp->bio_offset;
                ggio->gctl_length = bp->bio_length;
                switch (bp->bio_cmd) {
                case BIO_READ:
                        break;
                case BIO_DELETE:
                case BIO_WRITE:
                        error = copyout(bp->bio_data, ggio->gctl_data,
                            bp->bio_length);
                        if (error != 0) {
                                mtx_lock(&sc->sc_inqueue_mtx);
                                bioq_disksort(&sc->sc_inqueue, bp);
                                mtx_unlock(&sc->sc_inqueue_mtx);
                                g_gate_release(sc);
                                return (error);
                        }
                        break;
                }
                mtx_lock(&sc->sc_outqueue_mtx);
                bioq_insert_tail(&sc->sc_outqueue, bp);
                atomic_add_acq_32(&sc->sc_queue_count, 1);
                mtx_unlock(&sc->sc_outqueue_mtx);
                g_gate_release(sc);
                return (0);
            }
        case G_GATE_CMD_DONE:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENOENT);
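                /*
                 * Find the bio this completion refers to by sequence
                 * number.  It may have been canceled in the meantime
                 * (e.g. by the watchdog), in which case the completion
                 * is silently ignored.
                 */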
                mtx_lock(&sc->sc_outqueue_mtx);
                TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
                        if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
                                break;
                }
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                }
                mtx_unlock(&sc->sc_outqueue_mtx);
                if (bp == NULL) {
                        /*
                         * Request was probably canceled.
                         */
                        g_gate_release(sc);
                        return (0);
                }
                if (ggio->gctl_error == EAGAIN) {
                        bp->bio_error = 0;
                        G_GATE_LOGREQ(1, bp, "Request desisted.");
                        atomic_add_acq_32(&sc->sc_queue_count, 1);
                        mtx_lock(&sc->sc_inqueue_mtx);
                        bioq_disksort(&sc->sc_inqueue, bp);
                        wakeup(sc);
                        mtx_unlock(&sc->sc_inqueue_mtx);
                } else {
                        bp->bio_error = ggio->gctl_error;
                        if (bp->bio_error == 0) {
                                bp->bio_completed = bp->bio_length;
                                switch (bp->bio_cmd) {
                                case BIO_READ:
                                        error = copyin(ggio->gctl_data,
                                            bp->bio_data, bp->bio_length);
                                        if (error != 0)
                                                bp->bio_error = error;
                                        break;
                                case BIO_DELETE:
                                case BIO_WRITE:
                                        break;
                                }
                        }
                        G_GATE_LOGREQ(2, bp, "Request done.");
                        g_io_deliver(bp, bp->bio_error);
                }
                g_gate_release(sc);
                return (error);
            }
        }
        return (ENOIOCTL);
}

static void
g_gate_device(void)
{

        status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
            G_GATE_CTL_NAME);
}

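/*
 * Module lifecycle: set up the device list mutex and the control
 * device on load; unloading is refused while any gate device exists.
 */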
static int
g_gate_modevent(module_t mod, int type, void *data)
{
        int error = 0;

        switch (type) {
        case MOD_LOAD:
                mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
                g_gate_device();
                break;
        case MOD_UNLOAD:
                mtx_lock(&g_gate_list_mtx);
                if (!LIST_EMPTY(&g_gate_list)) {
                        mtx_unlock(&g_gate_list_mtx);
                        error = EBUSY;
                        break;
                }
                mtx_unlock(&g_gate_list_mtx);
                mtx_destroy(&g_gate_list_mtx);
                if (status_dev != NULL)
                        destroy_dev(status_dev);
                break;
        default:
                return (EOPNOTSUPP);
        }

        return (error);
}

static moduledata_t g_gate_module = {
        G_GATE_MOD_NAME,
        g_gate_modevent,
        NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
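
For orientation, here is a minimal userland sketch of device creation through the interface above. It is a sketch, not code from this file: it assumes the g_gate_ctl_create fields used by g_gate_create(), a control device named G_GATE_CTL_NAME under /dev, and arbitrary example values (1 GB media, 512-byte sectors); ggatel(8) and ggated(8) are the real consumers of this API.

#include <sys/param.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <err.h>

#include <geom/gate/g_gate.h>

int
main(void)
{
        struct g_gate_ctl_create ggio;
        int fd;

        /* Open the control device created by g_gate_device(). */
        fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
        if (fd == -1)
                err(1, "open");

        memset(&ggio, 0, sizeof(ggio));
        ggio.gctl_version = G_GATE_VERSION; /* checked by G_GATE_CHECK_VERSION() */
        ggio.gctl_mediasize = 1024 * 1024 * 1024; /* multiple of sector size */
        ggio.gctl_sectorsize = 512;         /* must be a power of two */
        ggio.gctl_flags = 0;                /* read-write */
        ggio.gctl_maxcount = 64;            /* clamped to G_GATE_MAX_QUEUE_SIZE */
        ggio.gctl_timeout = 30;             /* seconds; 0 disables the watchdog */
        ggio.gctl_unit = -1;                /* let g_gate_getunit() pick one */
        strlcpy(ggio.gctl_info, "example", sizeof(ggio.gctl_info));

        /* On success the kernel fills in the allocated unit number. */
        if (ioctl(fd, G_GATE_CMD_CREATE, &ggio) == -1)
                err(1, "G_GATE_CMD_CREATE");

        /* A daemon would now loop on G_GATE_CMD_START / G_GATE_CMD_DONE. */
        return (0);
}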
