FreeBSD/Linux Kernel Cross Reference
sys/geom/gate/g_gate.c

/*-
 * Copyright (c) 2004-2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/6.0/sys/geom/gate/g_gate.c 147887 2005-07-10 21:10:20Z pjd $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

static int g_gate_destroy_geom(struct gctl_req *, struct g_class *,
    struct g_geom *);
struct g_class g_gate_class = {
        .name = G_GATE_CLASS_NAME,
        .version = G_VERSION,
        .destroy_geom = g_gate_destroy_geom
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
        .d_version =    D_VERSION,
        .d_ioctl =      g_gate_ioctl,
        .d_name =       G_GATE_CTL_NAME
};

static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(&g_gate_list);
static struct mtx g_gate_list_mtx;

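/*
 * Mark the device for destruction.  Once G_GATE_FLAG_DESTROY is set,
 * new requests are rejected with ENXIO and the device is fully torn
 * down when its last reference is dropped.
 */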
static void
g_gate_wither(struct g_gate_softc *sc)
{

        atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY);
}

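/*
 * Tear down a device: mark it destroyed, remove it from the global
 * list, wake up any worker sleeping in G_GATE_CMD_START and cancel all
 * queued requests with ENXIO.  Without 'force', an open provider makes
 * this fail with EBUSY.  If the softc is still referenced, only the
 * flag is set and the final teardown is deferred to g_gate_release().
 * Expects the topology lock and g_gate_list_mtx to be held;
 * g_gate_list_mtx is dropped on return.
 */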
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
        struct g_provider *pp;
        struct bio *bp;

        g_topology_assert();
        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        pp = sc->sc_provider;
        if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
                mtx_unlock(&g_gate_list_mtx);
                return (EBUSY);
        }
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                g_gate_wither(sc);
                LIST_REMOVE(sc, sc_next);
        }
        mtx_unlock(&g_gate_list_mtx);
        mtx_lock(&sc->sc_queue_mtx);
        wakeup(sc);
        mtx_unlock(&sc->sc_queue_mtx);
        if (sc->sc_ref > 0) {
                G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name);
                return (0);
        }
        callout_drain(&sc->sc_callout);
        mtx_lock(&sc->sc_queue_mtx);
        for (;;) {
                bp = bioq_first(&sc->sc_inqueue);
                if (bp != NULL) {
                        bioq_remove(&sc->sc_inqueue, bp);
                        sc->sc_queue_count--;
                        G_GATE_LOGREQ(1, bp, "Request canceled.");
                        g_io_deliver(bp, ENXIO);
                } else {
                        break;
                }
        }
        for (;;) {
                bp = bioq_first(&sc->sc_outqueue);
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        sc->sc_queue_count--;
                        G_GATE_LOGREQ(1, bp, "Request canceled.");
                        g_io_deliver(bp, ENXIO);
                } else {
                        break;
                }
        }
        mtx_destroy(&sc->sc_queue_mtx);
        G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name);
        pp->geom->softc = NULL;
        g_wither_geom(pp->geom, ENXIO);
        sc->sc_provider = NULL;
        free(sc, M_GATE);
        return (0);
}

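/*
 * GEOM event handler which finishes a deferred destroy once the last
 * reference has been dropped (posted from g_gate_release()).
 */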
static void
g_gate_destroy_it(void *arg, int flag __unused)
{
        struct g_gate_softc *sc;

        g_topology_assert();
        sc = arg;
        mtx_lock(&g_gate_list_mtx);
        g_gate_destroy(sc, 1);
}

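/* GEOM class destroy_geom method; performs a non-forced destroy. */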
static int
g_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

        g_topology_assert();
        mtx_lock(&g_gate_list_mtx);
        return (g_gate_destroy(gp->softc, 0));
}

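/*
 * Access method: refuse new openers of a dying device and enforce the
 * write-only flag.  The corresponding read-only check is compiled out
 * (see the #if 0 below) so that read-only mounts still work.
 */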
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
        struct g_gate_softc *sc;

        if (dr <= 0 && dw <= 0 && de <= 0)
                return (0);
        sc = pp->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                return (ENXIO);
        /* XXX: Hack to allow read-only mounts. */
#if 0
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
                return (EPERM);
#endif
        if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
                return (EPERM);
        return (0);
}

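/*
 * I/O entry point.  Supported requests are stamped with a sequence
 * number and placed on the incoming queue for userland to pick up via
 * G_GATE_CMD_START; if the queue is full the request is failed with
 * EIO, and unsupported requests are rejected immediately.
 */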
static void
g_gate_start(struct bio *bp)
{
        struct g_gate_softc *sc;

        sc = bp->bio_to->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                g_io_deliver(bp, ENXIO);
                return;
        }
        G_GATE_LOGREQ(2, bp, "Request received.");
        switch (bp->bio_cmd) {
        case BIO_READ:
                break;
        case BIO_DELETE:
        case BIO_WRITE:
                /* XXX: Hack to allow read-only mounts. */
                if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                        g_io_deliver(bp, EPERM);
                        return;
                }
                break;
        case BIO_GETATTR:
        default:
                G_GATE_LOGREQ(2, bp, "Ignoring request.");
                g_io_deliver(bp, EOPNOTSUPP);
                return;
        }

        mtx_lock(&sc->sc_queue_mtx);
        if (sc->sc_queue_count > sc->sc_queue_size) {
                mtx_unlock(&sc->sc_queue_mtx);
                G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
                g_io_deliver(bp, EIO);
                return;
        }

        bp->bio_driver1 = (void *)sc->sc_seq;
        sc->sc_seq++;
        sc->sc_queue_count++;

        bioq_insert_tail(&sc->sc_inqueue, bp);
        wakeup(sc);

        mtx_unlock(&sc->sc_queue_mtx);
}

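/* Look up a softc by unit number; returns NULL if no such device. */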
static struct g_gate_softc *
g_gate_find(u_int unit)
{
        struct g_gate_softc *sc;

        LIST_FOREACH(sc, &g_gate_list, sc_next) {
                if (sc->sc_unit == unit)
                        break;
        }
        return (sc);
}

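/*
 * Find the device and acquire a reference to it, refusing devices that
 * are already being destroyed.
 */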
static struct g_gate_softc *
g_gate_hold(u_int unit)
{
        struct g_gate_softc *sc;

        mtx_lock(&g_gate_list_mtx);
        sc = g_gate_find(unit);
        if (sc != NULL) {
                if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                        sc = NULL;
                else
                        sc->sc_ref++;
        }
        mtx_unlock(&g_gate_list_mtx);
        return (sc);
}

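/*
 * Drop a reference.  The last holder of a device marked for destruction
 * posts g_gate_destroy_it() as a GEOM event and waits for it to finish
 * the teardown.
 */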
static void
g_gate_release(struct g_gate_softc *sc)
{

        g_topology_assert_not();
        mtx_lock(&g_gate_list_mtx);
        sc->sc_ref--;
        KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
        if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                mtx_unlock(&g_gate_list_mtx);
                g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL);
        } else {
                mtx_unlock(&g_gate_list_mtx);
        }
}

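/*
 * Validate or allocate a unit number.  A non-negative request fails
 * with -1 if that unit is already taken; otherwise the lowest free
 * unit is returned (the search gives up past unit 666).
 */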
static int
g_gate_getunit(int unit)
{
        struct g_gate_softc *sc;

        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        if (unit >= 0) {
                LIST_FOREACH(sc, &g_gate_list, sc_next) {
                        if (sc->sc_unit == unit)
                                return (-1);
                }
        } else {
                unit = 0;
once_again:
                LIST_FOREACH(sc, &g_gate_list, sc_next) {
                        if (sc->sc_unit == unit) {
                                if (++unit > 666)
                                        return (-1);
                                goto once_again;
                        }
                }
        }
        return (unit);
}

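/*
 * Watchdog callout: requests that have been queued for roughly five
 * seconds or more are failed with EIO, and the callout is rearmed
 * every sc_timeout seconds unless the device is being destroyed.
 */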
static void
g_gate_guard(void *arg)
{
        struct g_gate_softc *sc;
        struct bintime curtime;
        struct bio *bp, *bp2;

        sc = arg;
        binuptime(&curtime);
        g_gate_hold(sc->sc_unit);
        mtx_lock(&sc->sc_queue_mtx);
        TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_inqueue, bp);
                sc->sc_queue_count--;
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_outqueue, bp);
                sc->sc_queue_count--;
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        mtx_unlock(&sc->sc_queue_mtx);
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        g_gate_release(sc);
}

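/*
 * dumpconf method: report the access mode, timeout, info string and
 * queue statistics in the geom's XML configuration.
 */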
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
        struct g_gate_softc *sc;

        sc = gp->softc;
        if (sc == NULL || pp != NULL || cp != NULL)
                return;
        g_gate_hold(sc->sc_unit);
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
        } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "write-only");
        } else {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "read-write");
        }
        sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
        sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
        sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
            sc->sc_queue_count);
        sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
            sc->sc_queue_size);
        sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
        g_topology_unlock();
        g_gate_release(sc);
        g_topology_lock();
}

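/*
 * Handle G_GATE_CMD_CREATE: validate the request, allocate and
 * initialize the softc, pick a unit number, create the geom and
 * provider, and arm the timeout callout if one was requested.
 */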
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
        struct g_gate_softc *sc;
        struct g_geom *gp;
        struct g_provider *pp;

        if (ggio->gctl_mediasize == 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        /*
         * The sector size must be a positive power of two; a zero
         * sector size would cause a division by zero below.
         */
        if (ggio->gctl_sectorsize <= 0 || !powerof2(ggio->gctl_sectorsize)) {
                G_GATE_DEBUG(1, "Invalid sector size.");
                return (EINVAL);
        }
        if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
            (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                G_GATE_DEBUG(1, "Invalid flags.");
                return (EINVAL);
        }
        if (ggio->gctl_unit < -1) {
                G_GATE_DEBUG(1, "Invalid unit number.");
                return (EINVAL);
        }

        sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
        sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
        strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
        sc->sc_seq = 0;
        bioq_init(&sc->sc_inqueue);
        bioq_init(&sc->sc_outqueue);
        mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
        sc->sc_queue_count = 0;
        sc->sc_queue_size = ggio->gctl_maxcount;
        if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
                sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
        sc->sc_timeout = ggio->gctl_timeout;
        callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
        mtx_lock(&g_gate_list_mtx);
        ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
        if (ggio->gctl_unit == -1) {
                mtx_unlock(&g_gate_list_mtx);
                mtx_destroy(&sc->sc_queue_mtx);
                free(sc, M_GATE);
                return (EBUSY);
        }
        sc->sc_unit = ggio->gctl_unit;
        LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
        mtx_unlock(&g_gate_list_mtx);

        g_topology_lock();
        gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
            sc->sc_unit);
        gp->start = g_gate_start;
        gp->access = g_gate_access;
        gp->dumpconf = g_gate_dumpconf;
        gp->softc = sc;
        pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
        pp->mediasize = ggio->gctl_mediasize;
        pp->sectorsize = ggio->gctl_sectorsize;
        sc->sc_provider = pp;
        g_error_provider(pp, 0);
        g_topology_unlock();

        if (sc->sc_timeout > 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        return (0);
}

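/*
 * Fail the ioctl if userland was built against a different version of
 * the g_gate interface.
 */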
#define G_GATE_CHECK_VERSION(ggio)      do {                            \
        if ((ggio)->gctl_version != G_GATE_VERSION) {                   \
                printf("Version mismatch %d != %d.\n",                  \
                    (ggio)->gctl_version, G_GATE_VERSION);              \
                return (EINVAL);                                        \
        }                                                               \
} while (0)

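/*
 * Control device ioctl handler; this is the interface used by userland
 * (e.g. ggatec(8), ggated(8) and ggatel(8)) to create, destroy and
 * service gate devices.
 */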
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
        struct g_gate_softc *sc;
        struct bio *bp;
        int error = 0;

        G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
            flags, td);

        switch (cmd) {
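        /* Create a new gate device and its provider. */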
        case G_GATE_CMD_CREATE:
            {
                struct g_gate_ctl_create *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                error = g_gate_create(ggio);
                /*
                 * Reset the TDP_GEOM flag.  There are certainly pending
                 * events, because we have just created a new provider and
                 * other classes want to taste it, but we cannot answer
                 * their I/O requests until we return to userland.
                 */
                td->td_pflags &= ~TDP_GEOM;
                return (error);
            }
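        /* Destroy an existing device, optionally even while it is open. */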
        case G_GATE_CMD_DESTROY:
            {
                struct g_gate_ctl_destroy *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENXIO);
                g_topology_lock();
                mtx_lock(&g_gate_list_mtx);
                error = g_gate_destroy(sc, ggio->gctl_force);
                if (error == 0)
                        g_gate_wither(sc);
                g_topology_unlock();
                g_gate_release(sc);
                return (error);
            }
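        /*
         * Cancel requests already handed to userland: move them from
         * the outgoing queue back to the incoming queue, preserving
         * their original order, so they will be delivered again.
         */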
        case G_GATE_CMD_CANCEL:
            {
                struct g_gate_ctl_cancel *ggio = (void *)addr;
                struct bio *tbp, *lbp;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENXIO);
                lbp = NULL;
                mtx_lock(&sc->sc_queue_mtx);
                TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
                        if (ggio->gctl_seq == 0 ||
                            ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
                                G_GATE_LOGREQ(1, bp, "Request canceled.");
                                bioq_remove(&sc->sc_outqueue, bp);
                                /*
                                 * Be sure to put requests back onto incoming
                                 * queue in the proper order.
                                 */
                                if (lbp == NULL)
                                        bioq_insert_head(&sc->sc_inqueue, bp);
                                else {
                                        TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
                                            lbp, bp, bio_queue);
                                }
                                lbp = bp;
                                /*
                                 * If only one request was canceled, leave now.
                                 */
                                if (ggio->gctl_seq != 0)
                                        break;
                        }
                }
                mtx_unlock(&sc->sc_queue_mtx);
                g_gate_release(sc);
                return (error);
            }
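        /*
         * Hand the next request to userland, sleeping until one
         * arrives.  For writes and deletes, the data is copied out
         * along with the request, which moves to the outgoing queue
         * until userland completes it with G_GATE_CMD_DONE.
         */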
        case G_GATE_CMD_START:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_find(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENXIO);
                for (;;) {
                        mtx_lock(&sc->sc_queue_mtx);
                        bp = bioq_first(&sc->sc_inqueue);
                        if (bp != NULL)
                                break;
                        if (msleep(sc, &sc->sc_queue_mtx,
                            PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
                                ggio->gctl_error = ECANCELED;
                                return (0);
                        }
                        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                                ggio->gctl_error = ECANCELED;
                                return (0);
                        }
                }
                ggio->gctl_cmd = bp->bio_cmd;
                if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
                    bp->bio_length > ggio->gctl_length) {
                        mtx_unlock(&sc->sc_queue_mtx);
                        ggio->gctl_length = bp->bio_length;
                        ggio->gctl_error = ENOMEM;
                        return (0);
                }
                bioq_remove(&sc->sc_inqueue, bp);
                bioq_insert_tail(&sc->sc_outqueue, bp);
                mtx_unlock(&sc->sc_queue_mtx);

                ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
                ggio->gctl_offset = bp->bio_offset;
                ggio->gctl_length = bp->bio_length;

                switch (bp->bio_cmd) {
                case BIO_READ:
                        break;
                case BIO_DELETE:
                case BIO_WRITE:
                        error = copyout(bp->bio_data, ggio->gctl_data,
                            bp->bio_length);
                        if (error != 0) {
                                mtx_lock(&sc->sc_queue_mtx);
                                bioq_remove(&sc->sc_outqueue, bp);
                                bioq_insert_head(&sc->sc_inqueue, bp);
                                mtx_unlock(&sc->sc_queue_mtx);
                                return (error);
                        }
                        break;
                }
                return (0);
            }
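        /*
         * Complete a request serviced by userland: copy in the data
         * for reads and deliver the result, or requeue the request if
         * userland returned EAGAIN.
         */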
        case G_GATE_CMD_DONE:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_find(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENOENT);
                mtx_lock(&sc->sc_queue_mtx);
                TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
                        if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
                                break;
                }
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        sc->sc_queue_count--;
                }
                mtx_unlock(&sc->sc_queue_mtx);
                if (bp == NULL) {
                        /*
                         * Request was probably canceled.
                         */
                        return (0);
                }
                if (ggio->gctl_error == EAGAIN) {
                        bp->bio_error = 0;
                        G_GATE_LOGREQ(1, bp, "Request desisted.");
                        mtx_lock(&sc->sc_queue_mtx);
                        sc->sc_queue_count++;
                        bioq_insert_head(&sc->sc_inqueue, bp);
                        wakeup(sc);
                        mtx_unlock(&sc->sc_queue_mtx);
                } else {
                        bp->bio_error = ggio->gctl_error;
                        if (bp->bio_error == 0) {
                                bp->bio_completed = bp->bio_length;
                                switch (bp->bio_cmd) {
                                case BIO_READ:
                                        error = copyin(ggio->gctl_data,
                                            bp->bio_data, bp->bio_length);
                                        if (error != 0)
                                                bp->bio_error = error;
                                        break;
                                case BIO_DELETE:
                                case BIO_WRITE:
                                        break;
                                }
                        }
                        G_GATE_LOGREQ(2, bp, "Request done.");
                        g_io_deliver(bp, bp->bio_error);
                }
                return (error);
            }
        }
        return (ENOIOCTL);
}

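/* Create the control device node (G_GATE_CTL_NAME). */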
static void
g_gate_device(void)
{

        status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
            G_GATE_CTL_NAME);
}

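/*
 * Module event handler: set up the global list lock and control device
 * on load; refuse to unload while devices still exist.
 */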
static int
g_gate_modevent(module_t mod, int type, void *data)
{
        int error = 0;

        switch (type) {
        case MOD_LOAD:
                mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
                g_gate_device();
                break;
        case MOD_UNLOAD:
                mtx_lock(&g_gate_list_mtx);
                if (!LIST_EMPTY(&g_gate_list)) {
                        mtx_unlock(&g_gate_list_mtx);
                        error = EBUSY;
                        break;
                }
                mtx_unlock(&g_gate_list_mtx);
                mtx_destroy(&g_gate_list_mtx);
                if (status_dev != NULL)
                        destroy_dev(status_dev);
                break;
        default:
                return (EOPNOTSUPP);
        }

        return (error);
}

static moduledata_t g_gate_module = {
        G_GATE_MOD_NAME,
        g_gate_modevent,
        NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
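
For reference, the ioctl protocol implemented above is driven entirely from userland. The fragment below is a minimal, illustrative sketch of the worker loop a daemon could run against the control device created by g_gate_device(): it fetches requests with G_GATE_CMD_START, services them from a plain backing file, and completes them with G_GATE_CMD_DONE. The function name gg_serve, the 64 kB starting buffer, the file-backed store and the handling of BIO_DELETE as an ordinary write are illustrative choices, not part of g_gate; see ggatel(8) and ggated(8) for the real consumers. Structure definitions come from <geom/gate/g_gate.h>, as in the kernel code above.

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/ioctl.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <geom/gate/g_gate.h>

/*
 * Illustrative worker loop: serve requests for an already created unit
 * using the file behind 'fd' as backing store.  'ctl' is an open
 * descriptor for the g_gate control device.  Returns when the device
 * goes away or an unrecoverable error occurs.
 */
static void
gg_serve(int ctl, int unit, int fd)
{
        struct g_gate_ctl_io ggio;
        size_t bufsize = 65536;
        void *buf;

        if ((buf = malloc(bufsize)) == NULL)
                return;
        for (;;) {
                memset(&ggio, 0, sizeof(ggio));
                ggio.gctl_version = G_GATE_VERSION;
                ggio.gctl_unit = unit;
                ggio.gctl_data = buf;
                ggio.gctl_length = bufsize;
                if (ioctl(ctl, G_GATE_CMD_START, &ggio) == -1)
                        break;
                if (ggio.gctl_error == ECANCELED)
                        break;          /* Device is being destroyed. */
                if (ggio.gctl_error == ENOMEM) {
                        /*
                         * Buffer too small; the kernel reported the
                         * needed size and left the request queued, so
                         * grow the buffer and fetch it again.
                         */
                        bufsize = ggio.gctl_length;
                        if ((buf = realloc(buf, bufsize)) == NULL)
                                return;
                        continue;
                }
                if (ggio.gctl_error != 0)
                        break;
                switch (ggio.gctl_cmd) {
                case BIO_READ:
                        if (pread(fd, buf, ggio.gctl_length,
                            ggio.gctl_offset) == -1)
                                ggio.gctl_error = errno;
                        break;
                case BIO_DELETE:        /* Treated as a plain write here. */
                case BIO_WRITE:
                        if (pwrite(fd, buf, ggio.gctl_length,
                            ggio.gctl_offset) == -1)
                                ggio.gctl_error = errno;
                        break;
                }
                if (ioctl(ctl, G_GATE_CMD_DONE, &ggio) == -1)
                        break;
        }
        free(buf);
}

The ENOMEM retry works because the G_GATE_CMD_START handler above reports the required length and returns before removing an oversized request from the incoming queue; a production daemon would additionally create the unit with G_GATE_CMD_CREATE on the same descriptor and handle partial pread()/pwrite() results.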

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.