
FreeBSD/Linux Kernel Cross Reference
sys/geom/gate/g_gate.c
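g_gate is the kernel half of GEOM Gate: it creates disk-like providers (named G_GATE_PROVIDER_NAME plus a unit number) whose I/O is serviced by a userland process. Incoming bios are queued by g_gate_start(); a userland daemon collects them with the G_GATE_CMD_START ioctl on the control device and completes them with G_GATE_CMD_DONE.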


/*-
 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.3/sys/geom/gate/g_gate.c 133318 2004-08-08 07:57:53Z phk $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

static int g_gate_destroy_geom(struct gctl_req *, struct g_class *,
    struct g_geom *);
struct g_class g_gate_class = {
        .name = G_GATE_CLASS_NAME,
        .version = G_VERSION,
        .destroy_geom = g_gate_destroy_geom
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
        .d_version =    D_VERSION,
        .d_ioctl =      g_gate_ioctl,
        .d_name =       G_GATE_CTL_NAME
};

static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(&g_gate_list);
static struct mtx g_gate_list_mtx;

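/*
 * Mark the device for destruction.  Once G_GATE_FLAG_DESTROY is set,
 * g_gate_access() and g_gate_start() refuse new work with ENXIO and
 * sleeping ioctl threads bail out with ECANCELED.
 */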
static void
g_gate_wither(struct g_gate_softc *sc)
{

        atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY);
}

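/*
 * Tear the device down: cancel every queued bio with ENXIO, destroy the
 * queues and wither the provider's geom.  Called with the topology lock
 * and g_gate_list_mtx held; the list mutex is always dropped before
 * returning.  If references remain, the final teardown is deferred to
 * g_gate_release().
 */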
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
        struct g_provider *pp;
        struct bio *bp;

        g_topology_assert();
        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        pp = sc->sc_provider;
        if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
                mtx_unlock(&g_gate_list_mtx);
                return (EBUSY);
        }
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                g_gate_wither(sc);
                LIST_REMOVE(sc, sc_next);
        }
        mtx_unlock(&g_gate_list_mtx);
        mtx_lock(&sc->sc_inqueue_mtx);
        wakeup(sc);
        mtx_unlock(&sc->sc_inqueue_mtx);
        if (sc->sc_ref > 0) {
                G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name);
                return (0);
        }
        callout_drain(&sc->sc_callout);
        mtx_lock(&sc->sc_inqueue_mtx);
        for (;;) {
                bp = bioq_first(&sc->sc_inqueue);
                if (bp != NULL) {
                        bioq_remove(&sc->sc_inqueue, bp);
                        atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                        G_GATE_LOGREQ(1, bp, "Request canceled.");
                        g_io_deliver(bp, ENXIO);
                } else {
                        break;
                }
        }
        mtx_destroy(&sc->sc_inqueue_mtx);
        mtx_lock(&sc->sc_outqueue_mtx);
        for (;;) {
                bp = bioq_first(&sc->sc_outqueue);
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                        G_GATE_LOGREQ(1, bp, "Request canceled.");
                        g_io_deliver(bp, ENXIO);
                } else {
                        break;
                }
        }
        mtx_destroy(&sc->sc_outqueue_mtx);
        G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name);
        pp->geom->softc = NULL;
        g_wither_geom(pp->geom, ENXIO);
        sc->sc_provider = NULL;
        free(sc, M_GATE);
        return (0);
}

static void
g_gate_destroy_it(void *arg, int flag __unused)
{
        struct g_gate_softc *sc;

        g_topology_assert();
        sc = arg;
        mtx_lock(&g_gate_list_mtx);
        g_gate_destroy(sc, 1);
}

static int
g_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

        g_topology_assert();
        mtx_lock(&g_gate_list_mtx);
        return (g_gate_destroy(gp->softc, 0));
}

static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
        struct g_gate_softc *sc;

        if (dr <= 0 && dw <= 0 && de <= 0)
                return (0);
        sc = pp->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                return (ENXIO);
        /* XXX: Hack to allow read-only mounts. */
#if 0
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
                return (EPERM);
#endif
        if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
                return (EPERM);
        return (0);
}

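/*
 * GEOM start routine: validate the request, enforce the read-only/
 * write-only flags and the queue limit, tag the bio with a sequence
 * number, then disksort it onto the input queue and wake any userland
 * thread sleeping in G_GATE_CMD_START.
 */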
static void
g_gate_start(struct bio *bp)
{
        struct g_gate_softc *sc;
        uint32_t qcount;

        sc = bp->bio_to->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                g_io_deliver(bp, ENXIO);
                return;
        }
        G_GATE_LOGREQ(2, bp, "Request received.");
        switch (bp->bio_cmd) {
        case BIO_READ:
                break;
        case BIO_DELETE:
        case BIO_WRITE:
                /* XXX: Hack to allow read-only mounts. */
                if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                        g_io_deliver(bp, EPERM);
                        return;
                }
                break;
        case BIO_GETATTR:
        default:
                G_GATE_LOGREQ(2, bp, "Ignoring request.");
                g_io_deliver(bp, EOPNOTSUPP);
                return;
        }

        atomic_store_rel_32(&qcount, sc->sc_queue_count);
        if (qcount > sc->sc_queue_size) {
                G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
                g_io_deliver(bp, EIO);
                return;
        }
        atomic_add_acq_32(&sc->sc_queue_count, 1);
        bp->bio_driver1 = (void *)sc->sc_seq;
        sc->sc_seq++;

        mtx_lock(&sc->sc_inqueue_mtx);
        bioq_disksort(&sc->sc_inqueue, bp);
        wakeup(sc);
        mtx_unlock(&sc->sc_inqueue_mtx);
}

static struct g_gate_softc *
g_gate_find(u_int unit)
{
        struct g_gate_softc *sc;

        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        LIST_FOREACH(sc, &g_gate_list, sc_next) {
                if (sc->sc_unit == unit)
                        break;
        }
        return (sc);
}

static struct g_gate_softc *
g_gate_hold(u_int unit)
{
        struct g_gate_softc *sc;

        mtx_lock(&g_gate_list_mtx);
        sc = g_gate_find(unit);
        if (sc != NULL) {
                if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                        sc = NULL;
                else
                        sc->sc_ref++;
        }
        mtx_unlock(&g_gate_list_mtx);
        return (sc);
}

static void
g_gate_release(struct g_gate_softc *sc)
{

        g_topology_assert_not();
        mtx_lock(&g_gate_list_mtx);
        sc->sc_ref--;
        KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
        if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                mtx_unlock(&g_gate_list_mtx);
                g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL);
        } else {
                mtx_unlock(&g_gate_list_mtx);
        }
}

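/*
 * Pick a unit number.  A non-negative argument requests that specific
 * unit and fails with -1 if it is already taken; -1 asks for the first
 * free unit, with the search capped at an arbitrary limit of 666.
 */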
static int
g_gate_getunit(int unit)
{
        struct g_gate_softc *sc;

        mtx_assert(&g_gate_list_mtx, MA_OWNED);
        if (unit >= 0) {
                LIST_FOREACH(sc, &g_gate_list, sc_next) {
                        if (sc->sc_unit == unit)
                                return (-1);
                }
        } else {
                unit = 0;
once_again:
                LIST_FOREACH(sc, &g_gate_list, sc_next) {
                        if (sc->sc_unit == unit) {
                                if (++unit > 666)
                                        return (-1);
                                goto once_again;
                        }
                }
        }
        return (unit);
}

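/*
 * Watchdog callout: fail requests that have been sitting in either queue
 * for five seconds or more with EIO, then re-arm itself every sc_timeout
 * seconds unless the device is being destroyed.
 */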
static void
g_gate_guard(void *arg)
{
        struct g_gate_softc *sc;
        struct bintime curtime;
        struct bio *bp, *bp2;

        sc = arg;
        binuptime(&curtime);
        g_gate_hold(sc->sc_unit);
        mtx_lock(&sc->sc_inqueue_mtx);
        TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_inqueue, bp);
                atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        mtx_unlock(&sc->sc_inqueue_mtx);
        mtx_lock(&sc->sc_outqueue_mtx);
        TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_outqueue, bp);
                atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        mtx_unlock(&sc->sc_outqueue_mtx);
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        g_gate_release(sc);
}

static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
        struct g_gate_softc *sc;

        sc = gp->softc;
        if (sc == NULL || pp != NULL || cp != NULL)
                return;
        g_gate_hold(sc->sc_unit);
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
        } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "write-only");
        } else {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "read-write");
        }
        sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
        sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
        sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
            sc->sc_queue_count);
        sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
            sc->sc_queue_size);
        sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
        g_topology_unlock();
        g_gate_release(sc);
        g_topology_lock();
}

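/*
 * Create a new gate device: validate the ioctl arguments, allocate and
 * initialize the softc, claim a unit number, then attach a fresh geom
 * and provider named G_GATE_PROVIDER_NAME<unit>.
 */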
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
        struct g_gate_softc *sc;
        struct g_geom *gp;
        struct g_provider *pp;

        if (ggio->gctl_mediasize == 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) {
                G_GATE_DEBUG(1, "Invalid sector size.");
                return (EINVAL);
        }
        if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
            (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                G_GATE_DEBUG(1, "Invalid flags.");
                return (EINVAL);
        }
        if (ggio->gctl_unit < -1) {
                G_GATE_DEBUG(1, "Invalid unit number.");
                return (EINVAL);
        }

        sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
        sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
        strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
        sc->sc_seq = 0;
        bioq_init(&sc->sc_inqueue);
        mtx_init(&sc->sc_inqueue_mtx, "gg:inqueue", NULL, MTX_DEF);
        bioq_init(&sc->sc_outqueue);
        mtx_init(&sc->sc_outqueue_mtx, "gg:outqueue", NULL, MTX_DEF);
        sc->sc_queue_count = 0;
        sc->sc_queue_size = ggio->gctl_maxcount;
        if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
                sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
        sc->sc_timeout = ggio->gctl_timeout;
        callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
        mtx_lock(&g_gate_list_mtx);
        ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
        if (ggio->gctl_unit == -1) {
                mtx_destroy(&sc->sc_inqueue_mtx);
                mtx_destroy(&sc->sc_outqueue_mtx);
                free(sc, M_GATE);
                return (EBUSY);
        }
        sc->sc_unit = ggio->gctl_unit;
        LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
        mtx_unlock(&g_gate_list_mtx);

        DROP_GIANT();
        g_topology_lock();
        gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
            sc->sc_unit);
        gp->start = g_gate_start;
        gp->access = g_gate_access;
        gp->dumpconf = g_gate_dumpconf;
        gp->softc = sc;
        pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
        pp->mediasize = ggio->gctl_mediasize;
        pp->sectorsize = ggio->gctl_sectorsize;
        sc->sc_provider = pp;
        g_error_provider(pp, 0);
        g_topology_unlock();
        PICKUP_GIANT();

        if (sc->sc_timeout > 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        return (0);
}

#define G_GATE_CHECK_VERSION(ggio)      do {                            \
        if ((ggio)->gctl_version != G_GATE_VERSION)                     \
                return (EINVAL);                                        \
} while (0)
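
/*
 * Control device ioctl handler.  G_GATE_CMD_CREATE and G_GATE_CMD_DESTROY
 * manage devices; G_GATE_CMD_START hands a queued bio to the calling
 * userland process, sleeping until one arrives, and G_GATE_CMD_DONE
 * completes a previously handed-out bio, matched by sequence number.
 */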
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
        struct g_gate_softc *sc;
        struct bio *bp;
        int error = 0;

        G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd,
            addr, flags, td);

        switch (cmd) {
        case G_GATE_CMD_CREATE:
            {
                struct g_gate_ctl_create *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                return (g_gate_create(ggio));
            }
        case G_GATE_CMD_DESTROY:
            {
                struct g_gate_ctl_destroy *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENXIO);
                g_topology_lock();
                mtx_lock(&g_gate_list_mtx);
                error = g_gate_destroy(sc, ggio->gctl_force);
                if (error == 0)
                        g_gate_wither(sc);
                g_topology_unlock();
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_START:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENXIO);
                for (;;) {
                        mtx_lock(&sc->sc_inqueue_mtx);
                        bp = bioq_first(&sc->sc_inqueue);
                        if (bp != NULL)
                                break;
                        if (msleep(sc, &sc->sc_inqueue_mtx,
                            PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
                                g_gate_release(sc);
                                ggio->gctl_error = ECANCELED;
                                return (0);
                        }
                        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                                g_gate_release(sc);
                                ggio->gctl_error = ECANCELED;
                                return (0);
                        }
                }
                ggio->gctl_cmd = bp->bio_cmd;
                if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
                    bp->bio_length > ggio->gctl_length) {
                        mtx_unlock(&sc->sc_inqueue_mtx);
                        g_gate_release(sc);
                        ggio->gctl_length = bp->bio_length;
                        ggio->gctl_error = ENOMEM;
                        return (0);
                }
                bioq_remove(&sc->sc_inqueue, bp);
                atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                mtx_unlock(&sc->sc_inqueue_mtx);
                ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
                ggio->gctl_offset = bp->bio_offset;
                ggio->gctl_length = bp->bio_length;
                switch (bp->bio_cmd) {
                case BIO_READ:
                        break;
                case BIO_DELETE:
                case BIO_WRITE:
                        error = copyout(bp->bio_data, ggio->gctl_data,
                            bp->bio_length);
                        if (error != 0) {
                                mtx_lock(&sc->sc_inqueue_mtx);
                                bioq_disksort(&sc->sc_inqueue, bp);
                                mtx_unlock(&sc->sc_inqueue_mtx);
                                g_gate_release(sc);
                                return (error);
                        }
                        break;
                }
                mtx_lock(&sc->sc_outqueue_mtx);
                bioq_insert_tail(&sc->sc_outqueue, bp);
                atomic_add_acq_32(&sc->sc_queue_count, 1);
                mtx_unlock(&sc->sc_outqueue_mtx);
                g_gate_release(sc);
                return (0);
            }
        case G_GATE_CMD_DONE:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit);
                if (sc == NULL)
                        return (ENOENT);
                mtx_lock(&sc->sc_outqueue_mtx);
                TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
                        if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
                                break;
                }
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        atomic_subtract_rel_32(&sc->sc_queue_count, 1);
                }
                mtx_unlock(&sc->sc_outqueue_mtx);
                if (bp == NULL) {
                        /*
                         * Request was probably canceled.
                         */
                        g_gate_release(sc);
                        return (0);
                }
                if (ggio->gctl_error == EAGAIN) {
                        bp->bio_error = 0;
                        G_GATE_LOGREQ(1, bp, "Request desisted.");
                        atomic_add_acq_32(&sc->sc_queue_count, 1);
                        mtx_lock(&sc->sc_inqueue_mtx);
                        bioq_disksort(&sc->sc_inqueue, bp);
                        wakeup(sc);
                        mtx_unlock(&sc->sc_inqueue_mtx);
                } else {
                        bp->bio_error = ggio->gctl_error;
                        if (bp->bio_error == 0) {
                                bp->bio_completed = bp->bio_length;
                                switch (bp->bio_cmd) {
                                case BIO_READ:
                                        error = copyin(ggio->gctl_data,
                                            bp->bio_data, bp->bio_length);
                                        if (error != 0)
                                                bp->bio_error = error;
                                        break;
                                case BIO_DELETE:
                                case BIO_WRITE:
                                        break;
                                }
                        }
                        G_GATE_LOGREQ(2, bp, "Request done.");
                        g_io_deliver(bp, bp->bio_error);
                }
                g_gate_release(sc);
                return (error);
            }
        }
        return (ENOIOCTL);
}

static void
g_gate_device(void)
{

        status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
            G_GATE_CTL_NAME);
}

static int
g_gate_modevent(module_t mod, int type, void *data)
{
        int error = 0;

        switch (type) {
        case MOD_LOAD:
                mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
                g_gate_device();
                break;
        case MOD_UNLOAD:
                mtx_lock(&g_gate_list_mtx);
                if (!LIST_EMPTY(&g_gate_list)) {
                        mtx_unlock(&g_gate_list_mtx);
                        error = EBUSY;
                        break;
                }
                mtx_unlock(&g_gate_list_mtx);
                mtx_destroy(&g_gate_list_mtx);
                if (status_dev != NULL)
                        destroy_dev(status_dev);
                break;
        default:
                return (EOPNOTSUPP);
        }

        return (error);
}

static moduledata_t g_gate_module = {
        G_GATE_MOD_NAME,
        g_gate_modevent,
        NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
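
For context, the following is a minimal sketch of the userland half of this interface, based only on the G_GATE_* ioctls and struct g_gate_ctl_io fields used in the listing above; in a real system this role is played by tools such as ggatel(8) or ggated(8)/ggatec(8). The unit number, buffer size, and the placeholder backing-store handling are illustrative assumptions, not part of the driver.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/bio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <geom/gate/g_gate.h>

#define BUFSIZE (1024 * 1024)   /* assumed large enough for any bio */

int
main(void)
{
        struct g_gate_ctl_io ggio;
        char *buf;
        int fd;

        /* The control node is created by g_gate_device() above. */
        if ((fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR)) == -1)
                err(1, "open");
        if ((buf = malloc(BUFSIZE)) == NULL)
                err(1, "malloc");
        for (;;) {
                memset(&ggio, 0, sizeof(ggio));
                ggio.gctl_version = G_GATE_VERSION;
                ggio.gctl_unit = 0;     /* assumes ggate unit 0 exists */
                ggio.gctl_data = buf;
                ggio.gctl_length = BUFSIZE;
                /* Sleeps in the kernel ("ggwait") until a bio is queued. */
                if (ioctl(fd, G_GATE_CMD_START, &ggio) == -1)
                        err(1, "G_GATE_CMD_START");
                if (ggio.gctl_error == ECANCELED)
                        break;          /* device is being destroyed */
                if (ggio.gctl_error != 0)
                        errx(1, "request error %d", ggio.gctl_error);
                switch (ggio.gctl_cmd) {
                case BIO_READ:
                        /* Fill buf from the backing store at
                         * ggio.gctl_offset (placeholder: zeros). */
                        memset(buf, 0, ggio.gctl_length);
                        break;
                case BIO_WRITE:
                case BIO_DELETE:
                        /* buf already holds the data copied out by
                         * G_GATE_CMD_START; persist it here. */
                        break;
                }
                /* gctl_seq was filled in by G_GATE_CMD_START and lets
                 * the kernel match this completion to its bio. */
                ggio.gctl_error = 0;
                if (ioctl(fd, G_GATE_CMD_DONE, &ggio) == -1)
                        err(1, "G_GATE_CMD_DONE");
        }
        free(buf);
        close(fd);
        return (0);
}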



This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.