FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_vfs.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2004 Poul-Henning Kamp
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/bio.h>
   35 #include <sys/kernel.h>
   36 #include <sys/lock.h>
   37 #include <sys/malloc.h>
   38 #include <sys/mutex.h>
   39 #include <sys/sbuf.h>
   40 #include <sys/vnode.h>
   41 #include <sys/mount.h>
   42 
   43 #include <geom/geom.h>
   44 #include <geom/geom_vfs.h>
   45 
   46 /*
   47  * subroutines for use by filesystems.
   48  *
   49  * XXX: should maybe live somewhere else ?
   50  */
   51 #include <sys/buf.h>
   52 
   53 struct g_vfs_softc {
   54         struct mtx       sc_mtx;
   55         struct bufobj   *sc_bo;
   56         struct g_event  *sc_event;
   57         int              sc_active;
   58         bool             sc_orphaned;
   59         int              sc_enxio_active;
   60         int              sc_enxio_reported;
   61 };
   62 
   63 static struct buf_ops __g_vfs_bufops = {
   64         .bop_name =     "GEOM_VFS",
   65         .bop_write =    bufwrite,
   66         .bop_strategy = g_vfs_strategy,
   67         .bop_sync =     bufsync,
   68         .bop_bdflush =  bufbdflush
   69 };
   70 
   71 struct buf_ops *g_vfs_bufops = &__g_vfs_bufops;
   72 
   73 static g_orphan_t g_vfs_orphan;
   74 
   75 static struct g_class g_vfs_class = {
   76         .name =         "VFS",
   77         .version =      G_VERSION,
   78         .orphan =       g_vfs_orphan,
   79 };
   80 
   81 DECLARE_GEOM_CLASS(g_vfs_class, g_vfs);
   82 
   83 static void
   84 g_vfs_destroy(void *arg, int flags __unused)
   85 {
   86         struct g_consumer *cp;
   87 
   88         g_topology_assert();
   89         cp = arg;
   90         if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
   91                 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
   92         g_detach(cp);
   93         if (cp->geom->softc == NULL)
   94                 g_wither_geom(cp->geom, ENXIO);
   95 }
   96 
   97 static void
   98 g_vfs_done(struct bio *bip)
   99 {
  100         struct g_consumer *cp;
  101         struct g_event *event;
  102         struct g_vfs_softc *sc;
  103         struct buf *bp;
  104         int destroy;
  105         struct mount *mp;
  106         struct vnode *vp;
  107         struct cdev *cdevp;
  108 
  109         /*
  110          * Collect statistics on synchronous and asynchronous read
  111          * and write counts for disks that have associated filesystems.
  112          */
  113         bp = bip->bio_caller2;
  114         vp = bp->b_vp;
  115         if (vp != NULL) {
  116                 /*
  117                  * If not a disk vnode, use its associated mount point;
  118                  * otherwise use the mount point associated with the disk.
  119                  */
  120                 VI_LOCK(vp);
  121                 if (vp->v_type != VCHR ||
  122                     (cdevp = vp->v_rdev) == NULL ||
  123                     cdevp->si_devsw == NULL ||
  124                     (cdevp->si_devsw->d_flags & D_DISK) == 0)
  125                         mp = vp->v_mount;
  126                 else
  127                         mp = cdevp->si_mountpt;
  128                 if (mp != NULL) {
  129                         if (bp->b_iocmd == BIO_READ) {
  130                                 if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
  131                                         mp->mnt_stat.f_asyncreads++;
  132                                 else
  133                                         mp->mnt_stat.f_syncreads++;
  134                         } else if (bp->b_iocmd == BIO_WRITE) {
  135                                 if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
  136                                         mp->mnt_stat.f_asyncwrites++;
  137                                 else
  138                                         mp->mnt_stat.f_syncwrites++;
  139                         }
  140                 }
  141                 VI_UNLOCK(vp);
  142         }
  143 
  144         cp = bip->bio_from;
  145         sc = cp->geom->softc;
  146         if (bip->bio_error != 0 && bip->bio_error != EOPNOTSUPP) {
  147                 if ((bp->b_xflags & BX_CVTENXIO) != 0) {
  148                         if (atomic_cmpset_int(&sc->sc_enxio_active, 0, 1))
  149                                 printf("g_vfs_done(): %s converting all errors to ENXIO\n",
  150                                     bip->bio_to->name);
  151                 }
  152                 if (sc->sc_enxio_active)
  153                         bip->bio_error = ENXIO;
  154                 if (bip->bio_error != ENXIO ||
  155                     atomic_cmpset_int(&sc->sc_enxio_reported, 0, 1)) {
  156                         g_print_bio("g_vfs_done():", bip, "error = %d%s",
  157                             bip->bio_error,
  158                             bip->bio_error != ENXIO ? "" :
  159                             " suppressing further ENXIO");
  160                 }
  161         }
  162         bp->b_error = bip->bio_error;
  163         bp->b_ioflags = bip->bio_flags;
  164         if (bip->bio_error)
  165                 bp->b_ioflags |= BIO_ERROR;
  166         bp->b_resid = bp->b_bcount - bip->bio_completed;
  167         g_destroy_bio(bip);
  168 
  169         mtx_lock(&sc->sc_mtx);
  170         destroy = ((--sc->sc_active) == 0 && sc->sc_orphaned);
  171         if (destroy) {
  172                 event = sc->sc_event;
  173                 sc->sc_event = NULL;
  174         } else
  175                 event = NULL;
  176         mtx_unlock(&sc->sc_mtx);
  177         if (destroy)
  178                 g_post_event_ep(g_vfs_destroy, cp, event, NULL);
  179 
  180         bufdone(bp);
  181 }
  182 
  183 void
  184 g_vfs_strategy(struct bufobj *bo, struct buf *bp)
  185 {
  186         struct g_vfs_softc *sc;
  187         struct g_consumer *cp;
  188         struct bio *bip;
  189 
  190         cp = bo->bo_private;
  191         sc = cp->geom->softc;
  192 
  193         /*
  194          * If the provider has orphaned us, just return ENXIO.
  195          */
  196         mtx_lock(&sc->sc_mtx);
  197         if (sc->sc_orphaned || sc->sc_enxio_active) {
  198                 mtx_unlock(&sc->sc_mtx);
  199                 bp->b_error = ENXIO;
  200                 bp->b_ioflags |= BIO_ERROR;
  201                 bufdone(bp);
  202                 return;
  203         }
  204         sc->sc_active++;
  205         mtx_unlock(&sc->sc_mtx);
  206 
  207         bip = g_alloc_bio();
  208         bip->bio_cmd = bp->b_iocmd;
  209         bip->bio_offset = bp->b_iooffset;
  210         bip->bio_length = bp->b_bcount;
  211         bdata2bio(bp, bip);
  212         if ((bp->b_flags & B_BARRIER) != 0) {
  213                 bip->bio_flags |= BIO_ORDERED;
  214                 bp->b_flags &= ~B_BARRIER;
  215         }
  216         if (bp->b_iocmd == BIO_SPEEDUP)
  217                 bip->bio_flags |= bp->b_ioflags;
  218         bip->bio_done = g_vfs_done;
  219         bip->bio_caller2 = bp;
  220 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
  221         buf_track(bp, __func__);
  222         bip->bio_track_bp = bp;
  223 #endif
  224         g_io_request(bip, cp);
  225 }
  226 
  227 static void
  228 g_vfs_orphan(struct g_consumer *cp)
  229 {
  230         struct g_geom *gp;
  231         struct g_event *event;
  232         struct g_vfs_softc *sc;
  233         int destroy;
  234 
  235         g_topology_assert();
  236 
  237         gp = cp->geom;
  238         g_trace(G_T_TOPOLOGY, "g_vfs_orphan(%p(%s))", cp, gp->name);
  239         sc = gp->softc;
  240         if (sc == NULL)
  241                 return;
  242         event = g_alloc_event(M_WAITOK);
  243         mtx_lock(&sc->sc_mtx);
  244         KASSERT(sc->sc_event == NULL, ("g_vfs %p already has an event", sc));
  245         sc->sc_orphaned = true;
  246         destroy = (sc->sc_active == 0);
  247         if (!destroy) {
  248                 sc->sc_event = event;
  249                 event = NULL;
  250         }
  251         mtx_unlock(&sc->sc_mtx);
  252         if (destroy) {
  253                 g_free(event);
  254                 g_vfs_destroy(cp, 0);
  255         }
  256 
  257         /*
  258          * Do not destroy the geom.  The filesystem will do that during unmount.
  259          */
  260 }
  261 
  262 int
  263 g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr)
  264 {
  265         struct g_geom *gp;
  266         struct g_provider *pp;
  267         struct g_consumer *cp;
  268         struct g_vfs_softc *sc;
  269         struct bufobj *bo;
  270         int error;
  271 
  272         g_topology_assert();
  273 
  274         *cpp = NULL;
  275         bo = &vp->v_bufobj;
  276         if (bo->bo_private != vp)
  277                 return (EBUSY);
  278 
  279         pp = g_dev_getprovider(vp->v_rdev);
  280         if (pp == NULL)
  281                 return (ENOENT);
  282         gp = g_new_geomf(&g_vfs_class, "%s.%s", fsname, pp->name);
  283         sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
  284         mtx_init(&sc->sc_mtx, "g_vfs", NULL, MTX_DEF);
  285         sc->sc_bo = bo;
  286         gp->softc = sc;
  287         cp = g_new_consumer(gp);
  288         error = g_attach(cp, pp);
  289         if (error) {
  290                 g_wither_geom(gp, ENXIO);
  291                 return (error);
  292         }
  293         error = g_access(cp, 1, wr, wr);
  294         if (error) {
  295                 g_wither_geom(gp, ENXIO);
  296                 return (error);
  297         }
  298         vnode_create_vobject(vp, pp->mediasize, curthread);
  299         *cpp = cp;
  300         cp->private = vp;
  301         cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
  302         bo->bo_ops = g_vfs_bufops;
  303         bo->bo_private = cp;
  304         bo->bo_bsize = pp->sectorsize;
  305 
  306         return (error);
  307 }
  308 
  309 void
  310 g_vfs_close(struct g_consumer *cp)
  311 {
  312         struct g_geom *gp;
  313         struct g_vfs_softc *sc;
  314 
  315         g_topology_assert();
  316 
  317         gp = cp->geom;
  318         sc = gp->softc;
  319         bufobj_invalbuf(sc->sc_bo, V_SAVE, 0, 0);
  320         sc->sc_bo->bo_private = cp->private;
  321         gp->softc = NULL;
  322         mtx_destroy(&sc->sc_mtx);
  323         if (!sc->sc_orphaned || cp->provider == NULL)
  324                 g_wither_geom_close(gp, ENXIO);
  325         KASSERT(sc->sc_event == NULL, ("g_vfs %p event is non-NULL", sc));
  326         g_free(sc);
  327 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.