FreeBSD/Linux Kernel Cross Reference
sys/geom/raid/tr_raid1.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB      (1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RWTUN,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO 20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RWTUN,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use when disk busy for rebuild.");

#define RAID1_REBUILD_CLUSTER_IDLE 100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RWTUN,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle.");

#define RAID1_REBUILD_META_UPDATE 1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RWTUN,
    &g_raid1_rebuild_meta_update, 0,
    "When to update the metadata.");

static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE 0
#define TR_RAID1_REBUILD 1
#define TR_RAID1_RESYNC 2

#define TR_RAID1_F_DOING_SOME   0x1
#define TR_RAID1_F_LOCKED       0x2
#define TR_RAID1_F_ABORT        0x4

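/*
 * Per-volume state for the RAID1 transformation module.
 */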
struct g_raid_tr_raid1_object {
        struct g_raid_tr_object  trso_base;
        int                      trso_starting;
        int                      trso_stopping;
        int                      trso_type;
        int                      trso_recover_slabs; /* slabs before rest */
        int                      trso_fair_io;
        int                      trso_meta_update;
        int                      trso_flags;
        struct g_raid_subdisk   *trso_failed_sd; /* like per volume */
        void                    *trso_buffer;    /* Buffer space */
        struct bio               trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
        KOBJMETHOD(g_raid_tr_taste,     g_raid_tr_taste_raid1),
        KOBJMETHOD(g_raid_tr_event,     g_raid_tr_event_raid1),
        KOBJMETHOD(g_raid_tr_start,     g_raid_tr_start_raid1),
        KOBJMETHOD(g_raid_tr_stop,      g_raid_tr_stop_raid1),
        KOBJMETHOD(g_raid_tr_iostart,   g_raid_tr_iostart_raid1),
        KOBJMETHOD(g_raid_tr_iodone,    g_raid_tr_iodone_raid1),
        KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
        KOBJMETHOD(g_raid_tr_locked,    g_raid_tr_locked_raid1),
        KOBJMETHOD(g_raid_tr_idle,      g_raid_tr_idle_raid1),
        KOBJMETHOD(g_raid_tr_free,      g_raid_tr_free_raid1),
        { 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
        "RAID1",
        g_raid_tr_raid1_methods,
        sizeof(struct g_raid_tr_raid1_object),
        .trc_enable = 1,
        .trc_priority = 100,
        .trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

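/*
 * Taste a volume: accept it only if it is a RAID1 volume with a
 * supported qualifier (R1SM or R1MM); otherwise let another module
 * claim it.
 */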
static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;
        if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
            (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
             tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
                return (G_RAID_TR_TASTE_FAIL);
        trs->trso_starting = 1;
        return (G_RAID_TR_TASTE_SUCCEED);
}

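/*
 * Recompute the volume state from the states of its subdisks.  If no
 * subdisk is ACTIVE, promote the best remaining one; then derive the
 * volume state, consider starting or aborting a rebuild, and broadcast
 * an up/down event when the state changes.
 */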
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_softc *sc;
        struct g_raid_subdisk *tsd, *bestsd;
        u_int s;
        int i, na, ns;

        sc = vol->v_softc;
        trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
        if (trs->trso_stopping &&
            (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
                s = G_RAID_VOLUME_S_STOPPED;
        else if (trs->trso_starting)
                s = G_RAID_VOLUME_S_STARTING;
        else {
                /* Make sure we have at least one ACTIVE disk. */
                na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                if (na == 0) {
                        /*
                         * Critical situation!  We have no active disks!
                         * Choose the best disk we have to make it active.
                         */
                        bestsd = &vol->v_subdisks[0];
                        for (i = 1; i < vol->v_disks_count; i++) {
                                tsd = &vol->v_subdisks[i];
                                if (tsd->sd_state > bestsd->sd_state)
                                        bestsd = tsd;
                                else if (tsd->sd_state == bestsd->sd_state &&
                                    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
                                     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
                                    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
                                        bestsd = tsd;
                        }
                        if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
                                /* We found a reasonable candidate. */
                                G_RAID_DEBUG1(1, sc,
                                    "Promote subdisk %s:%d from %s to ACTIVE.",
                                    vol->v_name, bestsd->sd_pos,
                                    g_raid_subdisk_state2str(bestsd->sd_state));
                                g_raid_change_subdisk_state(bestsd,
                                    G_RAID_SUBDISK_S_ACTIVE);
                                g_raid_write_metadata(sc,
                                    vol, bestsd, bestsd->sd_disk);
                        }
                }
                na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
                     g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
                if (na == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_OPTIMAL;
                else if (na + ns == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_SUBOPTIMAL;
                else if (na > 0)
                        s = G_RAID_VOLUME_S_DEGRADED;
                else
                        s = G_RAID_VOLUME_S_BROKEN;
                g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
        }
        if (s != vol->v_state) {
                g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
                    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
                    G_RAID_EVENT_VOLUME);
                g_raid_change_volume_state(vol, s);
                if (!trs->trso_starting && !trs->trso_stopping)
                        g_raid_write_metadata(sc, vol, NULL, NULL);
        }
        return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
        /*
         * We don't fail the last disk in the pack, since it still has decent
         * data on it and that's better than failing the disk if it is the root
         * file system.
         *
         * XXX should this be controlled via a tunable?  It makes sense for
         * the volume that has / on it.  I can't think of a case where we'd
         * want the volume to go away on this kind of event.
         */
        if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
            g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
                return;
        g_raid_fail_disk(sc, sd, disk);
}

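/*
 * Issue the next slab of rebuild I/O: set up a read of one slab from a
 * good subdisk into the preallocated buffer and lock the range.  The
 * lock callback (g_raid_tr_locked_raid1) actually starts the read; the
 * matching write to the failed subdisk is issued from the iodone path.
 */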
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd, *good_sd;
        struct bio *bp;

        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
                return;
        sd = trs->trso_failed_sd;
        good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
        if (good_sd == NULL) {
                g_raid_tr_raid1_rebuild_abort(tr);
                return;
        }
        bp = &trs->trso_bio;
        memset(bp, 0, sizeof(*bp));
        bp->bio_offset = sd->sd_rebuild_pos;
        bp->bio_length = MIN(g_raid1_rebuild_slab,
            sd->sd_size - sd->sd_rebuild_pos);
        bp->bio_data = trs->trso_buffer;
        bp->bio_cmd = BIO_READ;
        bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
        bp->bio_caller1 = good_sd;
        trs->trso_flags |= TR_RAID1_F_DOING_SOME;
        trs->trso_flags |= TR_RAID1_F_LOCKED;
        g_raid_lock_range(sd->sd_volume,        /* Lock callback starts I/O */
           bp->bio_offset, bp->bio_length, NULL, bp);
}

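/*
 * Common cleanup once a rebuild ends, whether it finished or was
 * aborted: write metadata, release the slab buffer, clear the rebuild
 * state, and recompute the volume state.
 */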
static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;

        vol = trs->trso_base.tro_volume;
        sd = trs->trso_failed_sd;
        g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
        free(trs->trso_buffer, M_TR_RAID1);
        trs->trso_buffer = NULL;
        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
        trs->trso_type = TR_RAID1_NONE;
        trs->trso_recover_slabs = 0;
        trs->trso_failed_sd = NULL;
        g_raid_tr_update_state_raid1(vol, NULL);
}

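/*
 * The rebuild position reached the end of the subdisk: mark the
 * subdisk ACTIVE and tear down the rebuild state.
 */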
static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd;

        trs = (struct g_raid_tr_raid1_object *)tr;
        sd = trs->trso_failed_sd;
        G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
            "Subdisk %s:%d-%s rebuild completed.",
            sd->sd_volume->v_name, sd->sd_pos,
            sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
        g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
        sd->sd_rebuild_pos = 0;
        g_raid_tr_raid1_rebuild_done(trs);
}

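/*
 * Abort a rebuild.  If a slab I/O is still in flight, just set the
 * ABORT flag and let the iodone path finish the job; otherwise drop
 * any range lock still held and clean up immediately.
 */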
static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd;
        struct g_raid_volume *vol;
        off_t len;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        sd = trs->trso_failed_sd;
        if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "Subdisk %s:%d-%s rebuild is aborting.",
                    sd->sd_volume->v_name, sd->sd_pos,
                    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
                trs->trso_flags |= TR_RAID1_F_ABORT;
        } else {
                G_RAID_DEBUG1(0, vol->v_softc,
                    "Subdisk %s:%d-%s rebuild aborted.",
                    sd->sd_volume->v_name, sd->sd_pos,
                    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
                trs->trso_flags &= ~TR_RAID1_F_ABORT;
                if (trs->trso_flags & TR_RAID1_F_LOCKED) {
                        trs->trso_flags &= ~TR_RAID1_F_LOCKED;
                        len = MIN(g_raid1_rebuild_slab,
                            sd->sd_size - sd->sd_rebuild_pos);
                        g_raid_unlock_range(tr->tro_volume,
                            sd->sd_rebuild_pos, len);
                }
                g_raid_tr_raid1_rebuild_done(trs);
        }
}

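/*
 * Begin a rebuild: pick the subdisk to rebuild onto (preferring RESYNC,
 * then REBUILD, then STALE, then UNINITIALIZED/NEW, promoting its state
 * as needed), allocate the slab buffer, and issue the first slab.
 */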
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd, *fsd;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_failed_sd) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "Already rebuilding in start rebuild, pos %jd.",
                    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
                return;
        }
        sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
        if (sd == NULL) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "No active disk to rebuild.  night night.");
                return;
        }
        fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
        if (fsd == NULL)
                fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
        if (fsd == NULL) {
                fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
                if (fsd != NULL) {
                        fsd->sd_rebuild_pos = 0;
                        g_raid_change_subdisk_state(fsd,
                            G_RAID_SUBDISK_S_RESYNC);
                        g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
                } else {
                        fsd = g_raid_get_subdisk(vol,
                            G_RAID_SUBDISK_S_UNINITIALIZED);
                        if (fsd == NULL)
                                fsd = g_raid_get_subdisk(vol,
                                    G_RAID_SUBDISK_S_NEW);
                        if (fsd != NULL) {
                                fsd->sd_rebuild_pos = 0;
                                g_raid_change_subdisk_state(fsd,
                                    G_RAID_SUBDISK_S_REBUILD);
                                g_raid_write_metadata(vol->v_softc,
                                    vol, fsd, NULL);
                        }
                }
        }
        if (fsd == NULL) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "No failed disk to rebuild.  night night.");
                return;
        }
        trs->trso_failed_sd = fsd;
        G_RAID_DEBUG1(0, vol->v_softc,
            "Subdisk %s:%d-%s rebuild start at %jd.",
            fsd->sd_volume->v_name, fsd->sd_pos,
            fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
            (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
        trs->trso_type = TR_RAID1_REBUILD;
        trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
        trs->trso_meta_update = g_raid1_rebuild_meta_update;
        g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;
        int na, nr;

        /*
         * If we're stopping, don't do anything.  If we don't have at least one
         * good disk and one bad disk, we don't do anything.  And if there's a
         * 'good disk' stored in the trs, then we're in progress and we punt.
         * If we make it past all these checks, we need to rebuild.
         */
        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_stopping)
                return;
        na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
        nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
        switch (trs->trso_type) {
        case TR_RAID1_NONE:
                if (na == 0)
                        return;
                if (nr == 0) {
                        nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
                            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
                            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
                        if (nr == 0)
                                return;
                }
                g_raid_tr_raid1_rebuild_start(tr);
                break;
        case TR_RAID1_REBUILD:
                if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
                        g_raid_tr_raid1_rebuild_abort(tr);
                break;
        case TR_RAID1_RESYNC:
                break;
        }
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

        g_raid_tr_update_state_raid1(tr->tro_volume, sd);
        return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        g_raid_tr_update_state_raid1(vol, NULL);
        return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        trs->trso_stopping = 1;
        g_raid_tr_update_state_raid1(vol, NULL);
        return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)          (((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
        struct g_raid_subdisk *sd, *best;
        int i, prio, bestprio;

        best = NULL;
        bestprio = INT_MAX;
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
                    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
                      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
                     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
                        continue;
                if ((mask & (1 << i)) != 0)
                        continue;
                prio = G_RAID_SUBDISK_LOAD(sd);
                prio += min(sd->sd_recovery, 255) << 22;
                prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
                /* If disk head is precisely in position - highly prefer it. */
                if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
                        prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
                else
                /* If disk head is close to position - prefer it. */
                if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
                    G_RAID_SUBDISK_TRACK_SIZE)
                        prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
                if (prio < bestprio) {
                        best = sd;
                        bestprio = prio;
                }
        }
        return (best);
}

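/*
 * Start a read: pick the best subdisk using the selection policy above
 * and send a cloned bio down to it.
 */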
static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_subdisk *sd;
        struct bio *cbp;

        sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
        KASSERT(sd != NULL, ("No active disks in volume %s.",
                tr->tro_volume->v_name));

        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
                g_raid_iodone(bp, ENOMEM);
                return;
        }

        g_raid_subdisk_iostart(sd, cbp);
}

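/*
 * Start a write: clone the bio for every subdisk that should receive
 * the data (ACTIVE, the already-rebuilt part of a REBUILD disk, and
 * STALE/RESYNC disks), then send all the clones down together.
 */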
static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        struct bio_queue_head queue;
        struct bio *cbp;
        int i;

        vol = tr->tro_volume;

        /*
         * Allocate all bios before sending any request, so we can return
         * ENOMEM in a nice and clean way.
         */
        bioq_init(&queue);
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                switch (sd->sd_state) {
                case G_RAID_SUBDISK_S_ACTIVE:
                        break;
                case G_RAID_SUBDISK_S_REBUILD:
                        /*
                         * When rebuilding, only part of this subdisk is
                         * writable, the rest will be written as part of
                         * that process.
                         */
                        if (bp->bio_offset >= sd->sd_rebuild_pos)
                                continue;
                        break;
                case G_RAID_SUBDISK_S_STALE:
                case G_RAID_SUBDISK_S_RESYNC:
                        /*
                         * Resyncing still writes on the theory that the
                         * resync'd disk is very close and writing it will
                         * keep it that way better if we keep up while
                         * resyncing.
                         */
                        break;
                default:
                        continue;
                }
                cbp = g_clone_bio(bp);
                if (cbp == NULL)
                        goto failure;
                cbp->bio_caller1 = sd;
                bioq_insert_tail(&queue, cbp);
        }
        while ((cbp = bioq_takefirst(&queue)) != NULL) {
                sd = cbp->bio_caller1;
                cbp->bio_caller1 = NULL;
                g_raid_subdisk_iostart(sd, cbp);
        }
        return;
failure:
        while ((cbp = bioq_takefirst(&queue)) != NULL)
                g_destroy_bio(cbp);
        if (bp->bio_error == 0)
                bp->bio_error = ENOMEM;
        g_raid_iodone(bp, bp->bio_error);
}

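/*
 * Transformation-level I/O entry point: reject I/O unless the volume
 * is usable, opportunistically feed the rebuild, then dispatch by
 * command.
 */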
static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
            vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
            vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
                g_raid_iodone(bp, EIO);
                return;
        }
        /*
         * If we're rebuilding, squeeze in rebuild activity every so often,
         * even when the disk is busy.  Be sure to only count real I/O
         * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
         * by this module.
         */
        if (trs->trso_failed_sd != NULL &&
            !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
                /* Make this new or currently-running round short. */
                trs->trso_recover_slabs = 0;
                if (--trs->trso_fair_io <= 0) {
                        trs->trso_fair_io = g_raid1_rebuild_fair_io;
                        g_raid_tr_raid1_rebuild_some(tr);
                }
        }
        switch (bp->bio_cmd) {
        case BIO_READ:
                g_raid_tr_iostart_raid1_read(tr, bp);
                break;
        case BIO_WRITE:
        case BIO_DELETE:
                g_raid_tr_iostart_raid1_write(tr, bp);
                break;
        case BIO_SPEEDUP:
        case BIO_FLUSH:
                g_raid_tr_flush_common(tr, bp);
                break;
        default:
                KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
                    bp->bio_cmd, vol->v_name));
                break;
        }
}

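/*
 * I/O completion.  Handles three distinct flows: rebuild/resync bios
 * (flagged SYNC); failed reads, which are retried on another mirror
 * and, if recovered, written back to remap the bad sector; and
 * ordinary completions, which are accumulated into the parent bio.
 */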
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
        struct bio *cbp;
        struct g_raid_subdisk *nsd;
        struct g_raid_volume *vol;
        struct bio *pbp;
        struct g_raid_tr_raid1_object *trs;
        uintptr_t *mask;
        int error, do_write;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
                /*
                 * This operation is part of a rebuild or resync operation.
                 * See what work just got done, then schedule the next bit of
                 * work, if any.  Rebuild/resync is done a little bit at a
                 * time, either when a timeout happens or after we get a
                 * bunch of I/Os to the disk (to make sure an active system
                 * will complete in a sane amount of time).
                 *
                 * We are set up to do differing amounts of work for each of
                 * these cases.  So long as the slab count is smallish (less
                 * than 50 or so, I'd guess, but that's just a WAG), we
                 * shouldn't have any bio starvation issues.  For active
                 * disks, we do 5MB of data, for inactive ones, we do 50MB.
                 */
                if (trs->trso_type == TR_RAID1_REBUILD) {
                        if (bp->bio_cmd == BIO_READ) {
                                /* Immediately abort rebuild, if requested. */
                                if (trs->trso_flags & TR_RAID1_F_ABORT) {
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }

                                /* On read error, skip and cross fingers. */
                                if (bp->bio_error != 0) {
                                        G_RAID_LOGREQ(0, bp,
                                            "Read error during rebuild (%d), "
                                            "possible data loss!",
                                            bp->bio_error);
                                        goto rebuild_round_done;
                                }

                                /*
                                 * The read operation finished, queue the
                                 * write and get out.
                                 */
                                G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
                                    bp->bio_error);
                                bp->bio_cmd = BIO_WRITE;
                                bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
                                G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
                                g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
                        } else {
                                /*
                                 * The write operation just finished.  Do
                                 * another.  We keep cloning the master bio
                                 * since it has the right buffers allocated to
                                 * it.
                                 */
                                G_RAID_LOGREQ(4, bp,
                                    "rebuild write done. Error %d",
                                    bp->bio_error);
                                nsd = trs->trso_failed_sd;
                                if (bp->bio_error != 0 ||
                                    trs->trso_flags & TR_RAID1_F_ABORT) {
                                        if ((trs->trso_flags &
                                            TR_RAID1_F_ABORT) == 0) {
                                                g_raid_tr_raid1_fail_disk(sd->sd_softc,
                                                    nsd, nsd->sd_disk);
                                        }
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }
rebuild_round_done:
                                nsd = trs->trso_failed_sd;
                                trs->trso_flags &= ~TR_RAID1_F_LOCKED;
                                g_raid_unlock_range(sd->sd_volume,
                                    bp->bio_offset, bp->bio_length);
                                nsd->sd_rebuild_pos += bp->bio_length;
                                if (nsd->sd_rebuild_pos >= nsd->sd_size) {
                                        g_raid_tr_raid1_rebuild_finish(tr);
                                        return;
                                }

                                /* Abort rebuild if we are stopping */
                                if (trs->trso_stopping) {
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }

                                if (--trs->trso_meta_update <= 0) {
                                        g_raid_write_metadata(vol->v_softc,
                                            vol, nsd, nsd->sd_disk);
                                        trs->trso_meta_update =
                                            g_raid1_rebuild_meta_update;
                                }
                                trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                if (--trs->trso_recover_slabs <= 0)
                                        return;
                                g_raid_tr_raid1_rebuild_some(tr);
                        }
                } else if (trs->trso_type == TR_RAID1_RESYNC) {
                        /*
                         * Read the good sd and the bad sd in parallel.  When
                         * both are done, compare the buffers and write the
                         * good data to the bad sd if they differ, then do
                         * the next bit of work.
                         */
                        panic("Somehow, we think we're doing a resync");
                }
                return;
        }
        pbp = bp->bio_parent;
        pbp->bio_inbed++;
        if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
                /*
                 * Read failed on first drive.  Retry the read error on
                 * another disk drive, if available, before erroring out the
                 * read.
                 */
                sd->sd_disk->d_read_errs++;
                G_RAID_LOGREQ(0, bp,
                    "Read error (%d), %d read errors total",
                    bp->bio_error, sd->sd_disk->d_read_errs);

                /*
                 * If there are too many read errors, we move to degraded.
                 * XXX Do we want to FAIL the drive (eg, make the user redo
                 * everything to get it back in sync), or just degrade the
                 * drive, which kicks off a resync?
                 */
                do_write = 1;
                if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                        if (pbp->bio_children == 1)
                                do_write = 0;
                }

                /*
                 * Find the other disk, and try to do the I/O to it.
                 */
                mask = (uintptr_t *)(&pbp->bio_driver2);
                if (pbp->bio_children == 1) {
                        /* Save original subdisk. */
                        pbp->bio_driver1 = do_write ? sd : NULL;
                        *mask = 0;
                }
                *mask |= 1 << sd->sd_pos;
                nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
                if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
                        g_destroy_bio(bp);
                        G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
                            nsd->sd_pos);
                        if (pbp->bio_children == 2 && do_write) {
                                sd->sd_recovery++;
                                cbp->bio_caller1 = nsd;
                                pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
                                /* Lock callback starts I/O */
                                g_raid_lock_range(sd->sd_volume,
                                    cbp->bio_offset, cbp->bio_length, pbp, cbp);
                        } else {
                                g_raid_subdisk_iostart(nsd, cbp);
                        }
                        return;
                }
                /*
                 * We can't retry.  Return the original error by falling
                 * through.  This will happen when there's only one good disk.
                 * We don't need to fail the raid, since its actual state is
                 * based on the state of the subdisks.
                 */
                G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
        }
        if (bp->bio_cmd == BIO_READ &&
            bp->bio_error == 0 &&
            pbp->bio_children > 1 &&
            pbp->bio_driver1 != NULL) {
                /*
                 * If it was a read, and bio_children is >1, then we just
                 * recovered the data from the second drive.  We should try to
                 * write that data to the first drive if sector remapping is
                 * enabled.  A write should put the data in a new place on the
                 * disk, remapping the bad sector.  Do we need to do that by
                 * queueing a request to the main worker thread?  It doesn't
                 * affect the return code of this current read, and can be
                 * done at our leisure.  However, to make the code simpler, it
                 * is done synchronously.
                 */
                G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
                cbp = g_clone_bio(pbp);
                if (cbp != NULL) {
                        g_destroy_bio(bp);
                        cbp->bio_cmd = BIO_WRITE;
                        cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
                        G_RAID_LOGREQ(2, cbp,
                            "Attempting bad sector remap on failing drive.");
                        g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
                        return;
                }
        }
        if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
                /*
                 * We're done with a recovery, mark the range as unlocked.
                 * For any write errors, we aggressively fail the disk since
                 * there was both a READ and a WRITE error at this location.
                 * Both types of errors generally indicate the drive is on
                 * the verge of total failure anyway.  Better to stop trusting
                 * it now.  However, we need to reset error to 0 in that case
                 * because we're not failing the original I/O which succeeded.
                 */
                if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
                        G_RAID_LOGREQ(0, bp, "Remap write failed: "
                            "failing subdisk.");
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                        bp->bio_error = 0;
                }
                if (pbp->bio_driver1 != NULL) {
                        ((struct g_raid_subdisk *)pbp->bio_driver1)
                            ->sd_recovery--;
                }
                G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
                g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
                    bp->bio_length);
        }
        if (pbp->bio_cmd != BIO_READ) {
                if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
                        pbp->bio_error = bp->bio_error;
                if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
                        G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                }
                error = pbp->bio_error;
        } else
                error = bp->bio_error;
        g_destroy_bio(bp);
        if (pbp->bio_children == pbp->bio_inbed) {
                pbp->bio_completed = pbp->bio_length;
                g_raid_iodone(pbp, error);
        }
}

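/*
 * Kernel dump support: send the dump write to every writable subdisk,
 * using the same writability rules as the normal write path, and
 * succeed if at least one copy lands.
 */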
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr, void *virtual,
    off_t offset, size_t length)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        int error, i, ok;

        vol = tr->tro_volume;
        error = 0;
        ok = 0;
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                switch (sd->sd_state) {
                case G_RAID_SUBDISK_S_ACTIVE:
                        break;
                case G_RAID_SUBDISK_S_REBUILD:
                        /*
                         * When rebuilding, only part of this subdisk is
                         * writable, the rest will be written as part of
                         * that process.
                         */
                        if (offset >= sd->sd_rebuild_pos)
                                continue;
                        break;
                case G_RAID_SUBDISK_S_STALE:
                case G_RAID_SUBDISK_S_RESYNC:
                        /*
                         * Resyncing still writes on the theory that the
                         * resync'd disk is very close and writing it will
                         * keep it that way better if we keep up while
                         * resyncing.
                         */
                        break;
                default:
                        continue;
                }
                error = g_raid_subdisk_kerneldump(sd, virtual, offset, length);
                if (error == 0)
                        ok++;
        }
        return (ok > 0 ? 0 : error);
}

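/*
 * Range-lock acquisition callback: the bio stashed its target subdisk
 * in bio_caller1; now that the range is locked, start the I/O.
 */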
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
        struct bio *bp;
        struct g_raid_subdisk *sd;

        bp = (struct bio *)argp;
        sd = (struct g_raid_subdisk *)bp->bio_caller1;
        g_raid_subdisk_iostart(sd, bp);

        return (0);
}

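/*
 * Idle callback: the volume has gone quiet, so refresh the fair-I/O
 * budget and allow a larger burst of rebuild slabs.
 */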
static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;
        trs->trso_fair_io = g_raid1_rebuild_fair_io;
        trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
        if (trs->trso_type == TR_RAID1_REBUILD)
                g_raid_tr_raid1_rebuild_some(tr);
        return (0);
}

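/*
 * Release any rebuild buffer still held when the transformation
 * object is torn down.
 */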
static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;

        if (trs->trso_buffer != NULL) {
                free(trs->trso_buffer, M_TR_RAID1);
                trs->trso_buffer = NULL;
        }
        return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");
