FreeBSD/Linux Kernel Cross Reference
sys/geom/raid/md_promise.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2000 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_md_if.h"

static MALLOC_DEFINE(M_MD_PROMISE, "md_promise_data", "GEOM_RAID Promise metadata");

#define PROMISE_MAX_DISKS       8
#define PROMISE_MAX_SUBDISKS    2
#define PROMISE_META_OFFSET     14

struct promise_raid_disk {
        uint8_t         flags;                  /* Subdisk status. */
#define PROMISE_F_VALID         0x01
#define PROMISE_F_ONLINE        0x02
#define PROMISE_F_ASSIGNED      0x04
#define PROMISE_F_SPARE         0x08
#define PROMISE_F_DUPLICATE     0x10
#define PROMISE_F_REDIR         0x20
#define PROMISE_F_DOWN          0x40
#define PROMISE_F_READY         0x80

        uint8_t         number;                 /* Position in a volume. */
        uint8_t         channel;                /* ATA channel number. */
        uint8_t         device;                 /* ATA device number. */
        uint64_t        id __packed;            /* Subdisk ID. */
} __packed;

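/*
 * On-disk Promise metadata block.  One copy is kept per subdisk, stored
 * near the end of the disk (see promise_meta_read() for the exact location).
 */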
struct promise_raid_conf {
        char            promise_id[24];
#define PROMISE_MAGIC           "Promise Technology, Inc."
#define FREEBSD_MAGIC           "FreeBSD ATA driver RAID "

        uint32_t        dummy_0;
        uint64_t        magic_0;
#define PROMISE_MAGIC0(x)       (((uint64_t)(x.channel) << 48) | \
                                ((uint64_t)(x.device != 0) << 56))
        uint16_t        magic_1;
        uint32_t        magic_2;
        uint8_t         filler1[470];

        uint32_t        integrity;
#define PROMISE_I_VALID         0x00000080

        struct promise_raid_disk        disk;   /* This subdisk info. */
        uint32_t        disk_offset;            /* Subdisk offset. */
        uint32_t        disk_sectors;           /* Subdisk size. */
        uint32_t        disk_rebuild;           /* Rebuild position. */
        uint16_t        generation;             /* Generation number. */
        uint8_t         status;                 /* Volume status. */
#define PROMISE_S_VALID         0x01
#define PROMISE_S_ONLINE        0x02
#define PROMISE_S_INITED        0x04
#define PROMISE_S_READY         0x08
#define PROMISE_S_DEGRADED      0x10
#define PROMISE_S_MARKED        0x20
#define PROMISE_S_MIGRATING     0x40
#define PROMISE_S_FUNCTIONAL    0x80

        uint8_t         type;                   /* Volume type. */
#define PROMISE_T_RAID0         0x00
#define PROMISE_T_RAID1         0x01
#define PROMISE_T_RAID3         0x02
#define PROMISE_T_RAID5         0x04
#define PROMISE_T_SPAN          0x08
#define PROMISE_T_JBOD          0x10

        uint8_t         total_disks;            /* Disks in this volume. */
        uint8_t         stripe_shift;           /* Strip size shift (512 << n bytes). */
        uint8_t         array_width;            /* Number of RAID0 stripes. */
        uint8_t         array_number;           /* Global volume number. */
        uint32_t        total_sectors;          /* Volume size. */
        uint16_t        cylinders;              /* Volume geometry: C. */
        uint8_t         heads;                  /* Volume geometry: H. */
        uint8_t         sectors;                /* Volume geometry: S. */
        uint64_t        volume_id __packed;     /* Volume ID. */
        struct promise_raid_disk        disks[PROMISE_MAX_DISKS];
                                                /* Subdisks in this volume. */
        char            name[32];               /* Volume label. */

        uint32_t        filler2[8];
        uint32_t        magic_3;        /* Something related to rebuild. */
        uint64_t        rebuild_lba64;  /* Per-volume rebuild position. */
        uint32_t        magic_4;
        uint32_t        magic_5;
        uint32_t        total_sectors_high;
        uint8_t         magic_6;
        uint8_t         sector_size;
        uint16_t        magic_7;
        uint32_t        magic_8[31];
        uint32_t        backup_time;
        uint16_t        magic_9;
        uint32_t        disk_offset_high;
        uint32_t        disk_sectors_high;
        uint32_t        disk_rebuild_high;
        uint16_t        magic_10;
        uint32_t        magic_11[3];
        uint32_t        filler3[284];
        uint32_t        checksum;
} __packed;

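/* Per-disk software state: one Promise metadata copy per subdisk on the disk. */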
struct g_raid_md_promise_perdisk {
        int              pd_updated;
        int              pd_subdisks;
        struct promise_raid_conf        *pd_meta[PROMISE_MAX_SUBDISKS];
};

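/* Per-volume software state, shared by all subdisks of the volume. */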
struct g_raid_md_promise_pervolume {
        struct promise_raid_conf        *pv_meta;
        uint64_t                         pv_id;
        uint16_t                         pv_generation;
        int                              pv_disks_present;
        int                              pv_started;
        struct callout                   pv_start_co;   /* STARTING state timer. */
};

static g_raid_md_create_t g_raid_md_create_promise;
static g_raid_md_taste_t g_raid_md_taste_promise;
static g_raid_md_event_t g_raid_md_event_promise;
static g_raid_md_volume_event_t g_raid_md_volume_event_promise;
static g_raid_md_ctl_t g_raid_md_ctl_promise;
static g_raid_md_write_t g_raid_md_write_promise;
static g_raid_md_fail_disk_t g_raid_md_fail_disk_promise;
static g_raid_md_free_disk_t g_raid_md_free_disk_promise;
static g_raid_md_free_volume_t g_raid_md_free_volume_promise;
static g_raid_md_free_t g_raid_md_free_promise;

static kobj_method_t g_raid_md_promise_methods[] = {
        KOBJMETHOD(g_raid_md_create,    g_raid_md_create_promise),
        KOBJMETHOD(g_raid_md_taste,     g_raid_md_taste_promise),
        KOBJMETHOD(g_raid_md_event,     g_raid_md_event_promise),
        KOBJMETHOD(g_raid_md_volume_event,      g_raid_md_volume_event_promise),
        KOBJMETHOD(g_raid_md_ctl,       g_raid_md_ctl_promise),
        KOBJMETHOD(g_raid_md_write,     g_raid_md_write_promise),
        KOBJMETHOD(g_raid_md_fail_disk, g_raid_md_fail_disk_promise),
        KOBJMETHOD(g_raid_md_free_disk, g_raid_md_free_disk_promise),
        KOBJMETHOD(g_raid_md_free_volume,       g_raid_md_free_volume_promise),
        KOBJMETHOD(g_raid_md_free,      g_raid_md_free_promise),
        { 0, 0 }
};

static struct g_raid_md_class g_raid_md_promise_class = {
        "Promise",
        g_raid_md_promise_methods,
        sizeof(struct g_raid_md_object),
        .mdc_enable = 1,
        .mdc_priority = 100
};

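/* Dump a Promise metadata block to the console for debugging. */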
static void
g_raid_md_promise_print(struct promise_raid_conf *meta)
{
        int i;

        if (g_raid_debug < 1)
                return;

        printf("********* ATA Promise Metadata *********\n");
        printf("promise_id          <%.24s>\n", meta->promise_id);
        printf("disk                %02x %02x %02x %02x %016jx\n",
            meta->disk.flags, meta->disk.number, meta->disk.channel,
            meta->disk.device, meta->disk.id);
        printf("disk_offset         %u\n", meta->disk_offset);
        printf("disk_sectors        %u\n", meta->disk_sectors);
        printf("disk_rebuild        %u\n", meta->disk_rebuild);
        printf("generation          %u\n", meta->generation);
        printf("status              0x%02x\n", meta->status);
        printf("type                %u\n", meta->type);
        printf("total_disks         %u\n", meta->total_disks);
        printf("stripe_shift        %u\n", meta->stripe_shift);
        printf("array_width         %u\n", meta->array_width);
        printf("array_number        %u\n", meta->array_number);
        printf("total_sectors       %u\n", meta->total_sectors);
        printf("cylinders           %u\n", meta->cylinders);
        printf("heads               %u\n", meta->heads);
        printf("sectors             %u\n", meta->sectors);
        printf("volume_id           0x%016jx\n", meta->volume_id);
        printf("disks:\n");
        for (i = 0; i < PROMISE_MAX_DISKS; i++) {
                printf("                    %02x %02x %02x %02x %016jx\n",
                    meta->disks[i].flags, meta->disks[i].number,
                    meta->disks[i].channel, meta->disks[i].device,
                    meta->disks[i].id);
        }
        printf("name                <%.32s>\n", meta->name);
        printf("magic_3             0x%08x\n", meta->magic_3);
        printf("rebuild_lba64       %ju\n", meta->rebuild_lba64);
        printf("magic_4             0x%08x\n", meta->magic_4);
        printf("magic_5             0x%08x\n", meta->magic_5);
        printf("total_sectors_high  0x%08x\n", meta->total_sectors_high);
        printf("sector_size         %u\n", meta->sector_size);
        printf("backup_time         %d\n", meta->backup_time);
        printf("disk_offset_high    0x%08x\n", meta->disk_offset_high);
        printf("disk_sectors_high   0x%08x\n", meta->disk_sectors_high);
        printf("disk_rebuild_high   0x%08x\n", meta->disk_rebuild_high);
        printf("=================================================\n");
}

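/* Allocate and return a private copy of the given metadata block. */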
static struct promise_raid_conf *
promise_meta_copy(struct promise_raid_conf *meta)
{
        struct promise_raid_conf *nmeta;

        nmeta = malloc(sizeof(*nmeta), M_MD_PROMISE, M_WAITOK);
        memcpy(nmeta, meta, sizeof(*nmeta));
        return (nmeta);
}

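/* Return the position of the disk with the given ID in the metadata, or -1. */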
static int
promise_meta_find_disk(struct promise_raid_conf *meta, uint64_t id)
{
        int pos;

        for (pos = 0; pos < meta->total_disks; pos++) {
                if (meta->disks[pos].id == id)
                        return (pos);
        }
        return (-1);
}

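/*
 * Find the largest disk range not claimed by any of the nsd known subdisks.
 * The last 131072 sectors are excluded, apparently reserved for metadata.
 * Returns non-zero if a usable range was found.
 */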
static int
promise_meta_unused_range(struct promise_raid_conf **metaarr, int nsd,
    off_t sectors, off_t *off, off_t *size)
{
        off_t coff, csize, tmp;
        int i, j;

        sectors -= 131072;
        *off = 0;
        *size = 0;
        coff = 0;
        csize = sectors;
        i = 0;
        while (1) {
                for (j = 0; j < nsd; j++) {
                        tmp = ((off_t)metaarr[j]->disk_offset_high << 32) +
                            metaarr[j]->disk_offset;
                        if (tmp >= coff)
                                csize = MIN(csize, tmp - coff);
                }
                if (csize > *size) {
                        *off = coff;
                        *size = csize;
                }
                if (i >= nsd)
                        break;
                coff = ((off_t)metaarr[i]->disk_offset_high << 32) +
                     metaarr[i]->disk_offset +
                    ((off_t)metaarr[i]->disk_sectors_high << 32) +
                     metaarr[i]->disk_sectors;
                csize = sectors - coff;
                i++;
        }
        return ((*size > 0) ? 1 : 0);
}

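/*
 * Translate a metadata disk position into a volume disk position.
 * For RAID1E (RAID0+1) the on-disk and in-memory orderings differ.
 */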
static int
promise_meta_translate_disk(struct g_raid_volume *vol, int md_disk_pos)
{
        int disk_pos, width;

        if (md_disk_pos >= 0 && vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
                width = vol->v_disks_count / 2;
                disk_pos = (md_disk_pos / width) +
                    (md_disk_pos % width) * width;
        } else
                disk_pos = md_disk_pos;
        return (disk_pos);
}

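/* Extract the volume label, trimming trailing spaces and control characters. */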
static void
promise_meta_get_name(struct promise_raid_conf *meta, char *buf)
{
        int i;

        strncpy(buf, meta->name, 32);
        buf[32] = 0;
        for (i = 31; i >= 0; i--) {
                if (buf[i] > 0x20)
                        break;
                buf[i] = 0;
        }
}

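/* Store the volume label, padding it with spaces to 32 bytes. */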
static void
promise_meta_put_name(struct promise_raid_conf *meta, char *buf)
{

        memset(meta->name, 0x20, 32);
        memcpy(meta->name, buf, MIN(strlen(buf), 32));
}

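/*
 * Read up to PROMISE_MAX_SUBDISKS metadata blocks from a disk.  The first
 * block lives 63 sectors before the end of the disk; each following one
 * sits PROMISE_META_OFFSET sectors closer to the end.  Returns the number
 * of valid blocks found and stored in metaarr[].
 */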
static int
promise_meta_read(struct g_consumer *cp, struct promise_raid_conf **metaarr)
{
        struct g_provider *pp;
        struct promise_raid_conf *meta;
        char *buf;
        int error, i, subdisks;
        uint32_t checksum, *ptr;

        pp = cp->provider;
        subdisks = 0;

        if (pp->sectorsize * 4 > maxphys) {
                G_RAID_DEBUG(1, "%s: Blocksize is too big.", pp->name);
                return (subdisks);
        }
next:
        /* Read metadata block. */
        buf = g_read_data(cp, pp->mediasize - pp->sectorsize *
            (63 - subdisks * PROMISE_META_OFFSET),
            pp->sectorsize * 4, &error);
        if (buf == NULL) {
                G_RAID_DEBUG(1, "Cannot read metadata from %s (error=%d).",
                    pp->name, error);
                return (subdisks);
        }
        meta = (struct promise_raid_conf *)buf;

        /* Check if this is a Promise RAID struct. */
        if (strncmp(meta->promise_id, PROMISE_MAGIC, strlen(PROMISE_MAGIC)) &&
            strncmp(meta->promise_id, FREEBSD_MAGIC, strlen(FREEBSD_MAGIC))) {
                if (subdisks == 0)
                        G_RAID_DEBUG(1,
                            "Promise signature check failed on %s", pp->name);
                g_free(buf);
                return (subdisks);
        }
        meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK);
        memcpy(meta, buf, MIN(sizeof(*meta), pp->sectorsize * 4));
        g_free(buf);

        /* Check metadata checksum. */
        for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
                checksum += *ptr++;
        if (checksum != meta->checksum) {
                G_RAID_DEBUG(1, "Promise checksum check failed on %s", pp->name);
                free(meta, M_MD_PROMISE);
                return (subdisks);
        }

        if ((meta->integrity & PROMISE_I_VALID) == 0) {
                G_RAID_DEBUG(1, "Promise metadata is invalid on %s", pp->name);
                free(meta, M_MD_PROMISE);
                return (subdisks);
        }

        if (meta->total_disks > PROMISE_MAX_DISKS) {
                G_RAID_DEBUG(1, "Wrong number of disks on %s (%d)",
                    pp->name, meta->total_disks);
                free(meta, M_MD_PROMISE);
                return (subdisks);
        }

        /* Remove filler garbage from fields used in newer metadata. */
        if (meta->disk_offset_high == 0x8b8c8d8e &&
            meta->disk_sectors_high == 0x8788898a &&
            meta->disk_rebuild_high == 0x83848586) {
                meta->disk_offset_high = 0;
                meta->disk_sectors_high = 0;
                if (meta->disk_rebuild == UINT32_MAX)
                        meta->disk_rebuild_high = UINT32_MAX;
                else
                        meta->disk_rebuild_high = 0;
                if (meta->total_sectors_high == 0x15161718) {
                        meta->total_sectors_high = 0;
                        meta->backup_time = 0;
                        if (meta->rebuild_lba64 == 0x2122232425262728)
                                meta->rebuild_lba64 = UINT64_MAX;
                }
        }
        if (meta->sector_size < 1 || meta->sector_size > 8)
                meta->sector_size = 1;

        /* Save this part and look for next. */
        *metaarr = meta;
        metaarr++;
        subdisks++;
        if (subdisks < PROMISE_MAX_SUBDISKS)
                goto next;

        return (subdisks);
}

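/*
 * Write all metadata blocks to a disk.  Slots without real metadata get
 * either a record describing the unused space or an empty (zeroed) block.
 */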
static int
promise_meta_write(struct g_consumer *cp,
    struct promise_raid_conf **metaarr, int nsd)
{
        struct g_provider *pp;
        struct promise_raid_conf *meta;
        char *buf;
        off_t off, size;
        int error, i, subdisk, fake;
        uint32_t checksum, *ptr;

        pp = cp->provider;
        subdisk = 0;
        fake = 0;
next:
        buf = malloc(pp->sectorsize * 4, M_MD_PROMISE, M_WAITOK | M_ZERO);
        meta = NULL;
        if (subdisk < nsd) {
                meta = metaarr[subdisk];
        } else if (!fake && promise_meta_unused_range(metaarr, nsd,
            cp->provider->mediasize / cp->provider->sectorsize,
            &off, &size)) {
                /* Optionally add record for unused space. */
                meta = (struct promise_raid_conf *)buf;
                memcpy(&meta->promise_id[0], PROMISE_MAGIC,
                    sizeof(PROMISE_MAGIC) - 1);
                meta->dummy_0 = 0x00020000;
                meta->integrity = PROMISE_I_VALID;
                meta->disk.flags = PROMISE_F_ONLINE | PROMISE_F_VALID;
                meta->disk.number = 0xff;
                arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
                meta->disk_offset_high = off >> 32;
                meta->disk_offset = (uint32_t)off;
                meta->disk_sectors_high = size >> 32;
                meta->disk_sectors = (uint32_t)size;
                meta->disk_rebuild_high = UINT32_MAX;
                meta->disk_rebuild = UINT32_MAX;
                fake = 1;
        }
        if (meta != NULL) {
                /* Recalculate checksum in case the metadata has changed. */
                meta->checksum = 0;
                for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
                        checksum += *ptr++;
                meta->checksum = checksum;
                memcpy(buf, meta, MIN(pp->sectorsize * 4, sizeof(*meta)));
        }
        error = g_write_data(cp, pp->mediasize - pp->sectorsize *
            (63 - subdisk * PROMISE_META_OFFSET),
            buf, pp->sectorsize * 4);
        if (error != 0) {
                G_RAID_DEBUG(1, "Cannot write metadata to %s (error=%d).",
                    pp->name, error);
        }
        free(buf, M_MD_PROMISE);

        subdisk++;
        if (subdisk < PROMISE_MAX_SUBDISKS)
                goto next;

        return (error);
}

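/* Erase all metadata locations on a disk by writing zeroed blocks. */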
static int
promise_meta_erase(struct g_consumer *cp)
{
        struct g_provider *pp;
        char *buf;
        int error, subdisk;

        pp = cp->provider;
        buf = malloc(4 * pp->sectorsize, M_MD_PROMISE, M_WAITOK | M_ZERO);
        for (subdisk = 0; subdisk < PROMISE_MAX_SUBDISKS; subdisk++) {
                error = g_write_data(cp, pp->mediasize - pp->sectorsize *
                    (63 - subdisk * PROMISE_META_OFFSET),
                    buf, 4 * pp->sectorsize);
                if (error != 0) {
                        G_RAID_DEBUG(1, "Cannot erase metadata on %s (error=%d).",
                            pp->name, error);
                }
        }
        free(buf, M_MD_PROMISE);
        return (error);
}

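/* Write metadata marking the whole disk (minus reserved space) as a spare. */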
static int
promise_meta_write_spare(struct g_consumer *cp)
{
        struct promise_raid_conf *meta;
        off_t tmp;
        int error;

        meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
        memcpy(&meta->promise_id[0], PROMISE_MAGIC, sizeof(PROMISE_MAGIC) - 1);
        meta->dummy_0 = 0x00020000;
        meta->integrity = PROMISE_I_VALID;
        meta->disk.flags = PROMISE_F_SPARE | PROMISE_F_ONLINE | PROMISE_F_VALID;
        meta->disk.number = 0xff;
        arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
        tmp = cp->provider->mediasize / cp->provider->sectorsize - 131072;
        meta->disk_sectors_high = tmp >> 32;
        meta->disk_sectors = (uint32_t)tmp;
        meta->disk_rebuild_high = UINT32_MAX;
        meta->disk_rebuild = UINT32_MAX;
        error = promise_meta_write(cp, &meta, 1);
        free(meta, M_MD_PROMISE);
        return (error);
}

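/* Find a volume in the node by its Promise volume ID. */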
static struct g_raid_volume *
g_raid_md_promise_get_volume(struct g_raid_softc *sc, uint64_t id)
{
        struct g_raid_volume    *vol;
        struct g_raid_md_promise_pervolume *pv;

        TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
                pv = vol->v_md_data;
                if (pv->pv_id == id)
                        break;
        }
        return (vol);
}

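/*
 * Destroy started volumes that have no subdisks left.
 * Returns non-zero if anything was destroyed.
 */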
static int
g_raid_md_promise_purge_volumes(struct g_raid_softc *sc)
{
        struct g_raid_volume    *vol, *tvol;
        struct g_raid_md_promise_pervolume *pv;
        int i, res;

        res = 0;
        TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tvol) {
                pv = vol->v_md_data;
                if (!pv->pv_started || vol->v_stopping)
                        continue;
                for (i = 0; i < vol->v_disks_count; i++) {
                        if (vol->v_subdisks[i].sd_state != G_RAID_SUBDISK_S_NONE)
                                break;
                }
                if (i >= vol->v_disks_count) {
                        g_raid_destroy_volume(vol);
                        res = 1;
                }
        }
        return (res);
}

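/*
 * Drop per-disk metadata referring to deleted volumes and destroy disks
 * that carry no metadata at all.  Returns non-zero if anything was destroyed.
 */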
static int
g_raid_md_promise_purge_disks(struct g_raid_softc *sc)
{
        struct g_raid_disk      *disk, *tdisk;
        struct g_raid_volume    *vol;
        struct g_raid_md_promise_perdisk *pd;
        int i, j, res;

        res = 0;
        TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
                if (disk->d_state == G_RAID_DISK_S_SPARE)
                        continue;
                pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

                /* Scan for deleted volumes. */
                for (i = 0; i < pd->pd_subdisks; ) {
                        vol = g_raid_md_promise_get_volume(sc,
                            pd->pd_meta[i]->volume_id);
                        if (vol != NULL && !vol->v_stopping) {
                                i++;
                                continue;
                        }
                        free(pd->pd_meta[i], M_MD_PROMISE);
                        for (j = i; j < pd->pd_subdisks - 1; j++)
                                pd->pd_meta[j] = pd->pd_meta[j + 1];
                        pd->pd_meta[pd->pd_subdisks - 1] = NULL;
                        pd->pd_subdisks--;
                        pd->pd_updated = 1;
                }

                /* If there is no metadata left - erase and delete disk. */
                if (pd->pd_subdisks == 0) {
                        promise_meta_erase(disk->d_consumer);
                        g_raid_destroy_disk(disk);
                        res = 1;
                }
        }
        return (res);
}

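/* Check whether the given RAID level/qualifier/disk count is supported. */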
static int
g_raid_md_promise_supported(int level, int qual, int disks, int force)
{

        if (disks > PROMISE_MAX_DISKS)
                return (0);
        switch (level) {
        case G_RAID_VOLUME_RL_RAID0:
                if (disks < 1)
                        return (0);
                if (!force && disks < 2)
                        return (0);
                break;
        case G_RAID_VOLUME_RL_RAID1:
                if (disks < 1)
                        return (0);
                if (!force && (disks != 2))
                        return (0);
                break;
        case G_RAID_VOLUME_RL_RAID1E:
                if (disks < 2)
                        return (0);
                if (disks % 2 != 0)
                        return (0);
                if (!force && (disks != 4))
                        return (0);
                break;
        case G_RAID_VOLUME_RL_SINGLE:
                if (disks != 1)
                        return (0);
                break;
        case G_RAID_VOLUME_RL_CONCAT:
                if (disks < 2)
                        return (0);
                break;
        case G_RAID_VOLUME_RL_RAID5:
                if (disks < 3)
                        return (0);
                if (qual != G_RAID_VOLUME_RLQ_R5LA)
                        return (0);
                break;
        default:
                return (0);
        }
        if (level != G_RAID_VOLUME_RL_RAID5 && qual != G_RAID_VOLUME_RLQ_NONE)
                return (0);
        return (1);
}

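/*
 * Fit a disk into the volume: either into the subdisk slot recorded in its
 * metadata (sdn >= 0), or into the best free slot if it is a new/spare disk.
 * Returns non-zero if the disk was resurrected into a previously empty slot.
 */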
static int
g_raid_md_promise_start_disk(struct g_raid_disk *disk, int sdn,
    struct g_raid_volume *vol)
{
        struct g_raid_softc *sc;
        struct g_raid_subdisk *sd;
        struct g_raid_md_promise_perdisk *pd;
        struct g_raid_md_promise_pervolume *pv;
        struct promise_raid_conf *meta;
        off_t eoff, esize, size;
        int disk_pos, md_disk_pos, i, resurrection = 0;

        sc = disk->d_softc;
        pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

        pv = vol->v_md_data;
        meta = pv->pv_meta;

        if (sdn >= 0) {
                /* Find disk position in metadata by its serial. */
                md_disk_pos = promise_meta_find_disk(meta, pd->pd_meta[sdn]->disk.id);
                /* For RAID0+1 we need to translate order. */
                disk_pos = promise_meta_translate_disk(vol, md_disk_pos);
        } else {
                md_disk_pos = -1;
                disk_pos = -1;
        }
        if (disk_pos < 0) {
                G_RAID_DEBUG1(1, sc, "Disk %s is not part of the volume %s",
                    g_raid_get_diskname(disk), vol->v_name);
                /* Failed stale disk is useless for us. */
                if (sdn >= 0 &&
                    pd->pd_meta[sdn]->disk.flags & PROMISE_F_DOWN) {
                        g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE_FAILED);
                        return (0);
                }
                /* If we were given specific metadata subdisk - erase it. */
                if (sdn >= 0) {
                        free(pd->pd_meta[sdn], M_MD_PROMISE);
                        for (i = sdn; i < pd->pd_subdisks - 1; i++)
                                pd->pd_meta[i] = pd->pd_meta[i + 1];
                        pd->pd_meta[pd->pd_subdisks - 1] = NULL;
                        pd->pd_subdisks--;
                }
                /* If we are in the start process, that's all for now. */
                if (!pv->pv_started)
                        goto nofit;
                /*
                 * If we have already started, try to make use of the disk.
                 * Try to replace OFFLINE disks first, then FAILED.
                 */
                promise_meta_unused_range(pd->pd_meta, pd->pd_subdisks,
                    disk->d_consumer->provider->mediasize /
                    disk->d_consumer->provider->sectorsize,
                    &eoff, &esize);
                if (esize == 0) {
                        G_RAID_DEBUG1(1, sc, "No free space on disk %s",
                            g_raid_get_diskname(disk));
                        goto nofit;
                }
                size = INT64_MAX;
                for (i = 0; i < vol->v_disks_count; i++) {
                        sd = &vol->v_subdisks[i];
                        if (sd->sd_state != G_RAID_SUBDISK_S_NONE)
                                size = sd->sd_size;
                        if (sd->sd_state <= G_RAID_SUBDISK_S_FAILED &&
                            (disk_pos < 0 ||
                             vol->v_subdisks[i].sd_state <
                             vol->v_subdisks[disk_pos].sd_state))
                                disk_pos = i;
                }
                if (disk_pos >= 0 &&
                    vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT &&
                    (off_t)esize * 512 < size) {
                        G_RAID_DEBUG1(1, sc, "Disk %s free space "
                            "is too small (%ju < %ju)",
                            g_raid_get_diskname(disk),
                            (off_t)esize * 512, size);
                        disk_pos = -1;
                }
                if (disk_pos >= 0) {
                        if (vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT)
                                esize = size / 512;
                        /* For RAID0+1 we need to translate order. */
                        md_disk_pos = promise_meta_translate_disk(vol, disk_pos);
                } else {
nofit:
                        if (pd->pd_subdisks == 0) {
                                g_raid_change_disk_state(disk,
                                    G_RAID_DISK_S_SPARE);
                        }
                        return (0);
                }
                G_RAID_DEBUG1(1, sc, "Disk %s takes pos %d in the volume %s",
                    g_raid_get_diskname(disk), disk_pos, vol->v_name);
                resurrection = 1;
        }

        sd = &vol->v_subdisks[disk_pos];

        if (resurrection && sd->sd_disk != NULL) {
                g_raid_change_disk_state(sd->sd_disk,
                    G_RAID_DISK_S_STALE_FAILED);
                TAILQ_REMOVE(&sd->sd_disk->d_subdisks,
                    sd, sd_next);
        }
        vol->v_subdisks[disk_pos].sd_disk = disk;
        TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);

        /* Welcome the new disk. */
        if (resurrection)
                g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
        else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN)
                g_raid_change_disk_state(disk, G_RAID_DISK_S_FAILED);
        else
                g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);

        if (resurrection) {
                sd->sd_offset = (off_t)eoff * 512;
                sd->sd_size = (off_t)esize * 512;
        } else {
                sd->sd_offset = (((off_t)pd->pd_meta[sdn]->disk_offset_high
                    << 32) + pd->pd_meta[sdn]->disk_offset) * 512;
                sd->sd_size = (((off_t)pd->pd_meta[sdn]->disk_sectors_high
                    << 32) + pd->pd_meta[sdn]->disk_sectors) * 512;
        }

        if (resurrection) {
                /* Stale disk, almost same as new. */
                g_raid_change_subdisk_state(sd,
                    G_RAID_SUBDISK_S_NEW);
        } else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN) {
                /* Failed disk. */
                g_raid_change_subdisk_state(sd,
                    G_RAID_SUBDISK_S_FAILED);
        } else if (meta->disks[md_disk_pos].flags & PROMISE_F_REDIR) {
                /* Rebuilding disk. */
                g_raid_change_subdisk_state(sd,
                    G_RAID_SUBDISK_S_REBUILD);
                if (pd->pd_meta[sdn]->generation != meta->generation)
                        sd->sd_rebuild_pos = 0;
                else {
                        sd->sd_rebuild_pos =
                            (((off_t)pd->pd_meta[sdn]->disk_rebuild_high << 32) +
                             pd->pd_meta[sdn]->disk_rebuild) * 512;
                }
        } else if (!(meta->disks[md_disk_pos].flags & PROMISE_F_ONLINE)) {
                /* Rebuilding disk. */
                g_raid_change_subdisk_state(sd,
                    G_RAID_SUBDISK_S_NEW);
        } else if (pd->pd_meta[sdn]->generation != meta->generation ||
            (meta->status & PROMISE_S_MARKED)) {
                /* Stale disk or dirty volume (unclean shutdown). */
                g_raid_change_subdisk_state(sd,
                    G_RAID_SUBDISK_S_STALE);
        } else {
                /* Up to date disk. */
                g_raid_change_subdisk_state(sd,
                    G_RAID_SUBDISK_S_ACTIVE);
        }
        g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
            G_RAID_EVENT_SUBDISK);

        return (resurrection);
}

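/*
 * Look for volumes with missing or failed subdisks and try to refill them
 * from spare or underused disks, writing updated metadata on success.
 */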
static void
g_raid_md_promise_refill(struct g_raid_softc *sc)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        struct g_raid_disk *disk;
        struct g_raid_md_object *md;
        struct g_raid_md_promise_perdisk *pd;
        struct g_raid_md_promise_pervolume *pv;
        int update, updated, i, bad;

        md = sc->sc_md;
restart:
        updated = 0;
        TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
                pv = vol->v_md_data;
                if (!pv->pv_started || vol->v_stopping)
                        continue;

                /* Search for subdisk that needs replacement. */
                bad = 0;
                for (i = 0; i < vol->v_disks_count; i++) {
                        sd = &vol->v_subdisks[i];
                        if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
                            sd->sd_state == G_RAID_SUBDISK_S_FAILED)
                                bad = 1;
                }
                if (!bad)
                        continue;

                G_RAID_DEBUG1(1, sc, "Volume %s is not complete, "
                    "trying to refill.", vol->v_name);

                TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
                        /* Skip failed. */
                        if (disk->d_state < G_RAID_DISK_S_SPARE)
                                continue;
                        /* Skip already used by this volume. */
                        for (i = 0; i < vol->v_disks_count; i++) {
                                sd = &vol->v_subdisks[i];
                                if (sd->sd_disk == disk)
                                        break;
                        }
                        if (i < vol->v_disks_count)
                                continue;

                        /* Try to use disk if it has empty extents. */
                        pd = disk->d_md_data;
                        if (pd->pd_subdisks < PROMISE_MAX_SUBDISKS) {
                                update =
                                    g_raid_md_promise_start_disk(disk, -1, vol);
                        } else
                                update = 0;
                        if (update) {
                                updated = 1;
                                g_raid_md_write_promise(md, vol, NULL, disk);
                                break;
                        }
                }
        }
        if (updated)
                goto restart;
}

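/* Bring a volume up: translate its metadata into volume/subdisk state. */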
static void
g_raid_md_promise_start(struct g_raid_volume *vol)
{
        struct g_raid_softc *sc;
        struct g_raid_subdisk *sd;
        struct g_raid_disk *disk;
        struct g_raid_md_object *md;
        struct g_raid_md_promise_perdisk *pd;
        struct g_raid_md_promise_pervolume *pv;
        struct promise_raid_conf *meta;
        u_int i;

        sc = vol->v_softc;
        md = sc->sc_md;
        pv = vol->v_md_data;
        meta = pv->pv_meta;

        vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_NONE;
        if (meta->type == PROMISE_T_RAID0)
                vol->v_raid_level = G_RAID_VOLUME_RL_RAID0;
        else if (meta->type == PROMISE_T_RAID1) {
                if (meta->array_width == 1)
                        vol->v_raid_level = G_RAID_VOLUME_RL_RAID1;
                else
                        vol->v_raid_level = G_RAID_VOLUME_RL_RAID1E;
        } else if (meta->type == PROMISE_T_RAID3)
                vol->v_raid_level = G_RAID_VOLUME_RL_RAID3;
        else if (meta->type == PROMISE_T_RAID5) {
                vol->v_raid_level = G_RAID_VOLUME_RL_RAID5;
                vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_R5LA;
        } else if (meta->type == PROMISE_T_SPAN)
                vol->v_raid_level = G_RAID_VOLUME_RL_CONCAT;
        else if (meta->type == PROMISE_T_JBOD)
                vol->v_raid_level = G_RAID_VOLUME_RL_SINGLE;
        else
                vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
        vol->v_strip_size = 512 << meta->stripe_shift; //ZZZ
        vol->v_disks_count = meta->total_disks;
        vol->v_mediasize = (off_t)meta->total_sectors * 512; //ZZZ
        if (meta->total_sectors_high < 256) /* If value looks sane. */
                vol->v_mediasize +=
                    ((off_t)meta->total_sectors_high << 32) * 512; //ZZZ
        vol->v_sectorsize = 512 * meta->sector_size;
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                sd->sd_offset = (((off_t)meta->disk_offset_high << 32) +
                    meta->disk_offset) * 512;
                sd->sd_size = (((off_t)meta->disk_sectors_high << 32) +
                    meta->disk_sectors) * 512;
        }
        g_raid_start_volume(vol);

        /* Make all disks found so far take their places. */
        TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
                pd = disk->d_md_data;
                for (i = 0; i < pd->pd_subdisks; i++) {
                        if (pd->pd_meta[i]->volume_id == meta->volume_id)
                                g_raid_md_promise_start_disk(disk, i, vol);
                }
        }

        pv->pv_started = 1;
        callout_stop(&pv->pv_start_co);
        G_RAID_DEBUG1(0, sc, "Volume started.");
        g_raid_md_write_promise(md, vol, NULL, NULL);

        /* Pick up any STALE/SPARE disks to refill the array if needed. */
        g_raid_md_promise_refill(sc);

        g_raid_event_send(vol, G_RAID_VOLUME_E_START, G_RAID_EVENT_VOLUME);
}

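/* Timeout handler: force a volume to start even if some disks are missing. */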
static void
g_raid_promise_go(void *arg)
{
        struct g_raid_volume *vol;
        struct g_raid_softc *sc;
        struct g_raid_md_promise_pervolume *pv;

        vol = arg;
        pv = vol->v_md_data;
        sc = vol->v_softc;
        if (!pv->pv_started) {
                G_RAID_DEBUG1(0, sc, "Force volume start due to timeout.");
                g_raid_event_send(vol, G_RAID_VOLUME_E_STARTMD,
                    G_RAID_EVENT_VOLUME);
        }
}

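/*
 * Handle a newly arrived disk: match its metadata blocks against known
 * volumes, create missing volumes, and start them once all disks arrive.
 */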
static void
g_raid_md_promise_new_disk(struct g_raid_disk *disk)
{
        struct g_raid_softc *sc;
        struct g_raid_md_object *md;
        struct promise_raid_conf *pdmeta;
        struct g_raid_md_promise_perdisk *pd;
        struct g_raid_md_promise_pervolume *pv;
        struct g_raid_volume *vol;
        int i;
        char buf[33];

        sc = disk->d_softc;
        md = sc->sc_md;
        pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

        if (pd->pd_subdisks == 0) {
                g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
                g_raid_md_promise_refill(sc);
                return;
        }

        for (i = 0; i < pd->pd_subdisks; i++) {
                pdmeta = pd->pd_meta[i];

                /* Look for volume with matching ID. */
                vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
                if (vol == NULL) {
                        promise_meta_get_name(pdmeta, buf);
                        vol = g_raid_create_volume(sc, buf, pdmeta->array_number);
                        pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
                        pv->pv_id = pdmeta->volume_id;
                        vol->v_md_data = pv;
                        callout_init(&pv->pv_start_co, 1);
                        callout_reset(&pv->pv_start_co,
                            g_raid_start_timeout * hz,
                            g_raid_promise_go, vol);
                } else
                        pv = vol->v_md_data;

                /* If we haven't started yet - check metadata freshness. */
                if (pv->pv_meta == NULL || !pv->pv_started) {
                        if (pv->pv_meta == NULL ||
                            ((int16_t)(pdmeta->generation - pv->pv_generation)) > 0) {
                                G_RAID_DEBUG1(1, sc, "Newer disk");
                                if (pv->pv_meta != NULL)
                                        free(pv->pv_meta, M_MD_PROMISE);
                                pv->pv_meta = promise_meta_copy(pdmeta);
                                pv->pv_generation = pv->pv_meta->generation;
                                pv->pv_disks_present = 1;
                        } else if (pdmeta->generation == pv->pv_generation) {
                                pv->pv_disks_present++;
                                G_RAID_DEBUG1(1, sc, "Matching disk (%d of %d up)",
                                    pv->pv_disks_present,
                                    pv->pv_meta->total_disks);
                        } else {
                                G_RAID_DEBUG1(1, sc, "Older disk");
                        }
                }
        }

        for (i = 0; i < pd->pd_subdisks; i++) {
                pdmeta = pd->pd_meta[i];

                /* Look for volume with matching ID. */
                vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
                if (vol == NULL)
                        continue;
                pv = vol->v_md_data;

                if (pv->pv_started) {
                        if (g_raid_md_promise_start_disk(disk, i, vol))
                                g_raid_md_write_promise(md, vol, NULL, NULL);
                } else {
                        /* If we collected all needed disks - start array. */
                        if (pv->pv_disks_present == pv->pv_meta->total_disks)
                                g_raid_md_promise_start(vol);
                }
        }
}

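/* Create the GEOM node for a Promise array, reusing an existing one if found. */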
static int
g_raid_md_create_promise(struct g_raid_md_object *md, struct g_class *mp,
    struct g_geom **gp)
{
        struct g_geom *geom;
        struct g_raid_softc *sc;

        /* Search for existing node. */
        LIST_FOREACH(geom, &mp->geom, geom) {
                sc = geom->softc;
                if (sc == NULL)
                        continue;
                if (sc->sc_stopping != 0)
                        continue;
                if (sc->sc_md->mdo_class != md->mdo_class)
                        continue;
                break;
        }
        if (geom != NULL) {
                *gp = geom;
                return (G_RAID_MD_TASTE_EXISTING);
        }

        /* Create new one if not found. */
        sc = g_raid_create_node(mp, "Promise", md);
        if (sc == NULL)
                return (G_RAID_MD_TASTE_FAIL);
        md->mdo_softc = sc;
        *gp = sc->sc_geom;
        return (G_RAID_MD_TASTE_NEW);
}

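/*
 * Taste a provider: read and validate Promise metadata, then attach the
 * disk to a matching RAID node, creating a new node if needed.
 */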
static int
g_raid_md_taste_promise(struct g_raid_md_object *md, struct g_class *mp,
                              struct g_consumer *cp, struct g_geom **gp)
{
        struct g_consumer *rcp;
        struct g_provider *pp;
        struct g_raid_softc *sc;
        struct g_raid_disk *disk;
        struct promise_raid_conf *metaarr[4];
        struct g_raid_md_promise_perdisk *pd;
        struct g_geom *geom;
        int i, j, result, len, subdisks;
        char name[16];
        uint16_t vendor;

        G_RAID_DEBUG(1, "Tasting Promise on %s", cp->provider->name);
        pp = cp->provider;

        /* Read metadata from device. */
        g_topology_unlock();
        vendor = 0xffff;
        len = sizeof(vendor);
        if (pp->geom->rank == 1)
                g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
        subdisks = promise_meta_read(cp, metaarr);
        g_topology_lock();
        if (subdisks == 0) {
                if (g_raid_aggressive_spare) {
                        if (vendor == 0x105a || vendor == 0x1002) {
                                G_RAID_DEBUG(1,
                                    "No Promise metadata, forcing spare.");
                                goto search;
                        } else {
                                G_RAID_DEBUG(1,
                                    "Promise/ATI vendor mismatch "
                                    "0x%04x != 0x105a/0x1002",
                                    vendor);
                        }
                }
                return (G_RAID_MD_TASTE_FAIL);
        }

        /* Metadata valid. Print it. */
        for (i = 0; i < subdisks; i++)
                g_raid_md_promise_print(metaarr[i]);

        /* Purge meaningless (empty/spare) records. */
        for (i = 0; i < subdisks; ) {
                if (metaarr[i]->disk.flags & PROMISE_F_ASSIGNED) {
                        i++;
                        continue;
                }
                free(metaarr[i], M_MD_PROMISE);
                for (j = i; j < subdisks - 1; j++)
                        metaarr[j] = metaarr[j + 1];
                metaarr[subdisks - 1] = NULL;
                subdisks--;
        }

search:
        /* Search for matching node. */
        sc = NULL;
        LIST_FOREACH(geom, &mp->geom, geom) {
                sc = geom->softc;
                if (sc == NULL)
                        continue;
                if (sc->sc_stopping != 0)
                        continue;
                if (sc->sc_md->mdo_class != md->mdo_class)
                        continue;
                break;
        }

        /* Found matching node. */
        if (geom != NULL) {
                G_RAID_DEBUG(1, "Found matching array %s", sc->sc_name);
                result = G_RAID_MD_TASTE_EXISTING;

        } else { /* No matching node found -- create one. */
                result = G_RAID_MD_TASTE_NEW;
                snprintf(name, sizeof(name), "Promise");
                sc = g_raid_create_node(mp, name, md);
                md->mdo_softc = sc;
                geom = sc->sc_geom;
        }

        /* There is no return after this point, so we can close the passed consumer. */
        g_access(cp, -1, 0, 0);

        rcp = g_new_consumer(geom);
        rcp->flags |= G_CF_DIRECT_RECEIVE;
        g_attach(rcp, pp);
        if (g_access(rcp, 1, 1, 1) != 0)
                ; //goto fail1;

        g_topology_unlock();
        sx_xlock(&sc->sc_lock);

        pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
        pd->pd_subdisks = subdisks;
        for (i = 0; i < subdisks; i++)
                pd->pd_meta[i] = metaarr[i];
        disk = g_raid_create_disk(sc);
        disk->d_md_data = (void *)pd;
        disk->d_consumer = rcp;
        rcp->private = disk;

        g_raid_get_disk_info(disk);

        g_raid_md_promise_new_disk(disk);

        sx_xunlock(&sc->sc_lock);
        g_topology_lock();
        *gp = geom;
        return (result);
}

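/* Handle disk events; a disconnect deletes the disk and updates metadata. */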
static int
g_raid_md_event_promise(struct g_raid_md_object *md,
    struct g_raid_disk *disk, u_int event)
{
        struct g_raid_softc *sc;

        sc = md->mdo_softc;
        if (disk == NULL)
                return (-1);
        switch (event) {
        case G_RAID_DISK_E_DISCONNECTED:
                /* Delete disk. */
                g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE);
                g_raid_destroy_disk(disk);
                g_raid_md_promise_purge_volumes(sc);

                /* Write updated metadata to all disks. */
                g_raid_md_write_promise(md, NULL, NULL, NULL);

                /* Check if anything left. */
                if (g_raid_ndisks(sc, -1) == 0)
                        g_raid_destroy_node(sc, 0);
                else
                        g_raid_md_promise_refill(sc);
                return (0);
        }
        return (-2);
}

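/* Handle volume events; STARTMD forces a delayed volume start. */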
static int
g_raid_md_volume_event_promise(struct g_raid_md_object *md,
    struct g_raid_volume *vol, u_int event)
{
        struct g_raid_md_promise_pervolume *pv;

        pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
        switch (event) {
        case G_RAID_VOLUME_E_STARTMD:
                if (!pv->pv_started)
                        g_raid_md_promise_start(vol);
                return (0);
        }
        return (-2);
}

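/* Handle administrative control requests from userland. */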
 1257 static int
 1258 g_raid_md_ctl_promise(struct g_raid_md_object *md,
 1259     struct gctl_req *req)
 1260 {
 1261         struct g_raid_softc *sc;
 1262         struct g_raid_volume *vol, *vol1;
 1263         struct g_raid_subdisk *sd;
 1264         struct g_raid_disk *disk, *disks[PROMISE_MAX_DISKS];
 1265         struct g_raid_md_promise_perdisk *pd;
 1266         struct g_raid_md_promise_pervolume *pv;
 1267         struct g_consumer *cp;
 1268         struct g_provider *pp;
 1269         char arg[16];
 1270         const char *nodename, *verb, *volname, *levelname, *diskname;
 1271         char *tmp;
 1272         int *nargs, *force;
 1273         off_t esize, offs[PROMISE_MAX_DISKS], size, sectorsize, strip;
 1274         intmax_t *sizearg, *striparg;
 1275         int numdisks, i, len, level, qual;
 1276         int error;
 1277 
 1278         sc = md->mdo_softc;
 1279         verb = gctl_get_param(req, "verb", NULL);
 1280         nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
 1281         error = 0;
 1282         if (strcmp(verb, "label") == 0) {
 1283                 if (*nargs < 4) {
 1284                         gctl_error(req, "Invalid number of arguments.");
 1285                         return (-1);
 1286                 }
 1287                 volname = gctl_get_asciiparam(req, "arg1");
 1288                 if (volname == NULL) {
 1289                         gctl_error(req, "No volume name.");
 1290                         return (-2);
 1291                 }
 1292                 levelname = gctl_get_asciiparam(req, "arg2");
 1293                 if (levelname == NULL) {
 1294                         gctl_error(req, "No RAID level.");
 1295                         return (-3);
 1296                 }
 1297                 if (strcasecmp(levelname, "RAID5") == 0)
 1298                         levelname = "RAID5-LA";
 1299                 if (g_raid_volume_str2level(levelname, &level, &qual)) {
 1300                         gctl_error(req, "Unknown RAID level '%s'.", levelname);
 1301                         return (-4);
 1302                 }
 1303                 numdisks = *nargs - 3;
 1304                 force = gctl_get_paraml(req, "force", sizeof(*force));
 1305                 if (!g_raid_md_promise_supported(level, qual, numdisks,
 1306                     force ? *force : 0)) {
 1307                         gctl_error(req, "Unsupported RAID level "
 1308                             "(0x%02x/0x%02x), or number of disks (%d).",
 1309                             level, qual, numdisks);
 1310                         return (-5);
 1311                 }
 1312 
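                      /*
                       * An illustrative graid(8) invocation reaching this
                       * point (device names hypothetical):
                       *
                       *      graid label Promise data RAID1 ada0 ada1
                       *
                       * arg1 is the volume name, arg2 the RAID level, and
                       * arg3+ the member disks, so numdisks == 2 here.
                       */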
 1313                 /* Search for disks, connect them and probe. */
 1314                 size = INT64_MAX;
 1315                 sectorsize = 0;
 1316                 bzero(disks, sizeof(disks));
 1317                 bzero(offs, sizeof(offs));
 1318                 for (i = 0; i < numdisks; i++) {
 1319                         snprintf(arg, sizeof(arg), "arg%d", i + 3);
 1320                         diskname = gctl_get_asciiparam(req, arg);
 1321                         if (diskname == NULL) {
 1322                                 gctl_error(req, "No disk name (%s).", arg);
 1323                                 error = -6;
 1324                                 break;
 1325                         }
 1326                         if (strcmp(diskname, "NONE") == 0)
 1327                                 continue;
 1328 
 1329                         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1330                                 if (disk->d_consumer != NULL && 
 1331                                     disk->d_consumer->provider != NULL &&
 1332                                     strcmp(disk->d_consumer->provider->name,
 1333                                      diskname) == 0)
 1334                                         break;
 1335                         }
 1336                         if (disk != NULL) {
 1337                                 if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
 1338                                         gctl_error(req, "Disk '%s' is in a "
 1339                                             "wrong state (%s).", diskname,
 1340                                             g_raid_disk_state2str(disk->d_state));
 1341                                         error = -7;
 1342                                         break;
 1343                                 }
 1344                                 pd = disk->d_md_data;
 1345                                 if (pd->pd_subdisks >= PROMISE_MAX_SUBDISKS) {
 1346                                         gctl_error(req, "Disk '%s' already "
 1347                                             "used by %d volumes.",
 1348                                             diskname, pd->pd_subdisks);
 1349                                         error = -7;
 1350                                         break;
 1351                                 }
 1352                                 pp = disk->d_consumer->provider;
 1353                                 disks[i] = disk;
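                                      /*
                                       * Find an unused LBA range on this
                                       * disk for the new subdisk; offs[i]
                                       * and esize receive its start and
                                       * length in sectors.
                                       */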
 1354                                 promise_meta_unused_range(pd->pd_meta,
 1355                                     pd->pd_subdisks,
 1356                                     pp->mediasize / pp->sectorsize,
 1357                                     &offs[i], &esize);
 1358                                 size = MIN(size, (off_t)esize * pp->sectorsize);
 1359                                 sectorsize = MAX(sectorsize, pp->sectorsize);
 1360                                 continue;
 1361                         }
 1362 
 1363                         g_topology_lock();
 1364                         cp = g_raid_open_consumer(sc, diskname);
 1365                         if (cp == NULL) {
 1366                                 gctl_error(req, "Can't open disk '%s'.",
 1367                                     diskname);
 1368                                 g_topology_unlock();
 1369                                 error = -8;
 1370                                 break;
 1371                         }
 1372                         pp = cp->provider;
 1373                         pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
 1374                         disk = g_raid_create_disk(sc);
 1375                         disk->d_md_data = (void *)pd;
 1376                         disk->d_consumer = cp;
 1377                         disks[i] = disk;
 1378                         cp->private = disk;
 1379                         g_topology_unlock();
 1380 
 1381                         g_raid_get_disk_info(disk);
 1382 
 1383                         /* Reserve some space at the end of the disk for metadata. */
 1384                         size = MIN(size, pp->mediasize - 131072llu * pp->sectorsize);
 1385                         sectorsize = MAX(sectorsize, pp->sectorsize);
 1386                 }
 1387                 if (error != 0) {
 1388                         for (i = 0; i < numdisks; i++) {
 1389                                 if (disks[i] != NULL &&
 1390                                     disks[i]->d_state == G_RAID_DISK_S_NONE)
 1391                                         g_raid_destroy_disk(disks[i]);
 1392                         }
 1393                         return (error);
 1394                 }
 1395 
 1396                 if (sectorsize <= 0) {
 1397                         gctl_error(req, "Can't get sector size.");
 1398                         return (-8);
 1399                 }
 1400 
 1401                 /* Handle size argument. */
 1402                 len = sizeof(*sizearg);
 1403                 sizearg = gctl_get_param(req, "size", &len);
 1404                 if (sizearg != NULL && len == sizeof(*sizearg) &&
 1405                     *sizearg > 0) {
 1406                         if (*sizearg > size) {
 1407                                 gctl_error(req, "Size too big %lld > %lld.",
 1408                                     (long long)*sizearg, (long long)size);
 1409                                 return (-9);
 1410                         }
 1411                         size = *sizearg;
 1412                 }
 1413 
 1414                 /* Handle strip argument. */
 1415                 strip = 131072;
 1416                 len = sizeof(*striparg);
 1417                 striparg = gctl_get_param(req, "strip", &len);
 1418                 if (striparg != NULL && len == sizeof(*striparg) &&
 1419                     *striparg > 0) {
 1420                         if (*striparg < sectorsize) {
 1421                                 gctl_error(req, "Strip size too small.");
 1422                                 return (-10);
 1423                         }
 1424                         if (*striparg % sectorsize != 0) {
 1425                                 gctl_error(req, "Incorrect strip size.");
 1426                                 return (-11);
 1427                         }
 1428                         strip = *striparg;
 1429                 }
 1430 
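                      /*
                       * For RAID1E with an odd number of disks the layout
                       * repeats only every two strips per disk, hence the
                       * rounding to a multiple of 2 * strip below.
                       */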
 1431                 /* Round size down to strip or sector. */
 1432                 if (level == G_RAID_VOLUME_RL_RAID1 ||
 1433                     level == G_RAID_VOLUME_RL_SINGLE ||
 1434                     level == G_RAID_VOLUME_RL_CONCAT)
 1435                         size -= (size % sectorsize);
 1436                 else if (level == G_RAID_VOLUME_RL_RAID1E &&
 1437                     (numdisks & 1) != 0)
 1438                         size -= (size % (2 * strip));
 1439                 else
 1440                         size -= (size % strip);
 1441                 if (size <= 0) {
 1442                         gctl_error(req, "Size too small.");
 1443                         return (-13);
 1444                 }
 1445 
 1446                 /* We have all we need, create things: volume, ... */
 1447                 pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
 1448                 arc4rand(&pv->pv_id, sizeof(pv->pv_id), 0);
 1449                 pv->pv_generation = 0;
 1450                 pv->pv_started = 1;
 1451                 vol = g_raid_create_volume(sc, volname, -1);
 1452                 vol->v_md_data = pv;
 1453                 vol->v_raid_level = level;
 1454                 vol->v_raid_level_qualifier = qual;
 1455                 vol->v_strip_size = strip;
 1456                 vol->v_disks_count = numdisks;
 1457                 if (level == G_RAID_VOLUME_RL_RAID0 ||
 1458                     level == G_RAID_VOLUME_RL_CONCAT ||
 1459                     level == G_RAID_VOLUME_RL_SINGLE)
 1460                         vol->v_mediasize = size * numdisks;
 1461                 else if (level == G_RAID_VOLUME_RL_RAID1)
 1462                         vol->v_mediasize = size;
 1463                 else if (level == G_RAID_VOLUME_RL_RAID3 ||
 1464                     level == G_RAID_VOLUME_RL_RAID5)
 1465                         vol->v_mediasize = size * (numdisks - 1);
 1466                 else { /* RAID1E */
 1467                         vol->v_mediasize = ((size * numdisks) / strip / 2) *
 1468                             strip;
 1469                 }
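                      /*
                       * Illustrative sizing: with three disks and size =
                       * 100 GB per subdisk this yields 300 GB for
                       * RAID0/CONCAT/SINGLE, 100 GB for RAID1, 200 GB for
                       * RAID3/RAID5, and roughly 150 GB for RAID1E.
                       */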
 1470                 vol->v_sectorsize = sectorsize;
 1471                 g_raid_start_volume(vol);
 1472 
 1473                 /* ... and subdisks. */
 1474                 for (i = 0; i < numdisks; i++) {
 1475                         disk = disks[i];
 1476                         sd = &vol->v_subdisks[i];
 1477                         sd->sd_disk = disk;
 1478                         sd->sd_offset = (off_t)offs[i] * 512;
 1479                         sd->sd_size = size;
 1480                         if (disk == NULL)
 1481                                 continue;
 1482                         TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
 1483                         g_raid_change_disk_state(disk,
 1484                             G_RAID_DISK_S_ACTIVE);
 1485                         g_raid_change_subdisk_state(sd,
 1486                             G_RAID_SUBDISK_S_ACTIVE);
 1487                         g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
 1488                             G_RAID_EVENT_SUBDISK);
 1489                 }
 1490 
 1491                 /* Write metadata based on created entities. */
 1492                 G_RAID_DEBUG1(0, sc, "Array started.");
 1493                 g_raid_md_write_promise(md, vol, NULL, NULL);
 1494 
 1495                 /* Pick up any STALE/SPARE disks to refill the array if needed. */
 1496                 g_raid_md_promise_refill(sc);
 1497 
 1498                 g_raid_event_send(vol, G_RAID_VOLUME_E_START,
 1499                     G_RAID_EVENT_VOLUME);
 1500                 return (0);
 1501         }
 1502         if (strcmp(verb, "add") == 0) {
 1503                 gctl_error(req, "`add` command is not applicable, "
 1504                     "use `label` instead.");
 1505                 return (-99);
 1506         }
 1507         if (strcmp(verb, "delete") == 0) {
 1508                 nodename = gctl_get_asciiparam(req, "arg0");
 1509                 if (nodename != NULL && strcasecmp(sc->sc_name, nodename) != 0)
 1510                         nodename = NULL;
 1511 
 1512                 /* Full node destruction. */
 1513                 if (*nargs == 1 && nodename != NULL) {
 1514                         /* Check if some volume is still open. */
 1515                         force = gctl_get_paraml(req, "force", sizeof(*force));
 1516                         if (force != NULL && *force == 0 &&
 1517                             g_raid_nopens(sc) != 0) {
 1518                                 gctl_error(req, "Some volume is still open.");
 1519                                 return (-4);
 1520                         }
 1521 
 1522                         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1523                                 if (disk->d_consumer)
 1524                                         promise_meta_erase(disk->d_consumer);
 1525                         }
 1526                         g_raid_destroy_node(sc, 0);
 1527                         return (0);
 1528                 }
 1529 
 1530                 /* Destroy the specified volume.  If it was the last one, destroy the whole node. */
 1531                 if (*nargs > 2) {
 1532                         gctl_error(req, "Invalid number of arguments.");
 1533                         return (-1);
 1534                 }
 1535                 volname = gctl_get_asciiparam(req,
 1536                     nodename != NULL ? "arg1" : "arg0");
 1537                 if (volname == NULL) {
 1538                         gctl_error(req, "No volume name.");
 1539                         return (-2);
 1540                 }
 1541 
 1542                 /* Search for volume. */
 1543                 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
 1544                         if (strcmp(vol->v_name, volname) == 0)
 1545                                 break;
 1546                         pp = vol->v_provider;
 1547                         if (pp == NULL)
 1548                                 continue;
 1549                         if (strcmp(pp->name, volname) == 0)
 1550                                 break;
 1551                         if (strncmp(pp->name, "raid/", 5) == 0 &&
 1552                             strcmp(pp->name + 5, volname) == 0)
 1553                                 break;
 1554                 }
 1555                 if (vol == NULL) {
 1556                         i = strtol(volname, &tmp, 10);
 1557                         if (tmp != volname && tmp[0] == 0) {
 1558                                 TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
 1559                                         if (vol->v_global_id == i)
 1560                                                 break;
 1561                                 }
 1562                         }
 1563                 }
 1564                 if (vol == NULL) {
 1565                         gctl_error(req, "Volume '%s' not found.", volname);
 1566                         return (-3);
 1567                 }
 1568 
 1569                 /* Check if volume is still open. */
 1570                 force = gctl_get_paraml(req, "force", sizeof(*force));
 1571                 if (force != NULL && *force == 0 &&
 1572                     vol->v_provider_open != 0) {
 1573                         gctl_error(req, "Volume is still open.");
 1574                         return (-4);
 1575                 }
 1576 
 1577                 /* Destroy volume and potentially node. */
 1578                 i = 0;
 1579                 TAILQ_FOREACH(vol1, &sc->sc_volumes, v_next)
 1580                         i++;
 1581                 if (i >= 2) {
 1582                         g_raid_destroy_volume(vol);
 1583                         g_raid_md_promise_purge_disks(sc);
 1584                         g_raid_md_write_promise(md, NULL, NULL, NULL);
 1585                 } else {
 1586                         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1587                                 if (disk->d_consumer)
 1588                                         promise_meta_erase(disk->d_consumer);
 1589                         }
 1590                         g_raid_destroy_node(sc, 0);
 1591                 }
 1592                 return (0);
 1593         }
 1594         if (strcmp(verb, "remove") == 0 ||
 1595             strcmp(verb, "fail") == 0) {
 1596                 if (*nargs < 2) {
 1597                         gctl_error(req, "Invalid number of arguments.");
 1598                         return (-1);
 1599                 }
 1600                 for (i = 1; i < *nargs; i++) {
 1601                         snprintf(arg, sizeof(arg), "arg%d", i);
 1602                         diskname = gctl_get_asciiparam(req, arg);
 1603                         if (diskname == NULL) {
 1604                                 gctl_error(req, "No disk name (%s).", arg);
 1605                                 error = -2;
 1606                                 break;
 1607                         }
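                              /* Strip the "/dev/" (_PATH_DEV) prefix, if present. */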
 1608                         if (strncmp(diskname, _PATH_DEV, 5) == 0)
 1609                                 diskname += 5;
 1610 
 1611                         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1612                                 if (disk->d_consumer != NULL && 
 1613                                     disk->d_consumer->provider != NULL &&
 1614                                     strcmp(disk->d_consumer->provider->name,
 1615                                      diskname) == 0)
 1616                                         break;
 1617                         }
 1618                         if (disk == NULL) {
 1619                                 gctl_error(req, "Disk '%s' not found.",
 1620                                     diskname);
 1621                                 error = -3;
 1622                                 break;
 1623                         }
 1624 
 1625                         if (strcmp(verb, "fail") == 0) {
 1626                                 g_raid_md_fail_disk_promise(md, NULL, disk);
 1627                                 continue;
 1628                         }
 1629 
 1630                         /* Erase metadata on the disk being deleted, then destroy it. */
 1631                         promise_meta_erase(disk->d_consumer);
 1632                         g_raid_destroy_disk(disk);
 1633                 }
 1634                 g_raid_md_promise_purge_volumes(sc);
 1635 
 1636                 /* Write updated metadata to remaining disks. */
 1637                 g_raid_md_write_promise(md, NULL, NULL, NULL);
 1638 
 1639                 /* Check if anything is left. */
 1640                 if (g_raid_ndisks(sc, -1) == 0)
 1641                         g_raid_destroy_node(sc, 0);
 1642                 else
 1643                         g_raid_md_promise_refill(sc);
 1644                 return (error);
 1645         }
 1646         if (strcmp(verb, "insert") == 0) {
 1647                 if (*nargs < 2) {
 1648                         gctl_error(req, "Invalid number of arguments.");
 1649                         return (-1);
 1650                 }
 1651                 for (i = 1; i < *nargs; i++) {
 1652                         /* Get disk name. */
 1653                         snprintf(arg, sizeof(arg), "arg%d", i);
 1654                         diskname = gctl_get_asciiparam(req, arg);
 1655                         if (diskname == NULL) {
 1656                                 gctl_error(req, "No disk name (%s).", arg);
 1657                                 error = -3;
 1658                                 break;
 1659                         }
 1660 
 1661                         /* Try to find provider with specified name. */
 1662                         g_topology_lock();
 1663                         cp = g_raid_open_consumer(sc, diskname);
 1664                         if (cp == NULL) {
 1665                                 gctl_error(req, "Can't open disk '%s'.",
 1666                                     diskname);
 1667                                 g_topology_unlock();
 1668                                 error = -4;
 1669                                 break;
 1670                         }
 1671                         pp = cp->provider;
 1672                         g_topology_unlock();
 1673 
 1674                         pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
 1675 
 1676                         disk = g_raid_create_disk(sc);
 1677                         disk->d_consumer = cp;
 1678                         disk->d_md_data = (void *)pd;
 1679                         cp->private = disk;
 1680 
 1681                         g_raid_get_disk_info(disk);
 1682 
 1683                         /* Welcome the "new" disk. */
 1684                         g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
 1685                         promise_meta_write_spare(cp);
 1686                         g_raid_md_promise_refill(sc);
 1687                 }
 1688                 return (error);
 1689         }
 1690         return (-100);
 1691 }
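      /*
       * The verbs handled above map onto graid(8) subcommands: "label"
       * creates a new array, "delete" destroys a volume (or, for the last
       * volume, the whole node), "fail" marks a disk failed, "remove"
       * erases its metadata and drops it, and "insert" adds a disk as a
       * spare to be picked up by g_raid_md_promise_refill().
       */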
 1692 
 1693 static int
 1694 g_raid_md_write_promise(struct g_raid_md_object *md, struct g_raid_volume *tvol,
 1695     struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
 1696 {
 1697         struct g_raid_softc *sc;
 1698         struct g_raid_volume *vol;
 1699         struct g_raid_subdisk *sd;
 1700         struct g_raid_disk *disk;
 1701         struct g_raid_md_promise_perdisk *pd;
 1702         struct g_raid_md_promise_pervolume *pv;
 1703         struct promise_raid_conf *meta;
 1704         off_t rebuild_lba64;
 1705         int i, j, pos, rebuild;
 1706 
 1707         sc = md->mdo_softc;
 1708 
 1709         if (sc->sc_stopping == G_RAID_DESTROY_HARD)
 1710                 return (0);
 1711 
 1712         /* Generate new per-volume metadata for affected volumes. */
 1713         TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
 1714                 if (vol->v_stopping)
 1715                         continue;
 1716 
 1717                 /* Skip volumes not related to specified targets. */
 1718                 if (tvol != NULL && vol != tvol)
 1719                         continue;
 1720                 if (tsd != NULL && vol != tsd->sd_volume)
 1721                         continue;
 1722                 if (tdisk != NULL) {
 1723                         for (i = 0; i < vol->v_disks_count; i++) {
 1724                                 if (vol->v_subdisks[i].sd_disk == tdisk)
 1725                                         break;
 1726                         }
 1727                         if (i >= vol->v_disks_count)
 1728                                 continue;
 1729                 }
 1730 
 1731                 pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
 1732                 pv->pv_generation++;
 1733 
 1734                 meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
 1735                 if (pv->pv_meta != NULL)
 1736                         memcpy(meta, pv->pv_meta, sizeof(*meta));
 1737                 memcpy(meta->promise_id, PROMISE_MAGIC,
 1738                     sizeof(PROMISE_MAGIC) - 1);
 1739                 meta->dummy_0 = 0x00020000;
 1740                 meta->integrity = PROMISE_I_VALID;
 1741 
 1742                 meta->generation = pv->pv_generation;
 1743                 meta->status = PROMISE_S_VALID | PROMISE_S_ONLINE |
 1744                     PROMISE_S_INITED | PROMISE_S_READY;
 1745                 if (vol->v_state <= G_RAID_VOLUME_S_DEGRADED)
 1746                         meta->status |= PROMISE_S_DEGRADED;
 1747                 if (vol->v_dirty)
 1748                         meta->status |= PROMISE_S_MARKED; /* XXX: INVENTED! */
 1749                 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID0 ||
 1750                     vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE)
 1751                         meta->type = PROMISE_T_RAID0;
 1752                 else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
 1753                     vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
 1754                         meta->type = PROMISE_T_RAID1;
 1755                 else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3)
 1756                         meta->type = PROMISE_T_RAID3;
 1757                 else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID5)
 1758                         meta->type = PROMISE_T_RAID5;
 1759                 else if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT)
 1760                         meta->type = PROMISE_T_SPAN;
 1761                 else
 1762                         meta->type = PROMISE_T_JBOD;
 1763                 meta->total_disks = vol->v_disks_count;
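                      /*
                       * stripe_shift encodes the strip size as a power of
                       * two in 512-byte sectors (strip = 512 << shift);
                       * e.g. a 128 kB strip gives ffs(131072 / 1024) == 8.
                       */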
 1764                 meta->stripe_shift = ffs(vol->v_strip_size / 1024);
 1765                 meta->array_width = vol->v_disks_count;
 1766                 if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
 1767                     vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
 1768                         meta->array_width /= 2;
 1769                 meta->array_number = vol->v_global_id;
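                      /*
                       * 64-bit sector counts are split into 32-bit low and
                       * high words, presumably for compatibility with the
                       * older 32-bit on-disk layout.
                       */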
 1770                 meta->total_sectors = vol->v_mediasize / 512;
 1771                 meta->total_sectors_high = (vol->v_mediasize / 512) >> 32;
 1772                 meta->sector_size = vol->v_sectorsize / 512;
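                      /* Synthesize legacy CHS geometry from the sector count. */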
 1773                 meta->cylinders = meta->total_sectors / (255 * 63) - 1;
 1774                 meta->heads = 254;
 1775                 meta->sectors = 63;
 1776                 meta->volume_id = pv->pv_id;
 1777                 rebuild_lba64 = UINT64_MAX;
 1778                 rebuild = 0;
 1779                 for (i = 0; i < vol->v_disks_count; i++) {
 1780                         sd = &vol->v_subdisks[i];
 1781                         /* For RAID0+1 we need to translate order. */
 1782                         pos = promise_meta_translate_disk(vol, i);
 1783                         meta->disks[pos].flags = PROMISE_F_VALID |
 1784                             PROMISE_F_ASSIGNED;
 1785                         if (sd->sd_state == G_RAID_SUBDISK_S_NONE) {
 1786                                 meta->disks[pos].flags |= 0;
 1787                         } else if (sd->sd_state == G_RAID_SUBDISK_S_FAILED) {
 1788                                 meta->disks[pos].flags |=
 1789                                     PROMISE_F_DOWN | PROMISE_F_REDIR;
 1790                         } else if (sd->sd_state <= G_RAID_SUBDISK_S_REBUILD) {
 1791                                 meta->disks[pos].flags |=
 1792                                     PROMISE_F_ONLINE | PROMISE_F_REDIR;
 1793                                 if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
 1794                                         rebuild_lba64 = MIN(rebuild_lba64,
 1795                                             sd->sd_rebuild_pos / 512);
 1796                                 } else
 1797                                         rebuild_lba64 = 0;
 1798                                 rebuild = 1;
 1799                         } else {
 1800                                 meta->disks[pos].flags |= PROMISE_F_ONLINE;
 1801                                 if (sd->sd_state < G_RAID_SUBDISK_S_ACTIVE) {
 1802                                         meta->status |= PROMISE_S_MARKED;
 1803                                         if (sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
 1804                                                 rebuild_lba64 = MIN(rebuild_lba64,
 1805                                                     sd->sd_rebuild_pos / 512);
 1806                                         } else
 1807                                                 rebuild_lba64 = 0;
 1808                                 }
 1809                         }
 1810                         if (pv->pv_meta != NULL) {
 1811                                 meta->disks[pos].id = pv->pv_meta->disks[pos].id;
 1812                         } else {
 1813                                 meta->disks[pos].number = i * 2;
 1814                                 arc4rand(&meta->disks[pos].id,
 1815                                     sizeof(meta->disks[pos].id), 0);
 1816                         }
 1817                 }
 1818                 promise_meta_put_name(meta, vol->v_name);
 1819 
 1820                 /* Try to mimic AMD BIOS rebuild/resync behavior. */
 1821                 if (rebuild_lba64 != UINT64_MAX) {
 1822                         if (rebuild)
 1823                                 meta->magic_3 = 0x03040010UL; /* Rebuild? */
 1824                         else
 1825                                 meta->magic_3 = 0x03040008UL; /* Resync? */
 1826                         /* Translate from per-disk to per-volume LBA. */
 1827                         if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
 1828                             vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
 1829                                 rebuild_lba64 *= meta->array_width;
 1830                         } else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
 1831                             vol->v_raid_level == G_RAID_VOLUME_RL_RAID5) {
 1832                                 rebuild_lba64 *= meta->array_width - 1;
 1833                         } else
 1834                                 rebuild_lba64 = 0;
 1835                 } else
 1836                         meta->magic_3 = 0x03000000UL;
 1837                 meta->rebuild_lba64 = rebuild_lba64;
 1838                 meta->magic_4 = 0x04010101UL;
 1839 
 1840                 /* Replace per-volume metadata with new. */
 1841                 if (pv->pv_meta != NULL)
 1842                         free(pv->pv_meta, M_MD_PROMISE);
 1843                 pv->pv_meta = meta;
 1844 
 1845                 /* Copy new metadata to the disks, adding or replacing old. */
 1846                 for (i = 0; i < vol->v_disks_count; i++) {
 1847                         sd = &vol->v_subdisks[i];
 1848                         disk = sd->sd_disk;
 1849                         if (disk == NULL)
 1850                                 continue;
 1851                         /* For RAID0+1 we need to translate order. */
 1852                         pos = promise_meta_translate_disk(vol, i);
 1853                         pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
 1854                         for (j = 0; j < pd->pd_subdisks; j++) {
 1855                                 if (pd->pd_meta[j]->volume_id == meta->volume_id)
 1856                                         break;
 1857                         }
 1858                         if (j == pd->pd_subdisks)
 1859                                 pd->pd_subdisks++;
 1860                         if (pd->pd_meta[j] != NULL)
 1861                                 free(pd->pd_meta[j], M_MD_PROMISE);
 1862                         pd->pd_meta[j] = promise_meta_copy(meta);
 1863                         pd->pd_meta[j]->disk = meta->disks[pos];
 1864                         pd->pd_meta[j]->disk.number = pos;
 1865                         pd->pd_meta[j]->disk_offset_high =
 1866                             (sd->sd_offset / 512) >> 32;
 1867                         pd->pd_meta[j]->disk_offset = sd->sd_offset / 512;
 1868                         pd->pd_meta[j]->disk_sectors_high =
 1869                             (sd->sd_size / 512) >> 32;
 1870                         pd->pd_meta[j]->disk_sectors = sd->sd_size / 512;
 1871                         if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
 1872                                 pd->pd_meta[j]->disk_rebuild_high =
 1873                                     (sd->sd_rebuild_pos / 512) >> 32;
 1874                                 pd->pd_meta[j]->disk_rebuild =
 1875                                     sd->sd_rebuild_pos / 512;
 1876                         } else if (sd->sd_state < G_RAID_SUBDISK_S_REBUILD) {
 1877                                 pd->pd_meta[j]->disk_rebuild_high = 0;
 1878                                 pd->pd_meta[j]->disk_rebuild = 0;
 1879                         } else {
 1880                                 pd->pd_meta[j]->disk_rebuild_high = UINT32_MAX;
 1881                                 pd->pd_meta[j]->disk_rebuild = UINT32_MAX;
 1882                         }
 1883                         pd->pd_updated = 1;
 1884                 }
 1885         }
 1886 
 1887         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1888                 pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
 1889                 if (disk->d_state != G_RAID_DISK_S_ACTIVE)
 1890                         continue;
 1891                 if (!pd->pd_updated)
 1892                         continue;
 1893                 G_RAID_DEBUG(1, "Writing Promise metadata to %s",
 1894                     g_raid_get_diskname(disk));
 1895                 for (i = 0; i < pd->pd_subdisks; i++)
 1896                         g_raid_md_promise_print(pd->pd_meta[i]);
 1897                 promise_meta_write(disk->d_consumer,
 1898                     pd->pd_meta, pd->pd_subdisks);
 1899                 pd->pd_updated = 0;
 1900         }
 1901 
 1902         return (0);
 1903 }
 1904 
 1905 static int
 1906 g_raid_md_fail_disk_promise(struct g_raid_md_object *md,
 1907     struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
 1908 {
 1909         struct g_raid_softc *sc;
 1910         struct g_raid_md_promise_perdisk *pd;
 1911         struct g_raid_subdisk *sd;
 1912         int i, pos;
 1913 
 1914         sc = md->mdo_softc;
 1915         pd = (struct g_raid_md_promise_perdisk *)tdisk->d_md_data;
 1916 
 1917         /* We cannot fail a disk that is not currently part of the array. */
 1918         if (tdisk->d_state != G_RAID_DISK_S_ACTIVE)
 1919                 return (-1);
 1920 
 1921         /*
 1922          * Mark the disk as failed in metadata and try to write that metadata
 1923          * to the disk itself to prevent its later resurrection as STALE.
 1924          */
 1925         if (pd->pd_subdisks > 0 && tdisk->d_consumer != NULL)
 1926                 G_RAID_DEBUG(1, "Writing Promise metadata to %s",
 1927                     g_raid_get_diskname(tdisk));
 1928         for (i = 0; i < pd->pd_subdisks; i++) {
 1929                 pd->pd_meta[i]->disk.flags |=
 1930                     PROMISE_F_DOWN | PROMISE_F_REDIR;
 1931                 pos = pd->pd_meta[i]->disk.number;
 1932                 if (pos >= 0 && pos < PROMISE_MAX_DISKS) {
 1933                         pd->pd_meta[i]->disks[pos].flags |=
 1934                             PROMISE_F_DOWN | PROMISE_F_REDIR;
 1935                 }
 1936                 g_raid_md_promise_print(pd->pd_meta[i]);
 1937         }
 1938         if (tdisk->d_consumer != NULL)
 1939                 promise_meta_write(tdisk->d_consumer,
 1940                     pd->pd_meta, pd->pd_subdisks);
 1941 
 1942         /* Change states. */
 1943         g_raid_change_disk_state(tdisk, G_RAID_DISK_S_FAILED);
 1944         TAILQ_FOREACH(sd, &tdisk->d_subdisks, sd_next) {
 1945                 g_raid_change_subdisk_state(sd,
 1946                     G_RAID_SUBDISK_S_FAILED);
 1947                 g_raid_event_send(sd, G_RAID_SUBDISK_E_FAILED,
 1948                     G_RAID_EVENT_SUBDISK);
 1949         }
 1950 
 1951         /* Write updated metadata to remaining disks. */
 1952         g_raid_md_write_promise(md, NULL, NULL, tdisk);
 1953 
 1954         g_raid_md_promise_refill(sc);
 1955         return (0);
 1956 }
 1957 
 1958 static int
 1959 g_raid_md_free_disk_promise(struct g_raid_md_object *md,
 1960     struct g_raid_disk *disk)
 1961 {
 1962         struct g_raid_md_promise_perdisk *pd;
 1963         int i;
 1964 
 1965         pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
 1966         for (i = 0; i < pd->pd_subdisks; i++) {
 1967                 if (pd->pd_meta[i] != NULL) {
 1968                         free(pd->pd_meta[i], M_MD_PROMISE);
 1969                         pd->pd_meta[i] = NULL;
 1970                 }
 1971         }
 1972         free(pd, M_MD_PROMISE);
 1973         disk->d_md_data = NULL;
 1974         return (0);
 1975 }
 1976 
 1977 static int
 1978 g_raid_md_free_volume_promise(struct g_raid_md_object *md,
 1979     struct g_raid_volume *vol)
 1980 {
 1981         struct g_raid_md_promise_pervolume *pv;
 1982 
 1983         pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
 1984         if (pv && pv->pv_meta != NULL) {
 1985                 free(pv->pv_meta, M_MD_PROMISE);
 1986                 pv->pv_meta = NULL;
 1987         }
 1988         if (pv && !pv->pv_started) {
 1989                 pv->pv_started = 1;
 1990                 callout_stop(&pv->pv_start_co);
 1991         }
 1992         free(pv, M_MD_PROMISE);
 1993         vol->v_md_data = NULL;
 1994         return (0);
 1995 }
 1996 
 1997 static int
 1998 g_raid_md_free_promise(struct g_raid_md_object *md)
 1999 {
 2000 
 2001         return (0);
 2002 }
 2003 
 2004 G_RAID_MD_DECLARE(promise, "Promise");
