FreeBSD/Linux Kernel Cross Reference
sys/geom/raid/md_nvidia.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2011 Alexander Motin <mav@FreeBSD.org>
    5  * Copyright (c) 2000 - 2008 Søren Schmidt <sos@FreeBSD.org>
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/bio.h>
   35 #include <sys/endian.h>
   36 #include <sys/kernel.h>
   37 #include <sys/kobj.h>
   38 #include <sys/limits.h>
   39 #include <sys/lock.h>
   40 #include <sys/malloc.h>
   41 #include <sys/mutex.h>
   42 #include <sys/systm.h>
   43 #include <sys/taskqueue.h>
   44 #include <geom/geom.h>
   45 #include <geom/geom_dbg.h>
   46 #include "geom/raid/g_raid.h"
   47 #include "g_raid_md_if.h"
   48 
   49 static MALLOC_DEFINE(M_MD_NVIDIA, "md_nvidia_data", "GEOM_RAID NVIDIA metadata");
   50 
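       /*
        * On-disk NVIDIA RAID metadata block.  A copy is kept in the
        * next-to-last sector of each member disk and is protected by a
        * 32-bit checksum: the first config_size 32-bit words must sum to zero.
        */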
   51 struct nvidia_raid_conf {
   52         uint8_t         nvidia_id[8];
   53 #define NVIDIA_MAGIC                "NVIDIA  "
   54 
   55         uint32_t        config_size;
   56         uint32_t        checksum;
   57         uint16_t        version;
   58         uint8_t         disk_number;
   59         uint8_t         dummy_0;
   60         uint32_t        total_sectors;
   61         uint32_t        sector_size;
   62         uint8_t         name[16];
   63         uint8_t         revision[4];
   64         uint32_t        disk_status;
   65 
   66         uint32_t        magic_0;
   67 #define NVIDIA_MAGIC0           0x00640044
   68 
   69         uint64_t        volume_id[2];
   70         uint8_t         state;
   71 #define NVIDIA_S_IDLE           0
   72 #define NVIDIA_S_INIT           2
   73 #define NVIDIA_S_REBUILD        3
   74 #define NVIDIA_S_UPGRADE        4
   75 #define NVIDIA_S_SYNC           5
   76         uint8_t         array_width;
   77         uint8_t         total_disks;
   78         uint8_t         orig_array_width;
   79         uint16_t        type;
   80 #define NVIDIA_T_RAID0          0x0080
   81 #define NVIDIA_T_RAID1          0x0081
   82 #define NVIDIA_T_RAID3          0x0083
   83 #define NVIDIA_T_RAID5          0x0085  /* RLQ = 00/02? */
   84 #define NVIDIA_T_RAID5_SYM      0x0095  /* RLQ = 03 */
   85 #define NVIDIA_T_RAID10         0x008a
   86 #define NVIDIA_T_RAID01         0x8180
   87 #define NVIDIA_T_CONCAT         0x00ff
   88 
   89         uint16_t        dummy_3;
   90         uint32_t        strip_sectors;
   91         uint32_t        strip_bytes;
   92         uint32_t        strip_shift;
   93         uint32_t        strip_mask;
   94         uint32_t        stripe_sectors;
   95         uint32_t        stripe_bytes;
   96         uint32_t        rebuild_lba;
   97         uint32_t        orig_type;
   98         uint32_t        orig_total_sectors;
   99         uint32_t        status;
  100 #define NVIDIA_S_BOOTABLE       0x00000001
  101 #define NVIDIA_S_DEGRADED       0x00000002
  102 
  103         uint32_t        filler[98];
  104 } __packed;
  105 
  106 struct g_raid_md_nvidia_perdisk {
  107         struct nvidia_raid_conf *pd_meta;
  108         int                      pd_disk_pos;
  109         off_t                    pd_disk_size;
  110 };
  111 
  112 struct g_raid_md_nvidia_object {
  113         struct g_raid_md_object  mdio_base;
  114         uint64_t                 mdio_volume_id[2];
  115         struct nvidia_raid_conf *mdio_meta;
  116         struct callout           mdio_start_co; /* STARTING state timer. */
  117         int                      mdio_total_disks;
  118         int                      mdio_disks_present;
  119         int                      mdio_started;
  120         int                      mdio_incomplete;
  121         struct root_hold_token  *mdio_rootmount; /* Root mount delay token. */
  122 };
  123 
  124 static g_raid_md_create_t g_raid_md_create_nvidia;
  125 static g_raid_md_taste_t g_raid_md_taste_nvidia;
  126 static g_raid_md_event_t g_raid_md_event_nvidia;
  127 static g_raid_md_ctl_t g_raid_md_ctl_nvidia;
  128 static g_raid_md_write_t g_raid_md_write_nvidia;
  129 static g_raid_md_fail_disk_t g_raid_md_fail_disk_nvidia;
  130 static g_raid_md_free_disk_t g_raid_md_free_disk_nvidia;
  131 static g_raid_md_free_t g_raid_md_free_nvidia;
  132 
  133 static kobj_method_t g_raid_md_nvidia_methods[] = {
  134         KOBJMETHOD(g_raid_md_create,    g_raid_md_create_nvidia),
  135         KOBJMETHOD(g_raid_md_taste,     g_raid_md_taste_nvidia),
  136         KOBJMETHOD(g_raid_md_event,     g_raid_md_event_nvidia),
  137         KOBJMETHOD(g_raid_md_ctl,       g_raid_md_ctl_nvidia),
  138         KOBJMETHOD(g_raid_md_write,     g_raid_md_write_nvidia),
  139         KOBJMETHOD(g_raid_md_fail_disk, g_raid_md_fail_disk_nvidia),
  140         KOBJMETHOD(g_raid_md_free_disk, g_raid_md_free_disk_nvidia),
  141         KOBJMETHOD(g_raid_md_free,      g_raid_md_free_nvidia),
  142         { 0, 0 }
  143 };
  144 
  145 static struct g_raid_md_class g_raid_md_nvidia_class = {
  146         "NVIDIA",
  147         g_raid_md_nvidia_methods,
  148         sizeof(struct g_raid_md_nvidia_object),
  149         .mdc_enable = 1,
  150         .mdc_priority = 100
  151 };
  152 
  153 static int NVIDIANodeID = 1;
  154 
  155 static void
  156 g_raid_md_nvidia_print(struct nvidia_raid_conf *meta)
  157 {
  158 
  159         if (g_raid_debug < 1)
  160                 return;
  161 
  162         printf("********* ATA NVIDIA RAID Metadata *********\n");
  163         printf("nvidia_id           <%.8s>\n", meta->nvidia_id);
  164         printf("config_size         %u\n", meta->config_size);
  165         printf("checksum            0x%08x\n", meta->checksum);
  166         printf("version             0x%04x\n", meta->version);
  167         printf("disk_number         %d\n", meta->disk_number);
  168         printf("dummy_0             0x%02x\n", meta->dummy_0);
  169         printf("total_sectors       %u\n", meta->total_sectors);
  170         printf("sector_size         %u\n", meta->sector_size);
  171         printf("name                <%.16s>\n", meta->name);
  172         printf("revision            0x%02x%02x%02x%02x\n",
  173             meta->revision[0], meta->revision[1],
  174             meta->revision[2], meta->revision[3]);
  175         printf("disk_status         0x%08x\n", meta->disk_status);
  176         printf("magic_0             0x%08x\n", meta->magic_0);
  177         printf("volume_id           0x%016jx%016jx\n",
  178             meta->volume_id[1], meta->volume_id[0]);
  179         printf("state               0x%02x\n", meta->state);
  180         printf("array_width         %u\n", meta->array_width);
  181         printf("total_disks         %u\n", meta->total_disks);
  182         printf("orig_array_width    %u\n", meta->orig_array_width);
  183         printf("type                0x%04x\n", meta->type);
  184         printf("dummy_3             0x%04x\n", meta->dummy_3);
  185         printf("strip_sectors       %u\n", meta->strip_sectors);
  186         printf("strip_bytes         %u\n", meta->strip_bytes);
  187         printf("strip_shift         %u\n", meta->strip_shift);
  188         printf("strip_mask          0x%08x\n", meta->strip_mask);
  189         printf("stripe_sectors      %u\n", meta->stripe_sectors);
  190         printf("stripe_bytes        %u\n", meta->stripe_bytes);
  191         printf("rebuild_lba         %u\n", meta->rebuild_lba);
  192         printf("orig_type           0x%04x\n", meta->orig_type);
  193         printf("orig_total_sectors  %u\n", meta->orig_total_sectors);
  194         printf("status              0x%08x\n", meta->status);
  195         printf("=================================================\n");
  196 }
  197 
  198 static struct nvidia_raid_conf *
  199 nvidia_meta_copy(struct nvidia_raid_conf *meta)
  200 {
  201         struct nvidia_raid_conf *nmeta;
  202 
  203         nmeta = malloc(sizeof(*meta), M_MD_NVIDIA, M_WAITOK);
  204         memcpy(nmeta, meta, sizeof(*meta));
  205         return (nmeta);
  206 }
  207 
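       /*
        * Translate a metadata disk number into the subdisk position used by
        * g_raid; only RAID0+1 arrays keep their disks in a different order.
        */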
  208 static int
  209 nvidia_meta_translate_disk(struct nvidia_raid_conf *meta, int md_disk_pos)
  210 {
  211         int disk_pos;
  212 
  213         if (md_disk_pos >= 0 && meta->type == NVIDIA_T_RAID01) {
  214                 disk_pos = (md_disk_pos / meta->array_width) +
  215                     (md_disk_pos % meta->array_width) * meta->array_width;
  216         } else
  217                 disk_pos = md_disk_pos;
  218         return (disk_pos);
  219 }
  220 
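       /* Copy the 16-byte volume name into buf, trimming trailing padding. */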
  221 static void
  222 nvidia_meta_get_name(struct nvidia_raid_conf *meta, char *buf)
  223 {
  224         int i;
  225 
  226         strncpy(buf, meta->name, 16);
  227         buf[16] = 0;
  228         for (i = 15; i >= 0; i--) {
  229                 if (buf[i] > 0x20)
  230                         break;
  231                 buf[i] = 0;
  232         }
  233 }
  234 
  235 static void
  236 nvidia_meta_put_name(struct nvidia_raid_conf *meta, char *buf)
  237 {
  238 
  239         memset(meta->name, 0x20, 16);
  240         memcpy(meta->name, buf, MIN(strlen(buf), 16));
  241 }
  242 
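       /*
        * Read the metadata block from the next-to-last sector of the provider
        * and sanity-check its magic, size, checksum, volume state and RAID
        * type.  Returns a malloc'ed copy, or NULL if no valid metadata found.
        */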
  243 static struct nvidia_raid_conf *
  244 nvidia_meta_read(struct g_consumer *cp)
  245 {
  246         struct g_provider *pp;
  247         struct nvidia_raid_conf *meta;
  248         char *buf;
  249         int error, i;
  250         uint32_t checksum, *ptr;
  251 
  252         pp = cp->provider;
  253         if (pp->sectorsize < sizeof(*meta))
  254                 return (NULL);
  255         /* Read the anchor sector. */
  256         buf = g_read_data(cp,
  257             pp->mediasize - 2 * pp->sectorsize, pp->sectorsize, &error);
  258         if (buf == NULL) {
  259                 G_RAID_DEBUG(1, "Cannot read metadata from %s (error=%d).",
  260                     pp->name, error);
  261                 return (NULL);
  262         }
  263         meta = (struct nvidia_raid_conf *)buf;
  264 
   265         /* Check if this is an NVIDIA RAID metadata block. */
  266         if (strncmp(meta->nvidia_id, NVIDIA_MAGIC, strlen(NVIDIA_MAGIC))) {
  267                 G_RAID_DEBUG(1, "NVIDIA signature check failed on %s", pp->name);
  268                 g_free(buf);
  269                 return (NULL);
  270         }
  271         if (meta->config_size > 128 ||
  272             meta->config_size < 30) {
  273                 G_RAID_DEBUG(1, "NVIDIA metadata size looks wrong: %d",
  274                     meta->config_size);
  275                 g_free(buf);
  276                 return (NULL);
  277         }
  278         meta = malloc(sizeof(*meta), M_MD_NVIDIA, M_WAITOK);
  279         memcpy(meta, buf, min(sizeof(*meta), pp->sectorsize));
  280         g_free(buf);
  281 
  282         /* Check metadata checksum. */
  283         for (checksum = 0, ptr = (uint32_t *)meta,
  284             i = 0; i < meta->config_size; i++)
  285                 checksum += *ptr++;
  286         if (checksum != 0) {
  287                 G_RAID_DEBUG(1, "NVIDIA checksum check failed on %s", pp->name);
  288                 free(meta, M_MD_NVIDIA);
  289                 return (NULL);
  290         }
  291 
  292         /* Check volume state. */
  293         if (meta->state != NVIDIA_S_IDLE && meta->state != NVIDIA_S_INIT &&
  294             meta->state != NVIDIA_S_REBUILD && meta->state != NVIDIA_S_SYNC) {
  295                 G_RAID_DEBUG(1, "NVIDIA unknown state on %s (0x%02x)",
  296                     pp->name, meta->state);
  297                 free(meta, M_MD_NVIDIA);
  298                 return (NULL);
  299         }
  300 
  301         /* Check raid type. */
  302         if (meta->type != NVIDIA_T_RAID0 && meta->type != NVIDIA_T_RAID1 &&
  303             meta->type != NVIDIA_T_RAID3 && meta->type != NVIDIA_T_RAID5 &&
  304             meta->type != NVIDIA_T_RAID5_SYM &&
  305             meta->type != NVIDIA_T_RAID01 && meta->type != NVIDIA_T_CONCAT) {
  306                 G_RAID_DEBUG(1, "NVIDIA unknown RAID level on %s (0x%02x)",
  307                     pp->name, meta->type);
  308                 free(meta, M_MD_NVIDIA);
  309                 return (NULL);
  310         }
  311 
  312         return (meta);
  313 }
  314 
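       /*
        * Recompute the checksum and write the metadata block back to the
        * next-to-last sector of the provider.
        */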
  315 static int
  316 nvidia_meta_write(struct g_consumer *cp, struct nvidia_raid_conf *meta)
  317 {
  318         struct g_provider *pp;
  319         char *buf;
  320         int error, i;
  321         uint32_t checksum, *ptr;
  322 
  323         pp = cp->provider;
  324 
   325         /* Recalculate the checksum in case the metadata has changed. */
  326         meta->checksum = 0;
  327         for (checksum = 0, ptr = (uint32_t *)meta,
  328             i = 0; i < meta->config_size; i++)
  329                 checksum += *ptr++;
  330         meta->checksum -= checksum;
  331 
  332         /* Create and fill buffer. */
  333         buf = malloc(pp->sectorsize, M_MD_NVIDIA, M_WAITOK | M_ZERO);
  334         memcpy(buf, meta, sizeof(*meta));
  335 
  336         /* Write metadata. */
  337         error = g_write_data(cp,
  338             pp->mediasize - 2 * pp->sectorsize, buf, pp->sectorsize);
  339         if (error != 0) {
  340                 G_RAID_DEBUG(1, "Cannot write metadata to %s (error=%d).",
  341                     pp->name, error);
  342         }
  343 
  344         free(buf, M_MD_NVIDIA);
  345         return (error);
  346 }
  347 
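       /* Erase on-disk metadata by overwriting its sector with zeroes. */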
  348 static int
  349 nvidia_meta_erase(struct g_consumer *cp)
  350 {
  351         struct g_provider *pp;
  352         char *buf;
  353         int error;
  354 
  355         pp = cp->provider;
  356         buf = malloc(pp->sectorsize, M_MD_NVIDIA, M_WAITOK | M_ZERO);
  357         error = g_write_data(cp,
  358             pp->mediasize - 2 * pp->sectorsize, buf, pp->sectorsize);
  359         if (error != 0) {
  360                 G_RAID_DEBUG(1, "Cannot erase metadata on %s (error=%d).",
  361                     pp->name, error);
  362         }
  363         free(buf, M_MD_NVIDIA);
  364         return (error);
  365 }
  366 
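       /* Find the disk occupying the given metadata position, if any. */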
  367 static struct g_raid_disk *
  368 g_raid_md_nvidia_get_disk(struct g_raid_softc *sc, int id)
  369 {
  370         struct g_raid_disk      *disk;
  371         struct g_raid_md_nvidia_perdisk *pd;
  372 
  373         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
  374                 pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
  375                 if (pd->pd_disk_pos == id)
  376                         break;
  377         }
  378         return (disk);
  379 }
  380 
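       /*
        * Check whether the requested RAID level, qualifier and number of
        * disks can be represented in NVIDIA metadata.
        */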
  381 static int
  382 g_raid_md_nvidia_supported(int level, int qual, int disks, int force)
  383 {
  384 
  385         switch (level) {
  386         case G_RAID_VOLUME_RL_RAID0:
  387                 if (disks < 1)
  388                         return (0);
  389                 if (!force && (disks < 2 || disks > 6))
  390                         return (0);
  391                 break;
  392         case G_RAID_VOLUME_RL_RAID1:
  393                 if (disks < 1)
  394                         return (0);
  395                 if (!force && (disks != 2))
  396                         return (0);
  397                 break;
  398         case G_RAID_VOLUME_RL_RAID1E:
  399                 if (disks < 2)
  400                         return (0);
  401                 if (disks % 2 != 0)
  402                         return (0);
  403                 if (!force && (disks < 4))
  404                         return (0);
  405                 break;
  406         case G_RAID_VOLUME_RL_SINGLE:
  407                 if (disks != 1)
  408                         return (0);
  409                 break;
  410         case G_RAID_VOLUME_RL_CONCAT:
  411                 if (disks < 2)
  412                         return (0);
  413                 break;
  414         case G_RAID_VOLUME_RL_RAID5:
  415                 if (disks < 3)
  416                         return (0);
  417                 if (qual != G_RAID_VOLUME_RLQ_R5LA &&
  418                     qual != G_RAID_VOLUME_RLQ_R5LS)
  419                         return (0);
  420                 break;
  421         default:
  422                 return (0);
  423         }
  424         if (level != G_RAID_VOLUME_RL_RAID5 && qual != G_RAID_VOLUME_RLQ_NONE)
  425                 return (0);
  426         return (1);
  427 }
  428 
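       /*
        * Put a newly appeared disk into its place in the array, taking over
        * the subdisks of the OFFLINE placeholder or FAILED disk it replaces.
        * Disks that fit nowhere are turned into spares.
        */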
  429 static int
  430 g_raid_md_nvidia_start_disk(struct g_raid_disk *disk)
  431 {
  432         struct g_raid_softc *sc;
  433         struct g_raid_subdisk *sd, *tmpsd;
  434         struct g_raid_disk *olddisk, *tmpdisk;
  435         struct g_raid_md_object *md;
  436         struct g_raid_md_nvidia_object *mdi;
  437         struct g_raid_md_nvidia_perdisk *pd, *oldpd;
  438         struct nvidia_raid_conf *meta;
  439         int disk_pos, resurrection = 0;
  440 
  441         sc = disk->d_softc;
  442         md = sc->sc_md;
  443         mdi = (struct g_raid_md_nvidia_object *)md;
  444         meta = mdi->mdio_meta;
  445         pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
  446         olddisk = NULL;
  447 
   448         /* Find the disk position recorded in its own metadata. */
  449         if (pd->pd_meta != NULL) {
  450                 disk_pos = pd->pd_meta->disk_number;
  451                 if (disk_pos >= meta->total_disks || mdi->mdio_started)
  452                         disk_pos = -3;
  453         } else
  454                 disk_pos = -3;
  455         /* For RAID0+1 we need to translate order. */
  456         disk_pos = nvidia_meta_translate_disk(meta, disk_pos);
  457         if (disk_pos < 0) {
  458                 G_RAID_DEBUG1(1, sc, "Unknown, probably new or stale disk");
  459                 /* If we are in the start process, that's all for now. */
  460                 if (!mdi->mdio_started)
  461                         goto nofit;
  462                 /*
   463                  * If we have already started, try to make use of the disk.
  464                  * Try to replace OFFLINE disks first, then FAILED.
  465                  */
  466                 TAILQ_FOREACH(tmpdisk, &sc->sc_disks, d_next) {
  467                         if (tmpdisk->d_state != G_RAID_DISK_S_OFFLINE &&
  468                             tmpdisk->d_state != G_RAID_DISK_S_FAILED)
  469                                 continue;
  470                         /* Make sure this disk is big enough. */
  471                         TAILQ_FOREACH(sd, &tmpdisk->d_subdisks, sd_next) {
  472                                 if (sd->sd_offset + sd->sd_size + 2 * 512 >
  473                                     pd->pd_disk_size) {
  474                                         G_RAID_DEBUG1(1, sc,
  475                                             "Disk too small (%ju < %ju)",
  476                                             pd->pd_disk_size,
   477                                             sd->sd_offset + sd->sd_size + 2 * 512);
  478                                         break;
  479                                 }
  480                         }
  481                         if (sd != NULL)
  482                                 continue;
  483                         if (tmpdisk->d_state == G_RAID_DISK_S_OFFLINE) {
  484                                 olddisk = tmpdisk;
  485                                 break;
  486                         } else if (olddisk == NULL)
  487                                 olddisk = tmpdisk;
  488                 }
  489                 if (olddisk == NULL) {
  490 nofit:
  491                         g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
  492                         return (1);
  493                 }
  494                 oldpd = (struct g_raid_md_nvidia_perdisk *)olddisk->d_md_data;
  495                 disk_pos = oldpd->pd_disk_pos;
  496                 resurrection = 1;
  497         }
  498 
  499         if (olddisk == NULL) {
  500                 /* Find placeholder by position. */
  501                 olddisk = g_raid_md_nvidia_get_disk(sc, disk_pos);
  502                 if (olddisk == NULL)
  503                         panic("No disk at position %d!", disk_pos);
  504                 if (olddisk->d_state != G_RAID_DISK_S_OFFLINE) {
  505                         G_RAID_DEBUG1(1, sc, "More than one disk for pos %d",
  506                             disk_pos);
  507                         g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE);
  508                         return (0);
  509                 }
  510                 oldpd = (struct g_raid_md_nvidia_perdisk *)olddisk->d_md_data;
  511         }
  512 
  513         /* Replace failed disk or placeholder with new disk. */
  514         TAILQ_FOREACH_SAFE(sd, &olddisk->d_subdisks, sd_next, tmpsd) {
  515                 TAILQ_REMOVE(&olddisk->d_subdisks, sd, sd_next);
  516                 TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
  517                 sd->sd_disk = disk;
  518         }
  519         oldpd->pd_disk_pos = -2;
  520         pd->pd_disk_pos = disk_pos;
  521 
  522         /* If it was placeholder -- destroy it. */
  523         if (olddisk->d_state == G_RAID_DISK_S_OFFLINE) {
  524                 g_raid_destroy_disk(olddisk);
  525         } else {
  526                 /* Otherwise, make it STALE_FAILED. */
  527                 g_raid_change_disk_state(olddisk, G_RAID_DISK_S_STALE_FAILED);
  528         }
  529 
  530         /* Welcome the new disk. */
  531         if (resurrection)
  532                 g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
  533         else// if (pd->pd_meta->disk_status == NVIDIA_S_CURRENT ||
  534             //pd->pd_meta->disk_status == NVIDIA_S_REBUILD)
  535                 g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
  536 //      else
  537 //              g_raid_change_disk_state(disk, G_RAID_DISK_S_FAILED);
  538         TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
  539                 /*
   540                  * Different disks may have different sizes in
   541                  * concat mode.  Update from the real disk size.
  542                  */
  543                 if (meta->type == NVIDIA_T_CONCAT)
  544                         sd->sd_size = pd->pd_disk_size - 0x800 * 512;
  545 
  546                 if (resurrection) {
  547                         /* New or ex-spare disk. */
  548                         g_raid_change_subdisk_state(sd,
  549                             G_RAID_SUBDISK_S_NEW);
  550                 } else if (meta->state == NVIDIA_S_REBUILD &&
  551                     (pd->pd_meta->disk_status & 0x100)) {
  552                         /* Rebuilding disk. */
  553                         g_raid_change_subdisk_state(sd,
  554                             G_RAID_SUBDISK_S_REBUILD);
  555                         sd->sd_rebuild_pos = (off_t)pd->pd_meta->rebuild_lba /
  556                             meta->array_width * pd->pd_meta->sector_size;
  557                 } else if (meta->state == NVIDIA_S_SYNC) {
  558                         /* Resyncing/dirty disk. */
  559                         g_raid_change_subdisk_state(sd,
  560                             G_RAID_SUBDISK_S_RESYNC);
  561                         sd->sd_rebuild_pos = (off_t)pd->pd_meta->rebuild_lba /
  562                             meta->array_width * pd->pd_meta->sector_size;
  563                 } else {
  564                         /* Up to date disk. */
  565                         g_raid_change_subdisk_state(sd,
  566                             G_RAID_SUBDISK_S_ACTIVE);
  567                 }
  568                 g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
  569                     G_RAID_EVENT_SUBDISK);
  570         }
  571 
   572         /* Update the status of our need for a spare. */
  573         if (mdi->mdio_started) {
  574                 mdi->mdio_incomplete =
  575                     (g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) <
  576                      mdi->mdio_total_disks);
  577         }
  578 
  579         return (resurrection);
  580 }
  581 
  582 static void
  583 g_disk_md_nvidia_retaste(void *arg, int pending)
  584 {
  585 
  586         G_RAID_DEBUG(1, "Array is not complete, trying to retaste.");
  587         g_retaste(&g_raid_class);
  588         free(arg, M_MD_NVIDIA);
  589 }
  590 
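       /*
        * Try to refill missing array positions from STALE and SPARE disks.
        * If the array is still incomplete, request a retaste in the hope of
        * finding a suitable spare.
        */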
  591 static void
  592 g_raid_md_nvidia_refill(struct g_raid_softc *sc)
  593 {
  594         struct g_raid_md_object *md;
  595         struct g_raid_md_nvidia_object *mdi;
  596         struct g_raid_disk *disk;
  597         struct task *task;
  598         int update, na;
  599 
  600         md = sc->sc_md;
  601         mdi = (struct g_raid_md_nvidia_object *)md;
  602         update = 0;
  603         do {
   604                 /* Make sure we don't miss anything. */
  605                 na = g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE);
  606                 if (na == mdi->mdio_total_disks)
  607                         break;
  608 
  609                 G_RAID_DEBUG1(1, md->mdo_softc,
  610                     "Array is not complete (%d of %d), "
  611                     "trying to refill.", na, mdi->mdio_total_disks);
  612 
   613                 /* Try to make use of some of the STALE disks. */
  614                 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
  615                         if (disk->d_state == G_RAID_DISK_S_STALE) {
  616                                 update += g_raid_md_nvidia_start_disk(disk);
  617                                 if (disk->d_state == G_RAID_DISK_S_ACTIVE)
  618                                         break;
  619                         }
  620                 }
  621                 if (disk != NULL)
  622                         continue;
  623 
   624                 /* Try to make use of some of the SPARE disks. */
  625                 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
  626                         if (disk->d_state == G_RAID_DISK_S_SPARE) {
  627                                 update += g_raid_md_nvidia_start_disk(disk);
  628                                 if (disk->d_state == G_RAID_DISK_S_ACTIVE)
  629                                         break;
  630                         }
  631                 }
  632         } while (disk != NULL);
  633 
  634         /* Write new metadata if we changed something. */
  635         if (update)
  636                 g_raid_md_write_nvidia(md, NULL, NULL, NULL);
  637 
   638         /* Update the status of our need for a spare. */
  639         mdi->mdio_incomplete = (g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) <
  640             mdi->mdio_total_disks);
  641 
  642         /* Request retaste hoping to find spare. */
  643         if (mdi->mdio_incomplete) {
  644                 task = malloc(sizeof(struct task),
  645                     M_MD_NVIDIA, M_WAITOK | M_ZERO);
  646                 TASK_INIT(task, 0, g_disk_md_nvidia_retaste, task);
  647                 taskqueue_enqueue(taskqueue_swi, task);
  648         }
  649 }
  650 
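       /*
        * Create the volume and subdisks described by the metadata, let the
        * collected disks take their positions and start the array.
        */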
  651 static void
  652 g_raid_md_nvidia_start(struct g_raid_softc *sc)
  653 {
  654         struct g_raid_md_object *md;
  655         struct g_raid_md_nvidia_object *mdi;
  656         struct g_raid_md_nvidia_perdisk *pd;
  657         struct nvidia_raid_conf *meta;
  658         struct g_raid_volume *vol;
  659         struct g_raid_subdisk *sd;
  660         struct g_raid_disk *disk;
  661         off_t size;
  662         int j, disk_pos;
  663         char buf[17];
  664 
  665         md = sc->sc_md;
  666         mdi = (struct g_raid_md_nvidia_object *)md;
  667         meta = mdi->mdio_meta;
  668 
  669         /* Create volumes and subdisks. */
  670         nvidia_meta_get_name(meta, buf);
  671         vol = g_raid_create_volume(sc, buf, -1);
  672         vol->v_mediasize = (off_t)meta->total_sectors * 512;
  673         vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_NONE;
  674         if (meta->type == NVIDIA_T_RAID0) {
  675                 vol->v_raid_level = G_RAID_VOLUME_RL_RAID0;
  676                 size = vol->v_mediasize / mdi->mdio_total_disks;
  677         } else if (meta->type == NVIDIA_T_RAID1) {
  678                 vol->v_raid_level = G_RAID_VOLUME_RL_RAID1;
  679                 size = vol->v_mediasize;
  680         } else if (meta->type == NVIDIA_T_RAID01) {
  681                 vol->v_raid_level = G_RAID_VOLUME_RL_RAID1E;
  682                 size = vol->v_mediasize / (mdi->mdio_total_disks / 2);
  683         } else if (meta->type == NVIDIA_T_CONCAT) {
  684                 if (mdi->mdio_total_disks == 1)
  685                         vol->v_raid_level = G_RAID_VOLUME_RL_SINGLE;
  686                 else
  687                         vol->v_raid_level = G_RAID_VOLUME_RL_CONCAT;
  688                 size = 0;
  689         } else if (meta->type == NVIDIA_T_RAID5) {
  690                 vol->v_raid_level = G_RAID_VOLUME_RL_RAID5;
  691                 vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_R5LA;
  692                 size = vol->v_mediasize / (mdi->mdio_total_disks - 1);
  693         } else if (meta->type == NVIDIA_T_RAID5_SYM) {
  694                 vol->v_raid_level = G_RAID_VOLUME_RL_RAID5;
  695                 vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_R5LS;
  696                 size = vol->v_mediasize / (mdi->mdio_total_disks - 1);
  697         } else {
  698                 vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
  699                 size = 0;
  700         }
  701         vol->v_strip_size = meta->strip_sectors * 512; //ZZZ
  702         vol->v_disks_count = mdi->mdio_total_disks;
  703         vol->v_sectorsize = 512; //ZZZ
  704         for (j = 0; j < vol->v_disks_count; j++) {
  705                 sd = &vol->v_subdisks[j];
  706                 sd->sd_offset = 0;
  707                 sd->sd_size = size;
  708         }
  709         g_raid_start_volume(vol);
  710 
  711         /* Create disk placeholders to store data for later writing. */
  712         for (disk_pos = 0; disk_pos < mdi->mdio_total_disks; disk_pos++) {
  713                 pd = malloc(sizeof(*pd), M_MD_NVIDIA, M_WAITOK | M_ZERO);
  714                 pd->pd_disk_pos = disk_pos;
  715                 disk = g_raid_create_disk(sc);
  716                 disk->d_md_data = (void *)pd;
  717                 disk->d_state = G_RAID_DISK_S_OFFLINE;
  718                 sd = &vol->v_subdisks[disk_pos];
  719                 sd->sd_disk = disk;
  720                 TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
  721         }
  722 
   723         /* Make all disks found so far take their places. */
  724         do {
  725                 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
  726                         if (disk->d_state == G_RAID_DISK_S_NONE) {
  727                                 g_raid_md_nvidia_start_disk(disk);
  728                                 break;
  729                         }
  730                 }
  731         } while (disk != NULL);
  732 
  733         mdi->mdio_started = 1;
  734         G_RAID_DEBUG1(0, sc, "Array started.");
  735         g_raid_md_write_nvidia(md, NULL, NULL, NULL);
  736 
   737         /* Pick up any STALE/SPARE disks to refill the array if needed. */
  738         g_raid_md_nvidia_refill(sc);
  739 
  740         g_raid_event_send(vol, G_RAID_VOLUME_E_START, G_RAID_EVENT_VOLUME);
  741 
  742         callout_stop(&mdi->mdio_start_co);
  743         G_RAID_DEBUG1(1, sc, "root_mount_rel %p", mdi->mdio_rootmount);
  744         root_mount_rel(mdi->mdio_rootmount);
  745         mdi->mdio_rootmount = NULL;
  746 }
  747 
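       /*
        * Account for a newly tasted disk: start it immediately if the array
        * is already running, otherwise collect it and start the array once
        * all expected disks are present.
        */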
  748 static void
  749 g_raid_md_nvidia_new_disk(struct g_raid_disk *disk)
  750 {
  751         struct g_raid_softc *sc;
  752         struct g_raid_md_object *md;
  753         struct g_raid_md_nvidia_object *mdi;
  754         struct nvidia_raid_conf *pdmeta;
  755         struct g_raid_md_nvidia_perdisk *pd;
  756 
  757         sc = disk->d_softc;
  758         md = sc->sc_md;
  759         mdi = (struct g_raid_md_nvidia_object *)md;
  760         pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
  761         pdmeta = pd->pd_meta;
  762 
  763         if (mdi->mdio_started) {
  764                 if (g_raid_md_nvidia_start_disk(disk))
  765                         g_raid_md_write_nvidia(md, NULL, NULL, NULL);
  766         } else {
  767                 if (mdi->mdio_meta == NULL ||
  768                     mdi->mdio_meta->disk_number >= mdi->mdio_meta->total_disks) {
  769                         G_RAID_DEBUG1(1, sc, "Newer disk");
  770                         if (mdi->mdio_meta != NULL)
  771                                 free(mdi->mdio_meta, M_MD_NVIDIA);
  772                         mdi->mdio_meta = nvidia_meta_copy(pdmeta);
  773                         mdi->mdio_total_disks = pdmeta->total_disks;
  774                         mdi->mdio_disks_present = 1;
  775                 } else if (pdmeta->disk_number < mdi->mdio_meta->total_disks) {
  776                         mdi->mdio_disks_present++;
  777                         G_RAID_DEBUG1(1, sc, "Matching disk (%d of %d up)",
  778                             mdi->mdio_disks_present,
  779                             mdi->mdio_total_disks);
  780                 } else
  781                         G_RAID_DEBUG1(1, sc, "Spare disk");
  782 
  783                 /* If we collected all needed disks - start array. */
  784                 if (mdi->mdio_disks_present == mdi->mdio_total_disks)
  785                         g_raid_md_nvidia_start(sc);
  786         }
  787 }
  788 
  789 static void
  790 g_raid_nvidia_go(void *arg)
  791 {
  792         struct g_raid_softc *sc;
  793         struct g_raid_md_object *md;
  794         struct g_raid_md_nvidia_object *mdi;
  795 
  796         sc = arg;
  797         md = sc->sc_md;
  798         mdi = (struct g_raid_md_nvidia_object *)md;
  799         if (!mdi->mdio_started) {
  800                 G_RAID_DEBUG1(0, sc, "Force array start due to timeout.");
  801                 g_raid_event_send(sc, G_RAID_NODE_E_START, 0);
  802         }
  803 }
  804 
  805 static int
  806 g_raid_md_create_nvidia(struct g_raid_md_object *md, struct g_class *mp,
  807     struct g_geom **gp)
  808 {
  809         struct g_raid_softc *sc;
  810         struct g_raid_md_nvidia_object *mdi;
  811         char name[32];
  812 
  813         mdi = (struct g_raid_md_nvidia_object *)md;
  814         arc4rand(&mdi->mdio_volume_id, 16, 0);
  815         snprintf(name, sizeof(name), "NVIDIA-%d",
  816             atomic_fetchadd_int(&NVIDIANodeID, 1));
  817         sc = g_raid_create_node(mp, name, md);
  818         if (sc == NULL)
  819                 return (G_RAID_MD_TASTE_FAIL);
  820         md->mdo_softc = sc;
  821         *gp = sc->sc_geom;
  822         return (G_RAID_MD_TASTE_NEW);
  823 }
  824 
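       /*
        * Taste a provider: read its metadata and attach the disk to a
        * matching existing array node, or create a new node for it.
        */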
  825 static int
  826 g_raid_md_taste_nvidia(struct g_raid_md_object *md, struct g_class *mp,
  827                               struct g_consumer *cp, struct g_geom **gp)
  828 {
  829         struct g_consumer *rcp;
  830         struct g_provider *pp;
  831         struct g_raid_md_nvidia_object *mdi, *mdi1;
  832         struct g_raid_softc *sc;
  833         struct g_raid_disk *disk;
  834         struct nvidia_raid_conf *meta;
  835         struct g_raid_md_nvidia_perdisk *pd;
  836         struct g_geom *geom;
  837         int result, spare, len;
  838         char name[32];
  839         uint16_t vendor;
  840 
  841         G_RAID_DEBUG(1, "Tasting NVIDIA on %s", cp->provider->name);
  842         mdi = (struct g_raid_md_nvidia_object *)md;
  843         pp = cp->provider;
  844 
  845         /* Read metadata from device. */
  846         meta = NULL;
  847         g_topology_unlock();
  848         vendor = 0xffff;
  849         len = sizeof(vendor);
  850         if (pp->geom->rank == 1)
  851                 g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
  852         meta = nvidia_meta_read(cp);
  853         g_topology_lock();
  854         if (meta == NULL) {
  855                 if (g_raid_aggressive_spare) {
  856                         if (vendor == 0x10de) {
  857                                 G_RAID_DEBUG(1,
  858                                     "No NVIDIA metadata, forcing spare.");
  859                                 spare = 2;
  860                                 goto search;
  861                         } else {
  862                                 G_RAID_DEBUG(1,
  863                                     "NVIDIA vendor mismatch 0x%04x != 0x10de",
  864                                     vendor);
  865                         }
  866                 }
  867                 return (G_RAID_MD_TASTE_FAIL);
  868         }
  869 
  870         /* Metadata valid. Print it. */
  871         g_raid_md_nvidia_print(meta);
  872         G_RAID_DEBUG(1, "NVIDIA disk position %d", meta->disk_number);
  873         spare = 0;//(meta->type == NVIDIA_T_SPARE) ? 1 : 0;
  874 
  875 search:
  876         /* Search for matching node. */
  877         sc = NULL;
  878         mdi1 = NULL;
  879         LIST_FOREACH(geom, &mp->geom, geom) {
  880                 sc = geom->softc;
  881                 if (sc == NULL)
  882                         continue;
  883                 if (sc->sc_stopping != 0)
  884                         continue;
  885                 if (sc->sc_md->mdo_class != md->mdo_class)
  886                         continue;
  887                 mdi1 = (struct g_raid_md_nvidia_object *)sc->sc_md;
  888                 if (spare) {
  889                         if (mdi1->mdio_incomplete)
  890                                 break;
  891                 } else {
  892                         if (memcmp(&mdi1->mdio_volume_id,
  893                              &meta->volume_id, 16) == 0)
  894                                 break;
  895                 }
  896         }
  897 
  898         /* Found matching node. */
  899         if (geom != NULL) {
  900                 G_RAID_DEBUG(1, "Found matching array %s", sc->sc_name);
  901                 result = G_RAID_MD_TASTE_EXISTING;
  902 
   903         } else if (spare) { /* No node needing a spare found -- leave it for later. */
  904                 G_RAID_DEBUG(1, "Spare is not needed at this time");
  905                 goto fail1;
  906 
   907         } else { /* No matching node found -- create one. */
  908                 result = G_RAID_MD_TASTE_NEW;
  909                 memcpy(&mdi->mdio_volume_id, &meta->volume_id, 16);
  910                 snprintf(name, sizeof(name), "NVIDIA-%d",
  911                     atomic_fetchadd_int(&NVIDIANodeID, 1));
  912                 sc = g_raid_create_node(mp, name, md);
  913                 md->mdo_softc = sc;
  914                 geom = sc->sc_geom;
  915                 callout_init(&mdi->mdio_start_co, 1);
  916                 callout_reset(&mdi->mdio_start_co, g_raid_start_timeout * hz,
  917                     g_raid_nvidia_go, sc);
  918                 mdi->mdio_rootmount = root_mount_hold("GRAID-NVIDIA");
  919                 G_RAID_DEBUG1(1, sc, "root_mount_hold %p", mdi->mdio_rootmount);
  920         }
  921 
   922         /* There is no return after this point, so close the passed consumer. */
  923         g_access(cp, -1, 0, 0);
  924 
  925         rcp = g_new_consumer(geom);
  926         rcp->flags |= G_CF_DIRECT_RECEIVE;
  927         g_attach(rcp, pp);
  928         if (g_access(rcp, 1, 1, 1) != 0)
  929                 ; //goto fail1;
  930 
  931         g_topology_unlock();
  932         sx_xlock(&sc->sc_lock);
  933 
  934         pd = malloc(sizeof(*pd), M_MD_NVIDIA, M_WAITOK | M_ZERO);
  935         pd->pd_meta = meta;
  936         if (spare == 2) {
  937                 pd->pd_disk_pos = -3;
  938         } else {
  939                 pd->pd_disk_pos = -1;
  940         }
  941         pd->pd_disk_size = pp->mediasize;
  942         disk = g_raid_create_disk(sc);
  943         disk->d_md_data = (void *)pd;
  944         disk->d_consumer = rcp;
  945         rcp->private = disk;
  946 
  947         g_raid_get_disk_info(disk);
  948 
  949         g_raid_md_nvidia_new_disk(disk);
  950 
  951         sx_xunlock(&sc->sc_lock);
  952         g_topology_lock();
  953         *gp = geom;
  954         return (result);
  955 fail1:
  956         free(meta, M_MD_NVIDIA);
  957         return (G_RAID_MD_TASTE_FAIL);
  958 }
  959 
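       /* Handle node events (forced start) and disk events (disconnect). */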
  960 static int
  961 g_raid_md_event_nvidia(struct g_raid_md_object *md,
  962     struct g_raid_disk *disk, u_int event)
  963 {
  964         struct g_raid_softc *sc;
  965         struct g_raid_subdisk *sd;
  966         struct g_raid_md_nvidia_object *mdi;
  967         struct g_raid_md_nvidia_perdisk *pd;
  968 
  969         sc = md->mdo_softc;
  970         mdi = (struct g_raid_md_nvidia_object *)md;
  971         if (disk == NULL) {
  972                 switch (event) {
  973                 case G_RAID_NODE_E_START:
  974                         if (!mdi->mdio_started) {
  975                                 /* Bump volume ID to drop missing disks. */
  976                                 arc4rand(&mdi->mdio_volume_id, 16, 0);
  977                                 g_raid_md_nvidia_start(sc);
  978                         }
  979                         return (0);
  980                 }
  981                 return (-1);
  982         }
  983         pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
  984         switch (event) {
  985         case G_RAID_DISK_E_DISCONNECTED:
  986                 /* If disk was assigned, just update statuses. */
  987                 if (pd->pd_disk_pos >= 0) {
  988                         g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
  989                         if (disk->d_consumer) {
  990                                 g_raid_kill_consumer(sc, disk->d_consumer);
  991                                 disk->d_consumer = NULL;
  992                         }
  993                         TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
  994                                 g_raid_change_subdisk_state(sd,
  995                                     G_RAID_SUBDISK_S_NONE);
  996                                 g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
  997                                     G_RAID_EVENT_SUBDISK);
  998                         }
  999                 } else {
 1000                         /* Otherwise -- delete. */
 1001                         g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE);
 1002                         g_raid_destroy_disk(disk);
 1003                 }
 1004 
 1005                 if (mdi->mdio_started) {
 1006                         /* Bump volume ID to prevent disk resurrection. */
 1007                         if (pd->pd_disk_pos >= 0)
 1008                                 arc4rand(&mdi->mdio_volume_id, 16, 0);
 1009 
 1010                         /* Write updated metadata to all disks. */
 1011                         g_raid_md_write_nvidia(md, NULL, NULL, NULL);
 1012                 }
 1013 
  1014                 /* Check if anything is left except the placeholders. */
 1015                 if (g_raid_ndisks(sc, -1) ==
 1016                     g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
 1017                         g_raid_destroy_node(sc, 0);
 1018                 else
 1019                         g_raid_md_nvidia_refill(sc);
 1020                 return (0);
 1021         }
 1022         return (-2);
 1023 }
 1024 
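       /*
        * Handle control verbs: "label", "delete", "remove" and "fail".
        */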
 1025 static int
 1026 g_raid_md_ctl_nvidia(struct g_raid_md_object *md,
 1027     struct gctl_req *req)
 1028 {
 1029         struct g_raid_softc *sc;
 1030         struct g_raid_volume *vol;
 1031         struct g_raid_subdisk *sd;
 1032         struct g_raid_disk *disk;
 1033         struct g_raid_md_nvidia_object *mdi;
 1034         struct g_raid_md_nvidia_perdisk *pd;
 1035         struct g_consumer *cp;
 1036         struct g_provider *pp;
 1037         char arg[16];
 1038         const char *verb, *volname, *levelname, *diskname;
 1039         int *nargs, *force;
 1040         off_t size, sectorsize, strip, volsize;
 1041         intmax_t *sizearg, *striparg;
 1042         int numdisks, i, len, level, qual, update;
 1043         int error;
 1044 
 1045         sc = md->mdo_softc;
 1046         mdi = (struct g_raid_md_nvidia_object *)md;
 1047         verb = gctl_get_param(req, "verb", NULL);
 1048         nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
 1049         error = 0;
 1050         if (strcmp(verb, "label") == 0) {
 1051                 if (*nargs < 4) {
 1052                         gctl_error(req, "Invalid number of arguments.");
 1053                         return (-1);
 1054                 }
 1055                 volname = gctl_get_asciiparam(req, "arg1");
 1056                 if (volname == NULL) {
 1057                         gctl_error(req, "No volume name.");
 1058                         return (-2);
 1059                 }
 1060                 levelname = gctl_get_asciiparam(req, "arg2");
 1061                 if (levelname == NULL) {
 1062                         gctl_error(req, "No RAID level.");
 1063                         return (-3);
 1064                 }
 1065                 if (strcasecmp(levelname, "RAID5") == 0)
 1066                         levelname = "RAID5-LS";
 1067                 if (g_raid_volume_str2level(levelname, &level, &qual)) {
 1068                         gctl_error(req, "Unknown RAID level '%s'.", levelname);
 1069                         return (-4);
 1070                 }
 1071                 numdisks = *nargs - 3;
 1072                 force = gctl_get_paraml(req, "force", sizeof(*force));
 1073                 if (!g_raid_md_nvidia_supported(level, qual, numdisks,
 1074                     force ? *force : 0)) {
 1075                         gctl_error(req, "Unsupported RAID level "
 1076                             "(0x%02x/0x%02x), or number of disks (%d).",
 1077                             level, qual, numdisks);
 1078                         return (-5);
 1079                 }
 1080 
 1081                 /* Search for disks, connect them and probe. */
 1082                 size = 0x7fffffffffffffffllu;
 1083                 sectorsize = 0;
 1084                 for (i = 0; i < numdisks; i++) {
 1085                         snprintf(arg, sizeof(arg), "arg%d", i + 3);
 1086                         diskname = gctl_get_asciiparam(req, arg);
 1087                         if (diskname == NULL) {
 1088                                 gctl_error(req, "No disk name (%s).", arg);
 1089                                 error = -6;
 1090                                 break;
 1091                         }
 1092                         if (strcmp(diskname, "NONE") == 0) {
 1093                                 cp = NULL;
 1094                                 pp = NULL;
 1095                         } else {
 1096                                 g_topology_lock();
 1097                                 cp = g_raid_open_consumer(sc, diskname);
 1098                                 if (cp == NULL) {
 1099                                         gctl_error(req, "Can't open '%s'.",
 1100                                             diskname);
 1101                                         g_topology_unlock();
 1102                                         error = -7;
 1103                                         break;
 1104                                 }
 1105                                 pp = cp->provider;
 1106                         }
 1107                         pd = malloc(sizeof(*pd), M_MD_NVIDIA, M_WAITOK | M_ZERO);
 1108                         pd->pd_disk_pos = i;
 1109                         disk = g_raid_create_disk(sc);
 1110                         disk->d_md_data = (void *)pd;
 1111                         disk->d_consumer = cp;
 1112                         if (cp == NULL)
 1113                                 continue;
 1114                         cp->private = disk;
 1115                         g_topology_unlock();
 1116 
 1117                         g_raid_get_disk_info(disk);
 1118 
 1119                         pd->pd_disk_size = pp->mediasize;
 1120                         if (size > pp->mediasize)
 1121                                 size = pp->mediasize;
 1122                         if (sectorsize < pp->sectorsize)
 1123                                 sectorsize = pp->sectorsize;
 1124                 }
 1125                 if (error != 0)
 1126                         return (error);
 1127 
 1128                 if (sectorsize <= 0) {
 1129                         gctl_error(req, "Can't get sector size.");
 1130                         return (-8);
 1131                 }
 1132 
 1133                 /* Reserve space for metadata. */
 1134                 size -= 2 * sectorsize;
 1135 
 1136                 /* Handle size argument. */
 1137                 len = sizeof(*sizearg);
 1138                 sizearg = gctl_get_param(req, "size", &len);
 1139                 if (sizearg != NULL && len == sizeof(*sizearg) &&
 1140                     *sizearg > 0) {
 1141                         if (*sizearg > size) {
 1142                                 gctl_error(req, "Size too big %lld > %lld.",
 1143                                     (long long)*sizearg, (long long)size);
 1144                                 return (-9);
 1145                         }
 1146                         size = *sizearg;
 1147                 }
 1148 
 1149                 /* Handle strip argument. */
 1150                 strip = 131072;
 1151                 len = sizeof(*striparg);
 1152                 striparg = gctl_get_param(req, "strip", &len);
 1153                 if (striparg != NULL && len == sizeof(*striparg) &&
 1154                     *striparg > 0) {
 1155                         if (*striparg < sectorsize) {
 1156                                 gctl_error(req, "Strip size too small.");
 1157                                 return (-10);
 1158                         }
 1159                         if (*striparg % sectorsize != 0) {
 1160                                 gctl_error(req, "Incorrect strip size.");
 1161                                 return (-11);
 1162                         }
  1163                         if (*striparg > 65535 * sectorsize) {
 1164                                 gctl_error(req, "Strip size too big.");
 1165                                 return (-12);
 1166                         }
 1167                         strip = *striparg;
 1168                 }
 1169 
 1170                 /* Round size down to strip or sector. */
 1171                 if (level == G_RAID_VOLUME_RL_RAID1)
 1172                         size -= (size % sectorsize);
 1173                 else if (level == G_RAID_VOLUME_RL_RAID1E &&
 1174                     (numdisks & 1) != 0)
 1175                         size -= (size % (2 * strip));
 1176                 else
 1177                         size -= (size % strip);
 1178                 if (size <= 0) {
 1179                         gctl_error(req, "Size too small.");
 1180                         return (-13);
 1181                 }
 1182 
 1183                 if (level == G_RAID_VOLUME_RL_RAID0 ||
 1184                     level == G_RAID_VOLUME_RL_CONCAT ||
 1185                     level == G_RAID_VOLUME_RL_SINGLE)
 1186                         volsize = size * numdisks;
 1187                 else if (level == G_RAID_VOLUME_RL_RAID1)
 1188                         volsize = size;
 1189                 else if (level == G_RAID_VOLUME_RL_RAID5)
 1190                         volsize = size * (numdisks - 1);
 1191                 else { /* RAID1E */
 1192                         volsize = ((size * numdisks) / strip / 2) *
 1193                             strip;
 1194                 }
 1195                 if (volsize > 0xffffffffllu * sectorsize) {
 1196                         gctl_error(req, "Size too big.");
 1197                         return (-14);
 1198                 }
 1199 
 1200                 /* We have all we need, create things: volume, ... */
 1201                 mdi->mdio_total_disks = numdisks;
 1202                 mdi->mdio_started = 1;
 1203                 vol = g_raid_create_volume(sc, volname, -1);
 1204                 vol->v_md_data = (void *)(intptr_t)0;
 1205                 vol->v_raid_level = level;
 1206                 vol->v_raid_level_qualifier = qual;
 1207                 vol->v_strip_size = strip;
 1208                 vol->v_disks_count = numdisks;
 1209                 vol->v_mediasize = volsize;
 1210                 vol->v_sectorsize = sectorsize;
 1211                 g_raid_start_volume(vol);
 1212 
 1213                 /* , and subdisks. */
 1214                 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1215                         pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
 1216                         sd = &vol->v_subdisks[pd->pd_disk_pos];
 1217                         sd->sd_disk = disk;
 1218                         sd->sd_offset = 0;
 1219                         sd->sd_size = size;
 1220                         TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
 1221                         if (sd->sd_disk->d_consumer != NULL) {
 1222                                 g_raid_change_disk_state(disk,
 1223                                     G_RAID_DISK_S_ACTIVE);
 1224                                 g_raid_change_subdisk_state(sd,
 1225                                     G_RAID_SUBDISK_S_ACTIVE);
 1226                                 g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
 1227                                     G_RAID_EVENT_SUBDISK);
 1228                         } else {
 1229                                 g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
 1230                         }
 1231                 }
 1232 
 1233                 /* Write metadata based on created entities. */
 1234                 G_RAID_DEBUG1(0, sc, "Array started.");
 1235                 g_raid_md_write_nvidia(md, NULL, NULL, NULL);
 1236 
  1237                 /* Pick up any STALE/SPARE disks to refill the array if needed. */
 1238                 g_raid_md_nvidia_refill(sc);
 1239 
 1240                 g_raid_event_send(vol, G_RAID_VOLUME_E_START,
 1241                     G_RAID_EVENT_VOLUME);
 1242                 return (0);
 1243         }
 1244         if (strcmp(verb, "delete") == 0) {
 1245                 /* Check if some volume is still open. */
 1246                 force = gctl_get_paraml(req, "force", sizeof(*force));
 1247                 if (force != NULL && *force == 0 &&
 1248                     g_raid_nopens(sc) != 0) {
 1249                         gctl_error(req, "Some volume is still open.");
 1250                         return (-4);
 1251                 }
 1252 
 1253                 TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1254                         if (disk->d_consumer)
 1255                                 nvidia_meta_erase(disk->d_consumer);
 1256                 }
 1257                 g_raid_destroy_node(sc, 0);
 1258                 return (0);
 1259         }
 1260         if (strcmp(verb, "remove") == 0 ||
 1261             strcmp(verb, "fail") == 0) {
 1262                 if (*nargs < 2) {
 1263                         gctl_error(req, "Invalid number of arguments.");
 1264                         return (-1);
 1265                 }
 1266                 for (i = 1; i < *nargs; i++) {
 1267                         snprintf(arg, sizeof(arg), "arg%d", i);
 1268                         diskname = gctl_get_asciiparam(req, arg);
 1269                         if (diskname == NULL) {
 1270                                 gctl_error(req, "No disk name (%s).", arg);
 1271                                 error = -2;
 1272                                 break;
 1273                         }
 1274                         if (strncmp(diskname, _PATH_DEV, 5) == 0)
 1275                                 diskname += 5;
 1276 
 1277                         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1278                                 if (disk->d_consumer != NULL && 
 1279                                     disk->d_consumer->provider != NULL &&
 1280                                     strcmp(disk->d_consumer->provider->name,
 1281                                      diskname) == 0)
 1282                                         break;
 1283                         }
 1284                         if (disk == NULL) {
 1285                                 gctl_error(req, "Disk '%s' not found.",
 1286                                     diskname);
 1287                                 error = -3;
 1288                                 break;
 1289                         }
 1290 
 1291                         if (strcmp(verb, "fail") == 0) {
 1292                                 g_raid_md_fail_disk_nvidia(md, NULL, disk);
 1293                                 continue;
 1294                         }
 1295 
 1296                         pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
 1297 
 1298                         /* Erase metadata on the disk being deleted. */
 1299                         nvidia_meta_erase(disk->d_consumer);
 1300 
 1301                         /* If the disk was assigned, just update the statuses. */
 1302                         if (pd->pd_disk_pos >= 0) {
 1303                                 g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
 1304                                 g_raid_kill_consumer(sc, disk->d_consumer);
 1305                                 disk->d_consumer = NULL;
 1306                                 TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 1307                                         g_raid_change_subdisk_state(sd,
 1308                                             G_RAID_SUBDISK_S_NONE);
 1309                                         g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
 1310                                             G_RAID_EVENT_SUBDISK);
 1311                                 }
 1312                         } else {
 1313                                 /* Otherwise -- delete. */
 1314                                 g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE);
 1315                                 g_raid_destroy_disk(disk);
 1316                         }
 1317                 }
 1318 
 1319                 /* Write updated metadata to remaining disks. */
 1320                 g_raid_md_write_nvidia(md, NULL, NULL, NULL);
 1321 
 1322                 /* Check whether anything is left except placeholders. */
 1323                 if (g_raid_ndisks(sc, -1) ==
 1324                     g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
 1325                         g_raid_destroy_node(sc, 0);
 1326                 else
 1327                         g_raid_md_nvidia_refill(sc);
 1328                 return (error);
 1329         }
 1330         if (strcmp(verb, "insert") == 0) {
 1331                 if (*nargs < 2) {
 1332                         gctl_error(req, "Invalid number of arguments.");
 1333                         return (-1);
 1334                 }
 1335                 update = 0;
 1336                 for (i = 1; i < *nargs; i++) {
 1337                         /* Get disk name. */
 1338                         snprintf(arg, sizeof(arg), "arg%d", i);
 1339                         diskname = gctl_get_asciiparam(req, arg);
 1340                         if (diskname == NULL) {
 1341                                 gctl_error(req, "No disk name (%s).", arg);
 1342                                 error = -3;
 1343                                 break;
 1344                         }
 1345 
 1346                         /* Try to find a provider with the specified name. */
 1347                         g_topology_lock();
 1348                         cp = g_raid_open_consumer(sc, diskname);
 1349                         if (cp == NULL) {
 1350                                 gctl_error(req, "Can't open disk '%s'.",
 1351                                     diskname);
 1352                                 g_topology_unlock();
 1353                                 error = -4;
 1354                                 break;
 1355                         }
 1356                         pp = cp->provider;
 1357 
 1358                         pd = malloc(sizeof(*pd), M_MD_NVIDIA, M_WAITOK | M_ZERO);
 1359                         pd->pd_disk_pos = -3;
 1360                         pd->pd_disk_size = pp->mediasize;
 1361 
 1362                         disk = g_raid_create_disk(sc);
 1363                         disk->d_consumer = cp;
 1364                         disk->d_md_data = (void *)pd;
 1365                         cp->private = disk;
 1366                         g_topology_unlock();
 1367 
 1368                         g_raid_get_disk_info(disk);
 1369 
 1370                         /* Welcome the "new" disk. */
 1371                         update += g_raid_md_nvidia_start_disk(disk);
 1372                         if (disk->d_state != G_RAID_DISK_S_SPARE &&
 1373                             disk->d_state != G_RAID_DISK_S_ACTIVE) {
 1374                                 gctl_error(req, "Disk '%s' doesn't fit.",
 1375                                     diskname);
 1376                                 g_raid_destroy_disk(disk);
 1377                                 error = -8;
 1378                                 break;
 1379                         }
 1380                 }
 1381 
 1382                 /* Write new metadata if we changed something. */
 1383                 if (update)
 1384                         g_raid_md_write_nvidia(md, NULL, NULL, NULL);
 1385                 return (error);
 1386         }
 1387         gctl_error(req, "Command '%s' is not supported.", verb);
 1388         return (-100);
 1389 }
 1390 
 1391 static int
 1392 g_raid_md_write_nvidia(struct g_raid_md_object *md, struct g_raid_volume *tvol,
 1393     struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
 1394 {
 1395         struct g_raid_softc *sc;
 1396         struct g_raid_volume *vol;
 1397         struct g_raid_subdisk *sd;
 1398         struct g_raid_disk *disk;
 1399         struct g_raid_md_nvidia_object *mdi;
 1400         struct g_raid_md_nvidia_perdisk *pd;
 1401         struct nvidia_raid_conf *meta;
 1402         int i, spares;
 1403 
 1404         sc = md->mdo_softc;
 1405         mdi = (struct g_raid_md_nvidia_object *)md;
 1406 
 1407         if (sc->sc_stopping == G_RAID_DESTROY_HARD)
 1408                 return (0);
 1409 
 1410         /* There is only one volume. */
 1411         vol = TAILQ_FIRST(&sc->sc_volumes);
 1412 
 1413         /* Fill global fields. */
 1414         meta = malloc(sizeof(*meta), M_MD_NVIDIA, M_WAITOK | M_ZERO);
 1415         if (mdi->mdio_meta)
 1416                 memcpy(meta, mdi->mdio_meta, sizeof(*meta));
 1417         memcpy(meta->nvidia_id, NVIDIA_MAGIC, sizeof(NVIDIA_MAGIC) - 1);
 1418         meta->config_size = 30;
 1419         meta->version = 0x0064;
 1420         meta->total_sectors = vol->v_mediasize / vol->v_sectorsize;
 1421         meta->sector_size = vol->v_sectorsize;
 1422         nvidia_meta_put_name(meta, vol->v_name);
 1423         meta->magic_0 = NVIDIA_MAGIC0;
 1424         memcpy(&meta->volume_id, &mdi->mdio_volume_id, 16);
 1425         meta->state = NVIDIA_S_IDLE;
 1426         if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1)
 1427                 meta->array_width = 1;
 1428         else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
 1429                 meta->array_width = vol->v_disks_count / 2;
 1430         else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID5)
 1431                 meta->array_width = vol->v_disks_count - 1;
 1432         else
 1433                 meta->array_width = vol->v_disks_count;
 1434         meta->total_disks = vol->v_disks_count;
 1435         meta->orig_array_width = meta->array_width;
 1436         if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID0)
 1437                 meta->type = NVIDIA_T_RAID0;
 1438         else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1)
 1439                 meta->type = NVIDIA_T_RAID1;
 1440         else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
 1441                 meta->type = NVIDIA_T_RAID01;
 1442         else if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT ||
 1443             vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE)
 1444                 meta->type = NVIDIA_T_CONCAT;
 1445         else if (vol->v_raid_level_qualifier == G_RAID_VOLUME_RLQ_R5LA)
 1446                 meta->type = NVIDIA_T_RAID5;
 1447         else
 1448                 meta->type = NVIDIA_T_RAID5_SYM;
 1449         meta->strip_sectors = vol->v_strip_size / vol->v_sectorsize;
 1450         meta->strip_bytes = vol->v_strip_size;
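              /*
               * strip_sectors is expected to be a power of two, so ffs()
               * yields log2(strip_sectors) + 1 and the mask below selects
               * the offset within a strip.
               */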
 1451         meta->strip_shift = ffs(meta->strip_sectors) - 1;
 1452         meta->strip_mask = meta->strip_sectors - 1;
 1453         meta->stripe_sectors = meta->strip_sectors * meta->orig_array_width;
 1454         meta->stripe_bytes = meta->stripe_sectors * vol->v_sectorsize;
 1455         meta->rebuild_lba = 0;
 1456         meta->orig_type = meta->type;
 1457         meta->orig_total_sectors = meta->total_sectors;
 1458         meta->status = 0;
 1459 
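              /*
               * Derive the array state from the subdisk states: a NEW or
               * REBUILD subdisk marks the array as rebuilding; STALE or
               * RESYNC subdisks, or a dirty volume, mark it as syncing
               * unless a rebuild was already noted.
               */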
 1460         for (i = 0; i < vol->v_disks_count; i++) {
 1461                 sd = &vol->v_subdisks[i];
 1462                 if ((sd->sd_state == G_RAID_SUBDISK_S_STALE ||
 1463                      sd->sd_state == G_RAID_SUBDISK_S_RESYNC ||
 1464                      vol->v_dirty) &&
 1465                      meta->state != NVIDIA_S_REBUILD)
 1466                         meta->state = NVIDIA_S_SYNC;
 1467                 else if (sd->sd_state == G_RAID_SUBDISK_S_NEW ||
 1468                      sd->sd_state == G_RAID_SUBDISK_S_REBUILD)
 1469                         meta->state = NVIDIA_S_REBUILD;
 1470         }
 1471 
 1472         /* We are done. Print the metadata and store it on the disks. */
 1473         if (mdi->mdio_meta != NULL)
 1474                 free(mdi->mdio_meta, M_MD_NVIDIA);
 1475         mdi->mdio_meta = meta;
 1476         spares = 0;
 1477         TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
 1478                 pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
 1479                 if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
 1480                     disk->d_state != G_RAID_DISK_S_SPARE)
 1481                         continue;
 1482                 if (pd->pd_meta != NULL) {
 1483                         free(pd->pd_meta, M_MD_NVIDIA);
 1484                         pd->pd_meta = NULL;
 1485                 }
 1486                 pd->pd_meta = nvidia_meta_copy(meta);
 1487                 if ((sd = TAILQ_FIRST(&disk->d_subdisks)) != NULL) {
 1488                         /* For RAID0+1 we need to translate order. */
 1489                         pd->pd_meta->disk_number =
 1490                             nvidia_meta_translate_disk(meta, sd->sd_pos);
 1491                         if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE) {
 1492                                 pd->pd_meta->disk_status = 0x100;
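                                      /*
                                       * Store the rebuild position as an
                                       * array-wide LBA: the per-disk byte
                                       * offset converted to sectors and
                                       * scaled by the array width.
                                       */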
 1493                                 pd->pd_meta->rebuild_lba =
 1494                                     sd->sd_rebuild_pos / vol->v_sectorsize *
 1495                                     meta->array_width;
 1496                         }
 1497                 } else
 1498                         pd->pd_meta->disk_number = meta->total_disks + spares++;
 1499                 G_RAID_DEBUG(1, "Writing NVIDIA metadata to %s",
 1500                     g_raid_get_diskname(disk));
 1501                 g_raid_md_nvidia_print(pd->pd_meta);
 1502                 nvidia_meta_write(disk->d_consumer, pd->pd_meta);
 1503         }
 1504         return (0);
 1505 }
 1506 
 1507 static int
 1508 g_raid_md_fail_disk_nvidia(struct g_raid_md_object *md,
 1509     struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
 1510 {
 1511         struct g_raid_softc *sc;
 1512         struct g_raid_md_nvidia_perdisk *pd;
 1513         struct g_raid_subdisk *sd;
 1514 
 1515         sc = md->mdo_softc;
 1516         pd = (struct g_raid_md_nvidia_perdisk *)tdisk->d_md_data;
 1517 
 1518         /* We cannot fail a disk that is not currently part of the array. */
 1519         if (pd->pd_disk_pos < 0)
 1520                 return (-1);
 1521 
 1522         /* Erase metadata to prevent the disk's later resurrection. */
 1523         if (tdisk->d_consumer)
 1524                 nvidia_meta_erase(tdisk->d_consumer);
 1525 
 1526         /* Change states. */
 1527         g_raid_change_disk_state(tdisk, G_RAID_DISK_S_FAILED);
 1528         TAILQ_FOREACH(sd, &tdisk->d_subdisks, sd_next) {
 1529                 g_raid_change_subdisk_state(sd,
 1530                     G_RAID_SUBDISK_S_FAILED);
 1531                 g_raid_event_send(sd, G_RAID_SUBDISK_E_FAILED,
 1532                     G_RAID_EVENT_SUBDISK);
 1533         }
 1534 
 1535         /* Write updated metadata to remaining disks. */
 1536         g_raid_md_write_nvidia(md, NULL, NULL, tdisk);
 1537 
 1538         /* Check whether anything is left except placeholders. */
 1539         if (g_raid_ndisks(sc, -1) ==
 1540             g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
 1541                 g_raid_destroy_node(sc, 0);
 1542         else
 1543                 g_raid_md_nvidia_refill(sc);
 1544         return (0);
 1545 }
 1546 
 1547 static int
 1548 g_raid_md_free_disk_nvidia(struct g_raid_md_object *md,
 1549     struct g_raid_disk *disk)
 1550 {
 1551         struct g_raid_md_nvidia_perdisk *pd;
 1552 
 1553         pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
 1554         if (pd->pd_meta != NULL) {
 1555                 free(pd->pd_meta, M_MD_NVIDIA);
 1556                 pd->pd_meta = NULL;
 1557         }
 1558         free(pd, M_MD_NVIDIA);
 1559         disk->d_md_data = NULL;
 1560         return (0);
 1561 }
 1562 
 1563 static int
 1564 g_raid_md_free_nvidia(struct g_raid_md_object *md)
 1565 {
 1566         struct g_raid_md_nvidia_object *mdi;
 1567 
 1568         mdi = (struct g_raid_md_nvidia_object *)md;
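              /*
               * If the array never got started, cancel the pending start
               * callout and release the root mount hold.
               */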
 1569         if (!mdi->mdio_started) {
 1570                 mdi->mdio_started = 0;
 1571                 callout_stop(&mdi->mdio_start_co);
 1572                 G_RAID_DEBUG1(1, md->mdo_softc,
 1573                     "root_mount_rel %p", mdi->mdio_rootmount);
 1574                 root_mount_rel(mdi->mdio_rootmount);
 1575                 mdi->mdio_rootmount = NULL;
 1576         }
 1577         if (mdi->mdio_meta != NULL) {
 1578                 free(mdi->mdio_meta, M_MD_NVIDIA);
 1579                 mdi->mdio_meta = NULL;
 1580         }
 1581         return (0);
 1582 }
 1583 
 1584 G_RAID_MD_DECLARE(nvidia, "NVIDIA");
