FreeBSD/Linux Kernel Cross Reference
sys/dev/nvme/nvme_ns.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <dev/pci/pcivar.h>

#include <geom/geom.h>

#include "nvme_private.h"

static void             nvme_bio_child_inbed(struct bio *parent, int bio_error);
static void             nvme_bio_child_done(void *arg,
                                            const struct nvme_completion *cpl);
static uint32_t         nvme_get_num_segments(uint64_t addr, uint64_t size,
                                              uint32_t alignment);
static void             nvme_free_child_bios(int num_bios,
                                             struct bio **child_bios);
static struct bio **    nvme_allocate_child_bios(int num_bios);
static struct bio **    nvme_construct_child_bios(struct bio *bp,
                                                  uint32_t alignment,
                                                  int *num_bios);
static int              nvme_ns_split_bio(struct nvme_namespace *ns,
                                          struct bio *bp,
                                          uint32_t alignment);

static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_namespace                   *ns;
        struct nvme_controller                  *ctrlr;
        struct nvme_pt_command                  *pt;

        ns = cdev->si_drv1;
        ctrlr = ns->ctrlr;

        switch (cmd) {
        case NVME_IO_TEST:
        case NVME_BIO_TEST:
                nvme_ns_test(ns, cmd, arg);
                break;
        case NVME_PASSTHROUGH_CMD:
                pt = (struct nvme_pt_command *)arg;
                return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
                    1 /* is_user_buffer */, 0 /* is_admin_cmd */));
        case NVME_GET_NSID:
        {
                struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
                strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
                    sizeof(gnsid->cdev));
                gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
                gnsid->nsid = ns->id;
                break;
        }
        case DIOCGMEDIASIZE:
                *(off_t *)arg = (off_t)nvme_ns_get_size(ns);
                break;
        case DIOCGSECTORSIZE:
                *(u_int *)arg = nvme_ns_get_sector_size(ns);
                break;
        default:
                return (ENOTTY);
        }

        return (0);
}
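
/*
 * A minimal userland sketch (hypothetical, for illustration only) of the
 * DIOCGMEDIASIZE/DIOCGSECTORSIZE handlers above, which serve utilities
 * such as diskinfo(8):
 *
 *      int fd = open("/dev/nvme0ns1", O_RDONLY);
 *      off_t mediasize;
 *      u_int sectorsize;
 *
 *      if (ioctl(fd, DIOCGMEDIASIZE, &mediasize) == 0 &&
 *          ioctl(fd, DIOCGSECTORSIZE, &sectorsize) == 0)
 *              printf("%jd bytes, %u bytes/sector\n",
 *                  (intmax_t)mediasize, sectorsize);
 */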

static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{
        int error = 0;

        if (flags & FWRITE)
                error = securelevel_gt(td->td_ucred, 0);

        return (error);
}
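
/*
 * Note: securelevel_gt(cred, 0) above returns EPERM when the system
 * securelevel is greater than 0 and 0 otherwise, so opening the raw
 * namespace for writing is refused once the securelevel has been raised.
 */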

static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{

        return (0);
}

static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
        struct bio *bp = arg;

        /*
         * TODO: add more extensive translation of NVMe status codes
         *  to different bio error codes (e.g., EIO, EINVAL, etc.)
         */
        if (nvme_completion_is_error(cpl)) {
                bp->bio_error = EIO;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
        } else
                bp->bio_resid = 0;

        biodone(bp);
}

static void
nvme_ns_strategy(struct bio *bp)
{
        struct nvme_namespace   *ns;
        int                     err;

        ns = bp->bio_dev->si_drv1;
        err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);

        if (err) {
                bp->bio_error = err;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
                biodone(bp);
        }
}

static struct cdevsw nvme_ns_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_DISK,
        .d_read =       physread,
        .d_write =      physwrite,
        .d_open =       nvme_ns_open,
        .d_close =      nvme_ns_close,
        .d_strategy =   nvme_ns_strategy,
        .d_ioctl =      nvme_ns_ioctl
};
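
/*
 * physread() and physwrite() above are thin wrappers around physio(9),
 * which maps each read(2)/write(2) on the character device onto struct
 * bio requests and hands them to d_strategy, so ordinary reads and
 * writes also flow through nvme_ns_strategy().
 */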

uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
        return ns->ctrlr->max_xfer_size;
}

uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
        uint8_t flbas_fmt, lbads;

        flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
                NVME_NS_DATA_FLBAS_FORMAT_MASK;
        lbads = (ns->data.lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) &
                NVME_NS_DATA_LBAF_LBADS_MASK;

        return (1 << lbads);
}
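
/*
 * In nvme_ns_get_sector_size() above, FLBAS selects the active LBA
 * format and LBADS is the log2 of that format's data size.  For
 * example, lbads == 9 yields 1 << 9 == 512-byte sectors, and
 * lbads == 12 yields 4096-byte sectors.
 */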

uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
        return (ns->data.nsze);
}

uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
        return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}

uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
        return (ns->flags);
}

const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.sn);
}

const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.mn);
}

const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{

        return (&ns->data);
}

uint32_t
nvme_ns_get_stripesize(struct nvme_namespace *ns)
{
        uint32_t ss;

        if (((ns->data.nsfeat >> NVME_NS_DATA_NSFEAT_NPVALID_SHIFT) &
            NVME_NS_DATA_NSFEAT_NPVALID_MASK) != 0) {
                ss = nvme_ns_get_sector_size(ns);
                if (ns->data.npwa != 0)
                        return ((ns->data.npwa + 1) * ss);
                else if (ns->data.npwg != 0)
                        return ((ns->data.npwg + 1) * ss);
        }
        return (ns->boundary);
}
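
/*
 * NPWA (namespace preferred write alignment) and NPWG (namespace
 * preferred write granularity), used above, were added in NVMe 1.4 and
 * are 0-based values, hence the "+ 1"; when neither is advertised, the
 * boundary computed at namespace construction time is used instead.
 */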

static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
        struct bio      *bp = arg;
        nvme_cb_fn_t    bp_cb_fn;

        bp_cb_fn = bp->bio_driver1;

        if (bp->bio_driver2)
                free(bp->bio_driver2, M_NVME);

        if (nvme_completion_is_error(status)) {
                bp->bio_flags |= BIO_ERROR;
                if (bp->bio_error == 0)
                        bp->bio_error = EIO;
        }

        if ((bp->bio_flags & BIO_ERROR) == 0)
                bp->bio_resid = 0;
        else
                bp->bio_resid = bp->bio_bcount;

        bp_cb_fn(bp, status);
}
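
/*
 * nvme_ns_bio_done() above relies on two driver-private bio fields:
 * bio_driver1 holds the caller's completion callback (stashed in
 * nvme_ns_bio_process()) and bio_driver2 holds a DSM range allocated
 * for BIO_DELETE requests, which must be freed here.
 */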

static void
nvme_bio_child_inbed(struct bio *parent, int bio_error)
{
        struct nvme_completion  parent_cpl;
        int                     children, inbed;

        if (bio_error != 0) {
                parent->bio_flags |= BIO_ERROR;
                parent->bio_error = bio_error;
        }

        /*
         * atomic_fetchadd will return the value before adding 1, so we
         *  still must add 1 to get the updated inbed number.  Save
         *  bio_children before incrementing to guard against race
         *  conditions when two child bios complete on different queues.
         */
        children = atomic_load_acq_int(&parent->bio_children);
        inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
        if (inbed == children) {
                bzero(&parent_cpl, sizeof(parent_cpl));
                if (parent->bio_flags & BIO_ERROR) {
                        parent_cpl.status &= ~(NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT);
                        parent_cpl.status |= (NVME_SC_DATA_TRANSFER_ERROR) << NVME_STATUS_SC_SHIFT;
                }
                nvme_ns_bio_done(parent, &parent_cpl);
        }
}

static void
nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
{
        struct bio              *child = arg;
        struct bio              *parent;
        int                     bio_error;

        parent = child->bio_parent;
        g_destroy_bio(child);
        bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
        nvme_bio_child_inbed(parent, bio_error);
}

static uint32_t
nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align)
{
        uint32_t        num_segs, offset, remainder;

        if (align == 0)
                return (1);

        KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n"));

        num_segs = size / align;
        remainder = size & (align - 1);
        offset = addr & (align - 1);
        if (remainder > 0 || offset > 0)
                num_segs += 1 + (remainder + offset - 1) / align;
        return (num_segs);
}
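
/*
 * A worked example for nvme_get_num_segments() above, assuming a
 * 128 KiB boundary (align == 0x20000): addr == 0x18000 and
 * size == 0x10000 describe the span [0x18000, 0x28000), which crosses
 * the boundary at 0x20000 once, so two segments are returned.
 */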

static void
nvme_free_child_bios(int num_bios, struct bio **child_bios)
{
        int i;

        for (i = 0; i < num_bios; i++) {
                if (child_bios[i] != NULL)
                        g_destroy_bio(child_bios[i]);
        }

        free(child_bios, M_NVME);
}

static struct bio **
nvme_allocate_child_bios(int num_bios)
{
        struct bio **child_bios;
        int err = 0, i;

        child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT);
        if (child_bios == NULL)
                return (NULL);

        for (i = 0; i < num_bios; i++) {
                child_bios[i] = g_new_bio();
                if (child_bios[i] == NULL)
                        err = ENOMEM;
        }

        if (err == ENOMEM) {
                nvme_free_child_bios(num_bios, child_bios);
                return (NULL);
        }

        return (child_bios);
}

static struct bio **
nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios)
{
        struct bio      **child_bios;
        struct bio      *child;
        uint64_t        cur_offset;
        caddr_t         data;
        uint32_t        rem_bcount;
        int             i;
        struct vm_page  **ma;
        uint32_t        ma_offset;

        *num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount,
            alignment);
        child_bios = nvme_allocate_child_bios(*num_bios);
        if (child_bios == NULL)
                return (NULL);

        bp->bio_children = *num_bios;
        bp->bio_inbed = 0;
        cur_offset = bp->bio_offset;
        rem_bcount = bp->bio_bcount;
        data = bp->bio_data;
        ma_offset = bp->bio_ma_offset;
        ma = bp->bio_ma;

        for (i = 0; i < *num_bios; i++) {
                child = child_bios[i];
                child->bio_parent = bp;
                child->bio_cmd = bp->bio_cmd;
                child->bio_offset = cur_offset;
                child->bio_bcount = min(rem_bcount,
                    alignment - (cur_offset & (alignment - 1)));
                child->bio_flags = bp->bio_flags;
                if (bp->bio_flags & BIO_UNMAPPED) {
                        child->bio_ma_offset = ma_offset;
                        child->bio_ma = ma;
                        child->bio_ma_n =
                            nvme_get_num_segments(child->bio_ma_offset,
                                child->bio_bcount, PAGE_SIZE);
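                        /*
                         * Advance the page pointers; if this child
                         * ends mid-page, the next child starts in
                         * that same page, so back ma up by one.
                         */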
                        ma_offset = (ma_offset + child->bio_bcount) &
                            PAGE_MASK;
                        ma += child->bio_ma_n;
                        if (ma_offset != 0)
                                ma -= 1;
                } else {
                        child->bio_data = data;
                        data += child->bio_bcount;
                }
                cur_offset += child->bio_bcount;
                rem_bcount -= child->bio_bcount;
        }

        return (child_bios);
}

static int
nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
    uint32_t alignment)
{
        struct bio      *child;
        struct bio      **child_bios;
        int             err, i, num_bios;

        child_bios = nvme_construct_child_bios(bp, alignment, &num_bios);
        if (child_bios == NULL)
                return (ENOMEM);

        for (i = 0; i < num_bios; i++) {
                child = child_bios[i];
                err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
                if (err != 0) {
                        nvme_bio_child_inbed(bp, err);
                        g_destroy_bio(child);
                }
        }

        free(child_bios, M_NVME);
        return (0);
}
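
/*
 * Note that when nvme_ns_bio_process() fails for a child above, the
 * error is folded in via nvme_bio_child_inbed(), so the parent bio is
 * still completed exactly once after every child has reported in.
 */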

int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
        nvme_cb_fn_t cb_fn)
{
        struct nvme_dsm_range   *dsm_range;
        uint32_t                num_bios;
        int                     err;

        bp->bio_driver1 = cb_fn;

        if (ns->boundary > 0 &&
            (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
                num_bios = nvme_get_num_segments(bp->bio_offset,
                    bp->bio_bcount, ns->boundary);
                if (num_bios > 1)
                        return (nvme_ns_split_bio(ns, bp, ns->boundary));
        }

        switch (bp->bio_cmd) {
        case BIO_READ:
                err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
                break;
        case BIO_WRITE:
                err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
                break;
        case BIO_FLUSH:
                err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
                break;
        case BIO_DELETE:
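                /*
                 * Translate the delete into a single NVMe Dataset
                 * Management (deallocate) range; the starting LBA and
                 * length are expressed in sectors and, like all NVMe
                 * fields, little-endian.
                 */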
                dsm_range =
                    malloc(sizeof(struct nvme_dsm_range), M_NVME,
                    M_ZERO | M_NOWAIT);
                if (!dsm_range) {
                        err = ENOMEM;
                        break;
                }
                dsm_range->length =
                    htole32(bp->bio_bcount/nvme_ns_get_sector_size(ns));
                dsm_range->starting_lba =
                    htole64(bp->bio_offset/nvme_ns_get_sector_size(ns));
                bp->bio_driver2 = dsm_range;
                err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
                        nvme_ns_bio_done, bp);
                if (err != 0)
                        free(dsm_range, M_NVME);
                break;
        default:
                err = EOPNOTSUPP;
                break;
        }

        return (err);
}

int
nvme_ns_ioctl_process(struct nvme_namespace *ns, u_long cmd, caddr_t arg,
    int flag, struct thread *td)
{
        return (nvme_ns_ioctl(ns->cdev, cmd, arg, flag, td));
}

int
nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
    struct nvme_controller *ctrlr)
{
        struct make_dev_args                    md_args;
        struct nvme_completion_poll_status      status;
        int                                     res;
        int                                     unit;
        uint8_t                                 flbas_fmt;
        uint8_t                                 vwc_present;

        ns->ctrlr = ctrlr;
        ns->id = id;

        /*
         * Namespaces are reconstructed after a controller reset, so check
         *  to make sure we only call mtx_init once on each mtx.
         *
         * TODO: Move this somewhere where it gets called at controller
         *  construction time, which is not invoked as part of each
         *  controller reset.
         */
        if (!mtx_initialized(&ns->lock))
                mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);

        status.done = 0;
        nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
            nvme_completion_poll_cb, &status);
        nvme_completion_poll(&status);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
                return (ENXIO);
        }

        /* Convert data to host endian */
        nvme_namespace_data_swapbytes(&ns->data);

        /*
         * If the size is zero, chances are this isn't a valid
         * namespace (e.g., one that has not been configured yet).  The
         * standard says the entire id will be zeros, so this is a
         * cheap way to test for that.
         */
        if (ns->data.nsze == 0)
                return (ENXIO);

        flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
                NVME_NS_DATA_FLBAS_FORMAT_MASK;
        /*
         * Note: format is a 0-based value, so > is appropriate here,
         *  not >=.
         */
        if (flbas_fmt > ns->data.nlbaf) {
                printf("lba format %d exceeds number supported (%d)\n",
                    flbas_fmt, ns->data.nlbaf + 1);
                return (ENXIO);
        }

        /*
         * Older Intel devices advertise in vendor specific space an alignment
         * that improves performance.  If present, use it for the stripe size.
         * NVMe 1.3 standardized this as NOIOB, and newer Intel drives use that.
         */
        switch (pci_get_devid(ctrlr->dev)) {
        case 0x09538086:                /* Intel DC P3500 */
        case 0x0a538086:                /* Intel DC P3520 */
        case 0x0a548086:                /* Intel DC P4500 */
        case 0x0a558086:                /* Dell Intel P4600 */
                if (ctrlr->cdata.vs[3] != 0)
                        ns->boundary =
                            (1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size;
                else
                        ns->boundary = 0;
                break;
        default:
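                /* NOIOB is expressed in logical blocks, hence the scaling. */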
                ns->boundary = ns->data.noiob * nvme_ns_get_sector_size(ns);
                break;
        }

        if (nvme_ctrlr_has_dataset_mgmt(&ctrlr->cdata))
                ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

        vwc_present = (ctrlr->cdata.vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) &
                NVME_CTRLR_DATA_VWC_PRESENT_MASK;
        if (vwc_present)
                ns->flags |= NVME_NS_FLUSH_SUPPORTED;

        /*
         * cdev may have already been created, if we are reconstructing the
         *  namespace after a controller-level reset.
         */
        if (ns->cdev != NULL)
                return (0);

        /*
         * Namespace IDs start at 1, so we need to subtract 1 to create a
         *  correct unit number.
         */
        unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1;

        make_dev_args_init(&md_args);
        md_args.mda_devsw = &nvme_ns_cdevsw;
        md_args.mda_unit = unit;
        md_args.mda_mode = 0600;
        md_args.mda_si_drv1 = ns;
        res = make_dev_s(&md_args, &ns->cdev, "nvme%dns%d",
            device_get_unit(ctrlr->dev), ns->id);
        if (res != 0)
                return (ENXIO);

        ns->cdev->si_flags |= SI_UNMAPPED;

        return (0);
}

void
nvme_ns_destruct(struct nvme_namespace *ns)
{

        if (ns->cdev != NULL)
                destroy_dev(ns->cdev);
}
