FreeBSD/Linux Kernel Cross Reference
sys/scsi/sd.c


    1 /*      $OpenBSD: sd.c,v 1.333 2022/10/23 14:39:19 krw Exp $    */
    2 /*      $NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $ */
    3 
    4 /*-
    5  * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
    6  * All rights reserved.
    7  *
    8  * This code is derived from software contributed to The NetBSD Foundation
    9  * by Charles M. Hannum.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30  * POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 /*
   34  * Originally written by Julian Elischer (julian@dialix.oz.au)
   35  * for TRW Financial Systems for use under the MACH(2.5) operating system.
   36  *
   37  * TRW Financial Systems, in accordance with their agreement with Carnegie
   38  * Mellon University, makes this software available to CMU to distribute
   39  * or use in any manner that they see fit as long as this message is kept with
   40  * the software. For this reason TFS also grants any other persons or
   41  * organisations permission to use or modify this software.
   42  *
   43  * TFS supplies this software to be publicly redistributed
   44  * on the understanding that TFS is not responsible for the correct
   45  * functioning of this software in any circumstances.
   46  *
   47  * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
   48  */
   49 
   50 #include <sys/stdint.h>
   51 #include <sys/param.h>
   52 #include <sys/systm.h>
   53 #include <sys/timeout.h>
   54 #include <sys/fcntl.h>
   55 #include <sys/stat.h>
   56 #include <sys/ioctl.h>
   57 #include <sys/mtio.h>
   58 #include <sys/mutex.h>
   59 #include <sys/buf.h>
   60 #include <sys/uio.h>
   61 #include <sys/malloc.h>
   62 #include <sys/pool.h>
   63 #include <sys/errno.h>
   64 #include <sys/device.h>
   65 #include <sys/disklabel.h>
   66 #include <sys/disk.h>
   67 #include <sys/conf.h>
   68 #include <sys/scsiio.h>
   69 #include <sys/dkio.h>
   70 #include <sys/reboot.h>
   71 
   72 #include <scsi/scsi_all.h>
   73 #include <scsi/scsi_debug.h>
   74 #include <scsi/scsi_disk.h>
   75 #include <scsi/scsiconf.h>
   76 #include <scsi/sdvar.h>
   77 
   78 #include <ufs/ffs/fs.h>                 /* for BBSIZE and SBSIZE */
   79 
   80 #include <sys/vnode.h>
   81 
   82 int     sdmatch(struct device *, void *, void *);
   83 void    sdattach(struct device *, struct device *, void *);
   84 int     sdactivate(struct device *, int);
   85 int     sddetach(struct device *, int);
   86 
   87 void    sdminphys(struct buf *);
   88 int     sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);
   89 void    sdstart(struct scsi_xfer *);
   90 int     sd_interpret_sense(struct scsi_xfer *);
   91 int     sd_read_cap_10(struct sd_softc *, int);
   92 int     sd_read_cap_16(struct sd_softc *, int);
   93 int     sd_read_cap(struct sd_softc *, int);
   94 int     sd_thin_pages(struct sd_softc *, int);
   95 int     sd_vpd_block_limits(struct sd_softc *, int);
   96 int     sd_vpd_thin(struct sd_softc *, int);
   97 int     sd_thin_params(struct sd_softc *, int);
   98 int     sd_get_parms(struct sd_softc *, int);
   99 int     sd_flush(struct sd_softc *, int);
  100 
  101 void    viscpy(u_char *, u_char *, int);
  102 
  103 int     sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
  104 int     sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);
  105 
  106 int     sd_cmd_rw6(struct scsi_generic *, int, u_int64_t, u_int32_t);
  107 int     sd_cmd_rw10(struct scsi_generic *, int, u_int64_t, u_int32_t);
  108 int     sd_cmd_rw12(struct scsi_generic *, int, u_int64_t, u_int32_t);
  109 int     sd_cmd_rw16(struct scsi_generic *, int, u_int64_t, u_int32_t);
  110 
  111 void    sd_buf_done(struct scsi_xfer *);
  112 
  113 const struct cfattach sd_ca = {
  114         sizeof(struct sd_softc), sdmatch, sdattach,
  115         sddetach, sdactivate
  116 };
  117 
  118 struct cfdriver sd_cd = {
  119         NULL, "sd", DV_DISK
  120 };
  121 
  122 const struct scsi_inquiry_pattern sd_patterns[] = {
  123         {T_DIRECT, T_FIXED,
  124          "",         "",                 ""},
  125         {T_DIRECT, T_REMOV,
  126          "",         "",                 ""},
  127         {T_RDIRECT, T_FIXED,
  128          "",         "",                 ""},
  129         {T_RDIRECT, T_REMOV,
  130          "",         "",                 ""},
  131         {T_OPTICAL, T_FIXED,
  132          "",         "",                 ""},
  133         {T_OPTICAL, T_REMOV,
  134          "",         "",                 ""},
  135 };
  136 
  137 #define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))
  138 
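/*
 * Match any direct access, reduced block command or optical device,
 * fixed or removable; the sd_patterns entries accept all vendor,
 * product and revision strings.
 */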
  139 int
  140 sdmatch(struct device *parent, void *match, void *aux)
  141 {
  142         struct scsi_attach_args         *sa = aux;
  143         struct scsi_inquiry_data        *inq = &sa->sa_sc_link->inqdata;
  144         int                              priority;
  145 
  146         (void)scsi_inqmatch(inq, sd_patterns, nitems(sd_patterns),
  147             sizeof(sd_patterns[0]), &priority);
  148 
  149         return priority;
  150 }
  151 
  152 /*
   153  * The routine called by the low-level SCSI code when it discovers
  154  * a device suitable for this driver.
  155  */
  156 void
  157 sdattach(struct device *parent, struct device *self, void *aux)
  158 {
  159         struct dk_cache                  dkc;
  160         struct sd_softc                 *sc = (struct sd_softc *)self;
  161         struct scsi_attach_args         *sa = aux;
  162         struct disk_parms               *dp = &sc->params;
  163         struct scsi_link                *link = sa->sa_sc_link;
  164         int                              error, sd_autoconf;
  165         int                              sortby = BUFQ_DEFAULT;
  166 
  167         SC_DEBUG(link, SDEV_DB2, ("sdattach:\n"));
  168 
  169         sd_autoconf = scsi_autoconf | SCSI_SILENT |
  170             SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;
  171 
  172         /*
  173          * Store information needed to contact our base driver.
  174          */
  175         sc->sc_link = link;
  176         link->interpret_sense = sd_interpret_sense;
  177         link->device_softc = sc;
  178 
  179         if (ISSET(link->flags, SDEV_ATAPI) && ISSET(link->flags,
  180             SDEV_REMOVABLE))
  181                 SET(link->quirks, SDEV_NOSYNCCACHE);
  182 
  183         /*
  184          * Use the subdriver to request information regarding the drive. We
  185          * cannot use interrupts yet, so the request must specify this.
  186          */
  187         printf("\n");
  188 
  189         scsi_xsh_set(&sc->sc_xsh, link, sdstart);
  190 
  191         /* Spin up non-UMASS devices ready or not. */
  192         if (!ISSET(link->flags, SDEV_UMASS))
  193                 scsi_start(link, SSS_START, sd_autoconf);
  194 
  195         /*
  196          * Some devices (e.g. BlackBerry Pearl) won't admit they have
   197  * media loaded unless it's been locked in.
  198          */
  199         if (ISSET(link->flags, SDEV_REMOVABLE))
  200                 scsi_prevent(link, PR_PREVENT, sd_autoconf);
  201 
  202         /* Check that it is still responding and ok. */
  203         error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
  204             sd_autoconf);
  205         if (error == 0)
  206                 error = sd_get_parms(sc, sd_autoconf);
  207 
  208         if (ISSET(link->flags, SDEV_REMOVABLE))
  209                 scsi_prevent(link, PR_ALLOW, sd_autoconf);
  210 
  211         if (error == 0) {
  212                 printf("%s: %lluMB, %u bytes/sector, %llu sectors",
  213                     sc->sc_dev.dv_xname,
  214                     dp->disksize / (1048576 / dp->secsize), dp->secsize,
  215                     dp->disksize);
  216                 if (ISSET(sc->flags, SDF_THIN)) {
  217                         sortby = BUFQ_FIFO;
  218                         printf(", thin");
  219                 }
  220                 if (ISSET(link->flags, SDEV_READONLY))
  221                         printf(", readonly");
  222                 printf("\n");
  223         }
  224 
  225         /*
  226          * Initialize disk structures.
  227          */
  228         sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
  229         bufq_init(&sc->sc_bufq, sortby);
  230 
  231         /*
  232          * Enable write cache by default.
  233          */
  234         memset(&dkc, 0, sizeof(dkc));
  235         if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
  236                 dkc.wrcache = 1;
  237                 sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
  238         }
  239 
  240         /* Attach disk. */
  241         disk_attach(&sc->sc_dev, &sc->sc_dk);
  242 }
  243 
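/*
 * Autoconf activation hook: flush a dirty cache before suspend and
 * powerdown, stop the unit when powering off, restart it on resume and
 * mark the softc as dying on deactivate.
 */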
  244 int
  245 sdactivate(struct device *self, int act)
  246 {
  247         struct scsi_link                *link;
  248         struct sd_softc                 *sc = (struct sd_softc *)self;
  249 
  250         if (ISSET(sc->flags, SDF_DYING))
  251                 return ENXIO;
  252         link = sc->sc_link;
  253 
  254         switch (act) {
  255         case DVACT_SUSPEND:
  256                 /*
   257                  * We flush the cache, since our next step before
  258                  * DVACT_POWERDOWN might be a hibernate operation.
  259                  */
  260                 if (ISSET(sc->flags, SDF_DIRTY))
  261                         sd_flush(sc, SCSI_AUTOCONF);
  262                 break;
  263         case DVACT_POWERDOWN:
  264                 /*
  265                  * Stop the disk.  Stopping the disk should flush the
  266                  * cache, but we are paranoid so we flush the cache
  267                  * first.  We're cold at this point, so we poll for
  268                  * completion.
  269                  */
  270                 if (ISSET(sc->flags, SDF_DIRTY))
  271                         sd_flush(sc, SCSI_AUTOCONF);
  272                 if (ISSET(boothowto, RB_POWERDOWN))
  273                         scsi_start(link, SSS_STOP,
  274                             SCSI_IGNORE_ILLEGAL_REQUEST |
  275                             SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
  276                 break;
  277         case DVACT_RESUME:
  278                 scsi_start(link, SSS_START,
  279                     SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
  280                 break;
  281         case DVACT_DEACTIVATE:
  282                 SET(sc->flags, SDF_DYING);
  283                 scsi_xsh_del(&sc->sc_xsh);
  284                 break;
  285         }
  286         return 0;
  287 }
  288 
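/*
 * Detach the device: drain any queued bufs, revoke open instances via
 * disk_gone() and tear down the disk structures.
 */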
  289 int
  290 sddetach(struct device *self, int flags)
  291 {
  292         struct sd_softc *sc = (struct sd_softc *)self;
  293 
  294         bufq_drain(&sc->sc_bufq);
  295 
  296         disk_gone(sdopen, self->dv_unit);
  297 
  298         /* Detach disk. */
  299         bufq_destroy(&sc->sc_bufq);
  300         disk_detach(&sc->sc_dk);
  301 
  302         return 0;
  303 }
  304 
  305 /*
  306  * Open the device. Make sure the partition info is as up-to-date as can be.
  307  */
  308 int
  309 sdopen(dev_t dev, int flag, int fmt, struct proc *p)
  310 {
  311         struct scsi_link                *link;
  312         struct sd_softc                 *sc;
  313         int                              error = 0, part, rawopen, unit;
  314 
  315         unit = DISKUNIT(dev);
  316         part = DISKPART(dev);
  317 
  318         rawopen = (part == RAW_PART) && (fmt == S_IFCHR);
  319 
  320         sc = sdlookup(unit);
  321         if (sc == NULL)
  322                 return ENXIO;
  323         if (ISSET(sc->flags, SDF_DYING)) {
  324                 device_unref(&sc->sc_dev);
  325                 return ENXIO;
  326         }
  327         link = sc->sc_link;
  328 
  329         SC_DEBUG(link, SDEV_DB1,
  330             ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
  331             sd_cd.cd_ndevs, part));
  332 
  333         if (ISSET(flag, FWRITE) && ISSET(link->flags, SDEV_READONLY)) {
  334                 device_unref(&sc->sc_dev);
  335                 return EACCES;
  336         }
  337         if ((error = disk_lock(&sc->sc_dk)) != 0) {
  338                 device_unref(&sc->sc_dev);
  339                 return error;
  340         }
  341         if (ISSET(sc->flags, SDF_DYING)) {
  342                 error = ENXIO;
  343                 goto die;
  344         }
  345 
  346         if (sc->sc_dk.dk_openmask != 0) {
  347                 /*
  348                  * If any partition is open, but the disk has been invalidated,
   349                  * disallow further opens of non-raw partitions.
  350                  */
  351                 if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
  352                         if (rawopen)
  353                                 goto out;
  354                         error = EIO;
  355                         goto bad;
  356                 }
  357         } else {
  358                 /* Spin up non-UMASS devices ready or not. */
  359                 if (!ISSET(link->flags, SDEV_UMASS))
  360                         scsi_start(link, SSS_START, (rawopen ? SCSI_SILENT :
  361                             0) | SCSI_IGNORE_ILLEGAL_REQUEST |
  362                             SCSI_IGNORE_MEDIA_CHANGE);
  363 
  364                 /*
  365                  * Use sd_interpret_sense() for sense errors.
  366                  *
  367                  * But only after spinning the disk up! Just in case a broken
  368                  * device returns "Initialization command required." and causes
  369                  * a loop of scsi_start() calls.
  370                  */
  371                 if (ISSET(sc->flags, SDF_DYING)) {
  372                         error = ENXIO;
  373                         goto die;
  374                 }
  375                 SET(link->flags, SDEV_OPEN);
  376 
  377                 /*
  378                  * Try to prevent the unloading of a removable device while
  379                  * it's open. But allow the open to proceed if the device can't
  380                  * be locked in.
  381                  */
  382                 if (ISSET(link->flags, SDEV_REMOVABLE)) {
  383                         scsi_prevent(link, PR_PREVENT, SCSI_SILENT |
  384                             SCSI_IGNORE_ILLEGAL_REQUEST |
  385                             SCSI_IGNORE_MEDIA_CHANGE);
  386                 }
  387 
  388                 /* Check that it is still responding and ok. */
  389                 if (ISSET(sc->flags, SDF_DYING)) {
  390                         error = ENXIO;
  391                         goto die;
  392                 }
  393                 error = scsi_test_unit_ready(link,
  394                     TEST_READY_RETRIES, SCSI_SILENT |
  395                     SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);
  396                 if (error) {
  397                         if (rawopen) {
  398                                 error = 0;
  399                                 goto out;
  400                         } else
  401                                 goto bad;
  402                 }
  403 
  404                 /* Load the physical device parameters. */
  405                 if (ISSET(sc->flags, SDF_DYING)) {
  406                         error = ENXIO;
  407                         goto die;
  408                 }
  409                 SET(link->flags, SDEV_MEDIA_LOADED);
  410                 if (sd_get_parms(sc, (rawopen ? SCSI_SILENT : 0)) == -1) {
  411                         if (ISSET(sc->flags, SDF_DYING)) {
  412                                 error = ENXIO;
  413                                 goto die;
  414                         }
  415                         CLR(link->flags, SDEV_MEDIA_LOADED);
  416                         error = ENXIO;
  417                         goto bad;
  418                 }
  419                 SC_DEBUG(link, SDEV_DB3, ("Params loaded\n"));
  420 
  421                 /* Load the partition info if not already loaded. */
  422                 error = sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0);
  423                 if (error == EIO || error == ENXIO)
  424                         goto bad;
  425                 SC_DEBUG(link, SDEV_DB3, ("Disklabel loaded\n"));
  426         }
  427 
  428 out:
  429         if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
  430                 goto bad;
  431 
  432         SC_DEBUG(link, SDEV_DB3, ("open complete\n"));
  433 
  434         /* It's OK to fall through because dk_openmask is now non-zero. */
  435 bad:
  436         if (sc->sc_dk.dk_openmask == 0) {
  437                 if (ISSET(sc->flags, SDF_DYING)) {
  438                         error = ENXIO;
  439                         goto die;
  440                 }
  441                 if (ISSET(link->flags, SDEV_REMOVABLE))
  442                         scsi_prevent(link, PR_ALLOW, SCSI_SILENT |
  443                             SCSI_IGNORE_ILLEGAL_REQUEST |
  444                             SCSI_IGNORE_MEDIA_CHANGE);
  445                 if (ISSET(sc->flags, SDF_DYING)) {
  446                         error = ENXIO;
  447                         goto die;
  448                 }
  449                 CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);
  450         }
  451 
  452 die:
  453         disk_unlock(&sc->sc_dk);
  454         device_unref(&sc->sc_dev);
  455         return error;
  456 }
  457 
  458 /*
  459  * Close the device. Only called if we are the last occurrence of an open
  460  * device.  Convenient now but usually a pain.
  461  */
  462 int
  463 sdclose(dev_t dev, int flag, int fmt, struct proc *p)
  464 {
  465         struct scsi_link                *link;
  466         struct sd_softc                 *sc;
  467         int                              part = DISKPART(dev);
  468         int                              error = 0;
  469 
  470         sc = sdlookup(DISKUNIT(dev));
  471         if (sc == NULL)
  472                 return ENXIO;
  473         if (ISSET(sc->flags, SDF_DYING)) {
  474                 device_unref(&sc->sc_dev);
  475                 return ENXIO;
  476         }
  477         link = sc->sc_link;
  478 
  479         disk_lock_nointr(&sc->sc_dk);
  480 
  481         disk_closepart(&sc->sc_dk, part, fmt);
  482 
  483         if ((ISSET(flag, FWRITE) || sc->sc_dk.dk_openmask == 0) &&
  484             ISSET(sc->flags, SDF_DIRTY))
  485                 sd_flush(sc, 0);
  486 
  487         if (sc->sc_dk.dk_openmask == 0) {
  488                 if (ISSET(sc->flags, SDF_DYING)) {
  489                         error = ENXIO;
  490                         goto die;
  491                 }
  492                 if (ISSET(link->flags, SDEV_REMOVABLE))
  493                         scsi_prevent(link, PR_ALLOW,
  494                             SCSI_IGNORE_ILLEGAL_REQUEST |
  495                             SCSI_IGNORE_NOT_READY | SCSI_SILENT);
  496                 if (ISSET(sc->flags, SDF_DYING)) {
  497                         error = ENXIO;
  498                         goto die;
  499                 }
  500                 CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);
  501 
  502                 if (ISSET(link->flags, SDEV_EJECTING)) {
  503                         scsi_start(link, SSS_STOP|SSS_LOEJ, 0);
  504                         if (ISSET(sc->flags, SDF_DYING)) {
  505                                 error = ENXIO;
  506                                 goto die;
  507                         }
  508                         CLR(link->flags, SDEV_EJECTING);
  509                 }
  510 
  511                 scsi_xsh_del(&sc->sc_xsh);
  512         }
  513 
  514 die:
  515         disk_unlock(&sc->sc_dk);
  516         device_unref(&sc->sc_dev);
  517         return error;
  518 }
  519 
  520 /*
  521  * Actually translate the requested transfer into one the physical driver
  522  * can understand.  The transfer is described by a buf and will include
  523  * only one physical transfer.
  524  */
  525 void
  526 sdstrategy(struct buf *bp)
  527 {
  528         struct scsi_link                *link;
  529         struct sd_softc                 *sc;
  530         int                              s;
  531 
  532         sc = sdlookup(DISKUNIT(bp->b_dev));
  533         if (sc == NULL) {
  534                 bp->b_error = ENXIO;
  535                 goto bad;
  536         }
  537         if (ISSET(sc->flags, SDF_DYING)) {
  538                 bp->b_error = ENXIO;
  539                 goto bad;
  540         }
  541         link = sc->sc_link;
  542 
  543         SC_DEBUG(link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
  544             bp->b_bcount, (long long)bp->b_blkno));
  545         /*
  546          * If the device has been made invalid, error out.
  547          */
  548         if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
  549                 if (ISSET(link->flags, SDEV_OPEN))
  550                         bp->b_error = EIO;
  551                 else
  552                         bp->b_error = ENODEV;
  553                 goto bad;
  554         }
  555 
  556         /* Validate the request. */
  557         if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
  558                 goto done;
  559 
  560         /* Place it in the queue of disk activities for this disk. */
  561         bufq_queue(&sc->sc_bufq, bp);
  562 
  563         /*
  564          * Tell the device to get going on the transfer if it's
   565          * not doing anything; otherwise just wait for completion.
  566          */
  567         scsi_xsh_add(&sc->sc_xsh);
  568 
  569         device_unref(&sc->sc_dev);
  570         return;
  571 
  572 bad:
  573         SET(bp->b_flags, B_ERROR);
  574         bp->b_resid = bp->b_bcount;
  575 done:
  576         s = splbio();
  577         biodone(bp);
  578         splx(s);
  579         if (sc != NULL)
  580                 device_unref(&sc->sc_dev);
  581 }
  582 
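/*
 * CDB builders for the four READ/WRITE command sizes. Each one fills in
 * the opcode, logical block address and transfer length for the 6-, 10-,
 * 12- or 16-byte form and returns the CDB length for use as xs->cmdlen.
 */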
  583 int
  584 sd_cmd_rw6(struct scsi_generic *generic, int read, u_int64_t secno,
  585     u_int32_t nsecs)
  586 {
  587         struct scsi_rw *cmd = (struct scsi_rw *)generic;
  588 
  589         cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
  590         _lto3b(secno, cmd->addr);
  591         cmd->length = nsecs;
  592 
  593         return sizeof(*cmd);
  594 }
  595 
  596 int
  597 sd_cmd_rw10(struct scsi_generic *generic, int read, u_int64_t secno,
  598     u_int32_t nsecs)
  599 {
  600         struct scsi_rw_10 *cmd = (struct scsi_rw_10 *)generic;
  601 
  602         cmd->opcode = read ? READ_10 : WRITE_10;
  603         _lto4b(secno, cmd->addr);
  604         _lto2b(nsecs, cmd->length);
  605 
  606         return sizeof(*cmd);
  607 }
  608 
  609 int
  610 sd_cmd_rw12(struct scsi_generic *generic, int read, u_int64_t secno,
  611     u_int32_t nsecs)
  612 {
  613         struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)generic;
  614 
  615         cmd->opcode = read ? READ_12 : WRITE_12;
  616         _lto4b(secno, cmd->addr);
  617         _lto4b(nsecs, cmd->length);
  618 
  619         return sizeof(*cmd);
  620 }
  621 
  622 int
  623 sd_cmd_rw16(struct scsi_generic *generic, int read, u_int64_t secno,
  624     u_int32_t nsecs)
  625 {
  626         struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)generic;
  627 
  628         cmd->opcode = read ? READ_16 : WRITE_16;
  629         _lto8b(secno, cmd->addr);
  630         _lto4b(nsecs, cmd->length);
  631 
  632         return sizeof(*cmd);
  633 }
  634 
  635 /*
  636  * sdstart looks to see if there is a buf waiting for the device
  637  * and that the device is not already busy. If both are true,
   638  * it dequeues the buf and creates a scsi command to perform the
   639  * transfer in the buf. The transfer request will call scsi_done
   640  * on completion, which will in turn call this routine again
   641  * so that the next queued transfer is performed.
   642  * The bufs are queued by the strategy routine (sdstrategy).
  643  *
  644  * This routine is also called after other non-queued requests
  645  * have been made of the scsi driver, to ensure that the queue
  646  * continues to be drained.
  647  */
  648 void
  649 sdstart(struct scsi_xfer *xs)
  650 {
  651         struct scsi_link                *link = xs->sc_link;
  652         struct sd_softc                 *sc = link->device_softc;
  653         struct buf                      *bp;
  654         struct partition                *p;
  655         u_int64_t                        secno;
  656         u_int32_t                        nsecs;
  657         int                              read;
  658 
  659         if (ISSET(sc->flags, SDF_DYING)) {
  660                 scsi_xs_put(xs);
  661                 return;
  662         }
  663         if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
  664                 bufq_drain(&sc->sc_bufq);
  665                 scsi_xs_put(xs);
  666                 return;
  667         }
  668 
  669         bp = bufq_dequeue(&sc->sc_bufq);
  670         if (bp == NULL) {
  671                 scsi_xs_put(xs);
  672                 return;
  673         }
  674         read = ISSET(bp->b_flags, B_READ);
  675 
  676         SET(xs->flags, (read ? SCSI_DATA_IN : SCSI_DATA_OUT));
  677         xs->timeout = 60000;
  678         xs->data = bp->b_data;
  679         xs->datalen = bp->b_bcount;
  680         xs->done = sd_buf_done;
  681         xs->cookie = bp;
  682         xs->bp = bp;
  683 
  684         p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
  685         secno = DL_GETPOFFSET(p) + DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);
  686         nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
  687 
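        /*
         * Pick the smallest CDB that fits: a 6-byte CDB only for
         * pre-SCSI-2 devices (never ATAPI/UMASS) when the address fits
         * in 21 bits and the count in 8 bits; a 16-byte CDB for disks
         * with more than 2^32 sectors; otherwise 10-byte if the count
         * fits in 16 bits, else 12-byte.
         */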
  688         if (!ISSET(link->flags, SDEV_ATAPI | SDEV_UMASS) &&
  689             (SID_ANSII_REV(&link->inqdata) < SCSI_REV_2) &&
  690             ((secno & 0x1fffff) == secno) &&
  691             ((nsecs & 0xff) == nsecs))
  692                 xs->cmdlen = sd_cmd_rw6(&xs->cmd, read, secno, nsecs);
  693 
  694         else if (sc->params.disksize > UINT32_MAX)
  695                 xs->cmdlen = sd_cmd_rw16(&xs->cmd, read, secno, nsecs);
  696 
  697         else if (nsecs <= UINT16_MAX)
  698                 xs->cmdlen = sd_cmd_rw10(&xs->cmd, read, secno, nsecs);
  699 
  700         else
  701                 xs->cmdlen = sd_cmd_rw12(&xs->cmd, read, secno, nsecs);
  702 
  703         disk_busy(&sc->sc_dk);
  704         if (!read)
  705                 SET(sc->flags, SDF_DIRTY);
  706         scsi_xs_exec(xs);
  707 
   708         /* Move on to the next I/O. */
  709         if (bufq_peek(&sc->sc_bufq))
  710                 scsi_xsh_add(&sc->sc_xsh);
  711 }
  712 
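/*
 * Completion callback for bufs issued by sdstart(). Successful transfers
 * record the residual; sense data goes through sd_interpret_sense(), and
 * busy or timed-out commands are retried while retries remain. Anything
 * else fails the buf with EIO. The buf is finished with biodone() and the
 * xfer is returned.
 */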
  713 void
  714 sd_buf_done(struct scsi_xfer *xs)
  715 {
  716         struct sd_softc                 *sc = xs->sc_link->device_softc;
  717         struct buf                      *bp = xs->cookie;
  718         int                              error, s;
  719 
  720         switch (xs->error) {
  721         case XS_NOERROR:
  722                 bp->b_error = 0;
  723                 CLR(bp->b_flags, B_ERROR);
  724                 bp->b_resid = xs->resid;
  725                 break;
  726 
  727         case XS_SENSE:
  728         case XS_SHORTSENSE:
  729                 SC_DEBUG_SENSE(xs);
  730                 error = sd_interpret_sense(xs);
  731                 if (error == 0) {
  732                         bp->b_error = 0;
  733                         CLR(bp->b_flags, B_ERROR);
  734                         bp->b_resid = xs->resid;
  735                         break;
  736                 }
  737                 if (error != ERESTART) {
  738                         bp->b_error = error;
  739                         SET(bp->b_flags, B_ERROR);
  740                         xs->retries = 0;
  741                 }
  742                 goto retry;
  743 
  744         case XS_BUSY:
  745                 if (xs->retries) {
  746                         if (scsi_delay(xs, 1) != ERESTART)
  747                                 xs->retries = 0;
  748                 }
  749                 goto retry;
  750 
  751         case XS_TIMEOUT:
  752 retry:
  753                 if (xs->retries--) {
  754                         scsi_xs_exec(xs);
  755                         return;
  756                 }
  757                 /* FALLTHROUGH */
  758 
  759         default:
  760                 if (bp->b_error == 0)
  761                         bp->b_error = EIO;
  762                 SET(bp->b_flags, B_ERROR);
  763                 bp->b_resid = bp->b_bcount;
  764                 break;
  765         }
  766 
  767         disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid, bp->b_blkno,
  768             bp->b_flags & B_READ);
  769 
  770         s = splbio();
  771         biodone(bp);
  772         splx(s);
  773         scsi_xs_put(xs);
  774 }
  775 
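/*
 * Clamp a transfer to what the device can handle: pre-SCSI-2 devices are
 * limited to 255 sectors (a 6-byte CDB), then the adapter's dev_minphys
 * routine (or the generic minphys) is applied.
 */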
  776 void
  777 sdminphys(struct buf *bp)
  778 {
  779         struct scsi_link                *link;
  780         struct sd_softc                 *sc;
  781         long                             max;
  782 
  783         sc = sdlookup(DISKUNIT(bp->b_dev));
  784         if (sc == NULL)
  785                 return;  /* XXX - right way to fail this? */
  786         if (ISSET(sc->flags, SDF_DYING)) {
  787                 device_unref(&sc->sc_dev);
  788                 return;
  789         }
  790         link = sc->sc_link;
  791 
  792         /*
  793          * If the device is ancient, we want to make sure that
  794          * the transfer fits into a 6-byte cdb.
  795          *
   796          * XXX Note that the SCSI-1 spec says that 256-block transfers
  797          * are allowed in a 6-byte read/write, and are specified
  798          * by setting the "length" to 0.  However, we're conservative
  799          * here, allowing only 255-block transfers in case an
  800          * ancient device gets confused by length == 0.  A length of 0
  801          * in a 10-byte read/write actually means 0 blocks.
  802          */
  803         if (!ISSET(link->flags, SDEV_ATAPI | SDEV_UMASS) &&
  804             SID_ANSII_REV(&link->inqdata) < SCSI_REV_2) {
  805                 max = sc->sc_dk.dk_label->d_secsize * 0xff;
  806 
  807                 if (bp->b_bcount > max)
  808                         bp->b_bcount = max;
  809         }
  810 
  811         if (link->bus->sb_adapter->dev_minphys != NULL)
  812                 (*link->bus->sb_adapter->dev_minphys)(bp, link);
  813         else
  814                 minphys(bp);
  815 
  816         device_unref(&sc->sc_dev);
  817 }
  818 
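/*
 * Raw (character device) read and write entry points. physio() bounds
 * each transfer with sdminphys() and submits it through sdstrategy().
 */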
  819 int
  820 sdread(dev_t dev, struct uio *uio, int ioflag)
  821 {
  822         return physio(sdstrategy, dev, B_READ, sdminphys, uio);
  823 }
  824 
  825 int
  826 sdwrite(dev_t dev, struct uio *uio, int ioflag)
  827 {
  828         return physio(sdstrategy, dev, B_WRITE, sdminphys, uio);
  829 }
  830 
  831 /*
  832  * Perform special action on behalf of the user. Knows about the internals of
  833  * this device
  834  */
  835 int
  836 sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
  837 {
  838         struct scsi_link                *link;
  839         struct sd_softc                 *sc;
  840         struct disklabel                *lp;
  841         int                              error = 0;
  842         int                              part = DISKPART(dev);
  843 
  844         sc = sdlookup(DISKUNIT(dev));
  845         if (sc == NULL)
  846                 return ENXIO;
  847         if (ISSET(sc->flags, SDF_DYING)) {
  848                 device_unref(&sc->sc_dev);
  849                 return ENXIO;
  850         }
  851         link = sc->sc_link;
  852 
  853         SC_DEBUG(link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));
  854 
  855         /*
  856          * If the device is not valid, abandon ship.
  857          */
  858         if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
  859                 switch (cmd) {
  860                 case DIOCLOCK:
  861                 case DIOCEJECT:
  862                 case SCIOCIDENTIFY:
  863                 case SCIOCCOMMAND:
  864                 case SCIOCDEBUG:
  865                         if (part == RAW_PART)
  866                                 break;
  867                 /* FALLTHROUGH */
  868                 default:
  869                         if (!ISSET(link->flags, SDEV_OPEN)) {
  870                                 error = ENODEV;
  871                                 goto exit;
  872                         } else {
  873                                 error = EIO;
  874                                 goto exit;
  875                         }
  876                 }
  877         }
  878 
  879         switch (cmd) {
  880         case DIOCRLDINFO:
  881                 lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
  882                 sdgetdisklabel(dev, sc, lp, 0);
  883                 memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
  884                 free(lp, M_TEMP, sizeof(*lp));
  885                 goto exit;
  886 
  887         case DIOCGPDINFO:
  888                 sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
  889                 goto exit;
  890 
  891         case DIOCGDINFO:
  892                 *(struct disklabel *)addr = *(sc->sc_dk.dk_label);
  893                 goto exit;
  894 
  895         case DIOCGPART:
  896                 ((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
  897                 ((struct partinfo *)addr)->part =
  898                     &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
  899                 goto exit;
  900 
  901         case DIOCWDINFO:
  902         case DIOCSDINFO:
  903                 if (!ISSET(flag, FWRITE)) {
  904                         error = EBADF;
  905                         goto exit;
  906                 }
  907 
  908                 if ((error = disk_lock(&sc->sc_dk)) != 0)
  909                         goto exit;
  910 
  911                 error = setdisklabel(sc->sc_dk.dk_label,
  912                     (struct disklabel *)addr, sc->sc_dk.dk_openmask);
  913                 if (error == 0) {
  914                         if (cmd == DIOCWDINFO)
  915                                 error = writedisklabel(DISKLABELDEV(dev),
  916                                     sdstrategy, sc->sc_dk.dk_label);
  917                 }
  918 
  919                 disk_unlock(&sc->sc_dk);
  920                 goto exit;
  921 
  922         case DIOCLOCK:
  923                 error = scsi_prevent(link,
  924                     (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
  925                 goto exit;
  926 
  927         case MTIOCTOP:
  928                 if (((struct mtop *)addr)->mt_op != MTOFFL) {
  929                         error = EIO;
  930                         goto exit;
  931                 }
  932                 /* FALLTHROUGH */
  933         case DIOCEJECT:
  934                 if (!ISSET(link->flags, SDEV_REMOVABLE)) {
  935                         error = ENOTTY;
  936                         goto exit;
  937                 }
  938                 SET(link->flags, SDEV_EJECTING);
  939                 goto exit;
  940 
  941         case DIOCINQ:
  942                 error = scsi_do_ioctl(link, cmd, addr, flag);
  943                 if (error == ENOTTY)
  944                         error = sd_ioctl_inquiry(sc,
  945                             (struct dk_inquiry *)addr);
  946                 goto exit;
  947 
  948         case DIOCSCACHE:
  949                 if (!ISSET(flag, FWRITE)) {
  950                         error = EBADF;
  951                         goto exit;
  952                 }
  953                 /* FALLTHROUGH */
  954         case DIOCGCACHE:
  955                 error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
  956                 goto exit;
  957 
  958         case DIOCCACHESYNC:
  959                 if (!ISSET(flag, FWRITE)) {
  960                         error = EBADF;
  961                         goto exit;
  962                 }
  963                 if (ISSET(sc->flags, SDF_DIRTY) || *(int *)addr != 0)
  964                         error = sd_flush(sc, 0);
  965                 goto exit;
  966 
  967         default:
  968                 if (part != RAW_PART) {
  969                         error = ENOTTY;
  970                         goto exit;
  971                 }
  972                 error = scsi_do_ioctl(link, cmd, addr, flag);
  973         }
  974 
  975  exit:
  976         device_unref(&sc->sc_dev);
  977         return error;
  978 }
  979 
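/*
 * DIOCINQ fallback: report the INQUIRY vendor, product and revision
 * strings, plus the unit serial number VPD page if the device has one.
 */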
  980 int
  981 sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
  982 {
  983         struct scsi_link                *link;
  984         struct scsi_vpd_serial          *vpd;
  985 
  986         vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);
  987 
  988         if (ISSET(sc->flags, SDF_DYING)) {
  989                 dma_free(vpd, sizeof(*vpd));
  990                 return ENXIO;
  991         }
  992         link = sc->sc_link;
  993 
  994         bzero(di, sizeof(struct dk_inquiry));
  995         scsi_strvis(di->vendor, link->inqdata.vendor,
  996             sizeof(link->inqdata.vendor));
  997         scsi_strvis(di->product, link->inqdata.product,
  998             sizeof(link->inqdata.product));
  999         scsi_strvis(di->revision, link->inqdata.revision,
 1000             sizeof(link->inqdata.revision));
 1001 
 1002         /* the serial vpd page is optional */
 1003         if (scsi_inquire_vpd(link, vpd, sizeof(*vpd), SI_PG_SERIAL, 0) == 0)
 1004                 scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
 1005         else
 1006                 strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));
 1007 
 1008         dma_free(vpd, sizeof(*vpd));
 1009         return 0;
 1010 }
 1011 
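/*
 * DIOCGCACHE/DIOCSCACHE handler: read the caching mode page and either
 * report the write/read cache state or update the WCE/RCD bits with a
 * MODE SELECT. UMASS devices are not touched.
 */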
 1012 int
 1013 sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
 1014 {
 1015         struct scsi_link                *link;
 1016         union scsi_mode_sense_buf       *buf;
 1017         struct page_caching_mode        *mode = NULL;
 1018         u_int                            wrcache, rdcache;
 1019         int                              big, rv;
 1020 
 1021         if (ISSET(sc->flags, SDF_DYING))
 1022                 return ENXIO;
 1023         link = sc->sc_link;
 1024 
 1025         if (ISSET(link->flags, SDEV_UMASS))
 1026                 return EOPNOTSUPP;
 1027 
 1028         /* See if the adapter has special handling. */
 1029         rv = scsi_do_ioctl(link, cmd, (caddr_t)dkc, 0);
 1030         if (rv != ENOTTY)
 1031                 return rv;
 1032 
 1033         buf = dma_alloc(sizeof(*buf), PR_WAITOK);
 1034         if (buf == NULL)
 1035                 return ENOMEM;
 1036 
 1037         if (ISSET(sc->flags, SDF_DYING)) {
 1038                 rv = ENXIO;
 1039                 goto done;
 1040         }
 1041         rv = scsi_do_mode_sense(link, PAGE_CACHING_MODE, buf, (void **)&mode,
 1042             sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
 1043         if (rv == 0 && mode == NULL)
 1044                 rv = EIO;
 1045         if (rv != 0)
 1046                 goto done;
 1047 
 1048         wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
 1049         rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);
 1050 
 1051         switch (cmd) {
 1052         case DIOCGCACHE:
 1053                 dkc->wrcache = wrcache;
 1054                 dkc->rdcache = rdcache;
 1055                 break;
 1056 
 1057         case DIOCSCACHE:
 1058                 if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
 1059                         break;
 1060 
 1061                 if (dkc->wrcache)
 1062                         SET(mode->flags, PG_CACHE_FL_WCE);
 1063                 else
 1064                         CLR(mode->flags, PG_CACHE_FL_WCE);
 1065 
 1066                 if (dkc->rdcache)
 1067                         CLR(mode->flags, PG_CACHE_FL_RCD);
 1068                 else
 1069                         SET(mode->flags, PG_CACHE_FL_RCD);
 1070 
 1071                 if (ISSET(sc->flags, SDF_DYING)) {
 1072                         rv = ENXIO;
 1073                         goto done;
 1074                 }
 1075                 if (big) {
 1076                         rv = scsi_mode_select_big(link, SMS_PF,
 1077                             &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
 1078                 } else {
 1079                         rv = scsi_mode_select(link, SMS_PF,
 1080                             &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
 1081                 }
 1082                 break;
 1083         }
 1084 
 1085 done:
 1086         dma_free(buf, sizeof(*buf));
 1087         return rv;
 1088 }
 1089 
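/*
 * Illustrative sketch only, not part of the driver: from userland the
 * write cache can be toggled through these ioctls on the raw device
 * (e.g. /dev/rsd0c), roughly:
 *
 *        struct dk_cache dkc;
 *        int fd = open("/dev/rsd0c", O_RDWR);
 *        if (fd != -1 && ioctl(fd, DIOCGCACHE, &dkc) == 0) {
 *                dkc.wrcache = 1;
 *                (void)ioctl(fd, DIOCSCACHE, &dkc);
 *        }
 */
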
 1090 /*
 1091  * Load the label information on the named device.
 1092  */
 1093 int
 1094 sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
 1095     int spoofonly)
 1096 {
 1097         char                             packname[sizeof(lp->d_packname) + 1];
 1098         char                             product[17], vendor[9];
 1099         struct scsi_link                *link;
 1100         size_t                           len;
 1101 
 1102         if (ISSET(sc->flags, SDF_DYING))
 1103                 return ENXIO;
 1104         link = sc->sc_link;
 1105 
 1106         bzero(lp, sizeof(struct disklabel));
 1107 
 1108         lp->d_secsize = sc->params.secsize;
 1109         lp->d_ntracks = sc->params.heads;
 1110         lp->d_nsectors = sc->params.sectors;
 1111         lp->d_ncylinders = sc->params.cyls;
 1112         lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
 1113         if (lp->d_secpercyl == 0) {
 1114                 lp->d_secpercyl = 100;
 1115                 /* As long as it's not 0 - readdisklabel divides by it. */
 1116         }
 1117 
 1118         lp->d_type = DTYPE_SCSI;
 1119         if ((link->inqdata.device & SID_TYPE) == T_OPTICAL)
 1120                 strncpy(lp->d_typename, "SCSI optical",
 1121                     sizeof(lp->d_typename));
 1122         else
 1123                 strncpy(lp->d_typename, "SCSI disk",
 1124                     sizeof(lp->d_typename));
 1125 
 1126         /*
 1127          * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
 1128          * then leave out '<vendor> ' and use only as much of '<product>' as
 1129          * does fit.
 1130          */
 1131         viscpy(vendor, link->inqdata.vendor, 8);
 1132         viscpy(product, link->inqdata.product, 16);
 1133         len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
 1134         if (len > sizeof(lp->d_packname)) {
 1135                 strlcpy(packname, product, sizeof(packname));
 1136                 len = strlen(packname);
 1137         }
 1138         /*
 1139          * It is safe to use len as the count of characters to copy because
 1140          * packname is sizeof(lp->d_packname)+1, the string in packname is
  1141          * always NUL-terminated, and len does not count the terminating NUL.
  1142          * d_packname is not a NUL-terminated string.
 1143          */
 1144         memcpy(lp->d_packname, packname, len);
 1145 
 1146         DL_SETDSIZE(lp, sc->params.disksize);
 1147         lp->d_version = 1;
 1148 
 1149         lp->d_magic = DISKMAGIC;
 1150         lp->d_magic2 = DISKMAGIC;
 1151         lp->d_checksum = dkcksum(lp);
 1152 
 1153         /*
 1154          * Call the generic disklabel extraction routine.
 1155          */
 1156         return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
 1157 }
 1158 
 1159 
 1160 /*
  1161  * Check errors: handle some NOT READY sense here, defer the rest.
 1162  */
 1163 int
 1164 sd_interpret_sense(struct scsi_xfer *xs)
 1165 {
 1166         struct scsi_sense_data          *sense = &xs->sense;
 1167         struct scsi_link                *link = xs->sc_link;
 1168         int                              retval;
 1169         u_int8_t                         serr = sense->error_code & SSD_ERRCODE;
 1170 
 1171         /*
 1172          * Let the generic code handle everything except a few categories of
 1173          * LUN not ready errors on open devices.
 1174          */
 1175         if ((!ISSET(link->flags, SDEV_OPEN)) ||
 1176             (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
 1177             ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
 1178             (sense->extra_len < 6))
 1179                 return scsi_interpret_sense(xs);
 1180 
 1181         if (ISSET(xs->flags, SCSI_IGNORE_NOT_READY))
 1182                 return 0;
 1183 
 1184         switch (ASC_ASCQ(sense)) {
 1185         case SENSE_NOT_READY_BECOMING_READY:
 1186                 SC_DEBUG(link, SDEV_DB1, ("becoming ready.\n"));
 1187                 retval = scsi_delay(xs, 5);
 1188                 break;
 1189 
 1190         case SENSE_NOT_READY_INIT_REQUIRED:
 1191                 SC_DEBUG(link, SDEV_DB1, ("spinning up\n"));
 1192                 retval = scsi_start(link, SSS_START,
 1193                     SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
 1194                 if (retval == 0)
 1195                         retval = ERESTART;
 1196                 else if (retval == ENOMEM)
 1197                         /* Can't issue the command. Fall back on a delay. */
 1198                         retval = scsi_delay(xs, 5);
 1199                 else
 1200                         SC_DEBUG(link, SDEV_DB1, ("spin up failed (%#x)\n",
 1201                             retval));
 1202                 break;
 1203 
 1204         default:
 1205                 retval = scsi_interpret_sense(xs);
 1206                 break;
 1207         }
 1208 
 1209         return retval;
 1210 }
 1211 
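/*
 * Return the size, in DEV_BSIZE blocks, of the partition named by dev,
 * opening the device if no partition is currently open. Only FS_SWAP
 * partitions report a size; anything else yields -1.
 */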
 1212 daddr_t
 1213 sdsize(dev_t dev)
 1214 {
 1215         struct disklabel                *lp;
 1216         struct sd_softc                 *sc;
 1217         daddr_t                          size;
 1218         int                              part, omask;
 1219 
 1220         sc = sdlookup(DISKUNIT(dev));
 1221         if (sc == NULL)
 1222                 return -1;
 1223         if (ISSET(sc->flags, SDF_DYING)) {
 1224                 size = -1;
 1225                 goto exit;
 1226         }
 1227 
 1228         part = DISKPART(dev);
 1229         omask = sc->sc_dk.dk_openmask & (1 << part);
 1230 
 1231         if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
 1232                 size = -1;
 1233                 goto exit;
 1234         }
 1235 
 1236         lp = sc->sc_dk.dk_label;
 1237         if (ISSET(sc->flags, SDF_DYING)) {
 1238                 size = -1;
 1239                 goto exit;
 1240         }
 1241         if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
 1242                 size = -1;
 1243         else if (lp->d_partitions[part].p_fstype != FS_SWAP)
 1244                 size = -1;
 1245         else
 1246                 size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
 1247         if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
 1248                 size = -1;
 1249 
 1250  exit:
 1251         device_unref(&sc->sc_dev);
 1252         return size;
 1253 }
 1254 
 1255 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch. */
 1256 static int sddoingadump;
 1257 
 1258 /*
 1259  * Dump all of physical memory into the partition specified, starting
 1260  * at offset 'dumplo' into the partition.
 1261  */
 1262 int
 1263 sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
 1264 {
 1265         struct sd_softc                 *sc;
 1266         struct disklabel                *lp;
 1267         struct scsi_xfer                *xs;
 1268         u_int64_t                        nsects;        /* partition sectors */
 1269         u_int64_t                        sectoff;       /* partition offset */
 1270         u_int64_t                        totwrt;        /* sectors left */
 1271         int                              part, rv, unit;
 1272         u_int32_t                        sectorsize;
 1273         u_int32_t                        nwrt;          /* sectors to write */
 1274 
 1275         /* Check if recursive dump; if so, punt. */
 1276         if (sddoingadump)
 1277                 return EFAULT;
 1278         if (blkno < 0)
 1279                 return EINVAL;
 1280 
 1281         /* Mark as active early. */
 1282         sddoingadump = 1;
 1283 
 1284         unit = DISKUNIT(dev);   /* Decompose unit & partition. */
 1285         part = DISKPART(dev);
 1286 
 1287         /* Check for acceptable drive number. */
 1288         if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
 1289                 return ENXIO;
 1290 
 1291         /*
 1292          * XXX Can't do this check, since the media might have been
 1293          * XXX marked `invalid' by successful unmounting of all
 1294          * XXX filesystems.
 1295          */
 1296 #if 0
 1297         /* Make sure it was initialized. */
 1298         if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
 1299                 return ENXIO;
 1300 #endif /* 0 */
 1301 
 1302         /* Convert to disk sectors. Request must be a multiple of size. */
 1303         lp = sc->sc_dk.dk_label;
 1304         sectorsize = lp->d_secsize;
 1305         if ((size % sectorsize) != 0)
 1306                 return EFAULT;
 1307         if ((blkno % DL_BLKSPERSEC(lp)) != 0)
 1308                 return EFAULT;
 1309         totwrt = size / sectorsize;
 1310         blkno = DL_BLKTOSEC(lp, blkno);
 1311 
 1312         nsects = DL_GETPSIZE(&lp->d_partitions[part]);
 1313         sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);
 1314 
 1315         /* Check transfer bounds against partition size. */
 1316         if ((blkno + totwrt) > nsects)
 1317                 return EINVAL;
 1318 
 1319         /* Offset block number to start of partition. */
 1320         blkno += sectoff;
 1321 
 1322         while (totwrt > 0) {
 1323                 if (totwrt > UINT32_MAX)
 1324                         nwrt = UINT32_MAX;
 1325                 else
 1326                         nwrt = totwrt;
 1327 
 1328 #ifndef SD_DUMP_NOT_TRUSTED
 1329                 xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
 1330                 if (xs == NULL)
 1331                         return ENOMEM;
 1332 
 1333                 xs->timeout = 10000;
 1334                 SET(xs->flags, SCSI_DATA_OUT);
 1335                 xs->data = va;
 1336                 xs->datalen = nwrt * sectorsize;
 1337 
 1338                 xs->cmdlen = sd_cmd_rw10(&xs->cmd, 0, blkno, nwrt); /* XXX */
 1339 
 1340                 rv = scsi_xs_sync(xs);
 1341                 scsi_xs_put(xs);
 1342                 if (rv != 0)
 1343                         return ENXIO;
 1344 #else   /* SD_DUMP_NOT_TRUSTED */
 1345                 /* Let's just talk about this first. */
 1346                 printf("sd%d: dump addr 0x%x, blk %lld\n", unit, va,
 1347                     (long long)blkno);
 1348                 delay(500 * 1000);      /* 1/2 a second */
 1349 #endif  /* ~SD_DUMP_NOT_TRUSTED */
 1350 
 1351                 /* Update block count. */
 1352                 totwrt -= nwrt;
 1353                 blkno += nwrt;
 1354                 va += sectorsize * nwrt;
 1355         }
 1356 
 1357         sddoingadump = 0;
 1358 
 1359         return 0;
 1360 }
 1361 
 1362 /*
 1363  * Copy up to len chars from src to dst, ignoring non-printables.
 1364  * Must be room for len+1 chars in dst so we can write the NUL.
 1365  * Does not assume src is NUL-terminated.
 1366  */
 1367 void
 1368 viscpy(u_char *dst, u_char *src, int len)
 1369 {
 1370         while (len > 0 && *src != '\0') {
 1371                 if (*src < 0x20 || *src >= 0x80) {
 1372                         src++;
 1373                         continue;
 1374                 }
 1375                 *dst++ = *src++;
 1376                 len--;
 1377         }
 1378         *dst = '\0';
 1379 }
 1380 
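/*
 * Issue READ CAPACITY(10) and record the disk and sector size. Returns
 * -1 if the command fails or the device reports zero capacity.
 */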
 1381 int
 1382 sd_read_cap_10(struct sd_softc *sc, int flags)
 1383 {
 1384         struct scsi_read_cap_data       *rdcap;
 1385         int                              rv;
 1386 
 1387         rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
 1388             PR_NOWAIT : PR_WAITOK) | PR_ZERO);
 1389         if (rdcap == NULL)
 1390                 return -1;
 1391 
 1392         if (ISSET(sc->flags, SDF_DYING)) {
 1393                 rv = -1;
 1394                 goto done;
 1395         }
 1396 
 1397         rv = scsi_read_cap_10(sc->sc_link, rdcap, flags);
 1398         if (rv == 0) {
 1399                 if (_4btol(rdcap->addr) == 0) {
 1400                         rv = -1;
 1401                         goto done;
 1402                 }
 1403                 sc->params.disksize = _4btol(rdcap->addr) + 1ll;
 1404                 sc->params.secsize = _4btol(rdcap->length);
 1405                 CLR(sc->flags, SDF_THIN);
 1406         }
 1407 
 1408 done:
 1409         dma_free(rdcap, sizeof(*rdcap));
 1410         return rv;
 1411 }
 1412 
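/*
 * Issue READ CAPACITY(16) and record the disk and sector size. The TPE
 * bit, if set, marks the disk as thin provisioned (SDF_THIN). Returns -1
 * if the command fails or the device reports zero capacity.
 */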
 1413 int
 1414 sd_read_cap_16(struct sd_softc *sc, int flags)
 1415 {
 1416         struct scsi_read_cap_data_16    *rdcap;
 1417         int                              rv;
 1418 
 1419         rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
 1420             PR_NOWAIT : PR_WAITOK) | PR_ZERO);
 1421         if (rdcap == NULL)
 1422                 return -1;
 1423 
 1424         if (ISSET(sc->flags, SDF_DYING)) {
 1425                 rv = -1;
 1426                 goto done;
 1427         }
 1428 
 1429         rv = scsi_read_cap_16(sc->sc_link, rdcap, flags);
 1430         if (rv == 0) {
 1431                 if (_8btol(rdcap->addr) == 0) {
 1432                         rv = -1;
 1433                         goto done;
 1434                 }
 1435                 sc->params.disksize = _8btol(rdcap->addr) + 1ll;
 1436                 sc->params.secsize = _4btol(rdcap->length);
 1437                 if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
 1438                         SET(sc->flags, SDF_THIN);
 1439                 else
 1440                         CLR(sc->flags, SDF_THIN);
 1441         }
 1442 
 1443 done:
 1444         dma_free(rdcap, sizeof(*rdcap));
 1445         return rv;
 1446 }
 1447 
 1448 int
 1449 sd_read_cap(struct sd_softc *sc, int flags)
 1450 {
 1451         int rv;
 1452 
 1453         CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);
 1454 
 1455         /*
 1456          * Devices whose INQUIRY data reports a version newer than SPC-2
 1457          * (i.e. SPC-3 or later) can be asked with the 16-byte READ
 1458          * CAPACITY command first. Older devices start with the 10-byte
 1459          * version and move up to the 16-byte version if they report more
 1460          * sectors than the 10-byte command can express.
 1461          */
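        /*
         * Sketch of why 0x100000000 is the retry threshold: READ
         * CAPACITY(10) returns the last LBA in a 4-byte field, so a disk
         * with 2^32 or more sectors reports 0xffffffff and
         * sd_read_cap_10() computes a disksize of exactly 0x100000000,
         * which is the cue to ask again with the 16-byte command.
         */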
 1462         if (SID_ANSII_REV(&sc->sc_link->inqdata) > SCSI_REV_SPC2) {
 1463                 rv = sd_read_cap_16(sc, flags);
 1464                 if (rv != 0)
 1465                         rv = sd_read_cap_10(sc, flags);
 1466         } else {
 1467                 rv = sd_read_cap_10(sc, flags);
 1468                 if (rv == 0 && sc->params.disksize == 0x100000000ll)
 1469                         rv = sd_read_cap_16(sc, flags);
 1470         }
 1471 
 1472         return rv;
 1473 }
 1474 
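/*
 * Check whether the device advertises the VPD pages needed for thin
 * provisioning. The SUPPORTED VPD PAGES page is fetched twice, first
 * header-only to learn its length and then in full, and the page list
 * must include both SI_PG_DISK_LIMITS and SI_PG_DISK_THIN.
 */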
 1475 int
 1476 sd_thin_pages(struct sd_softc *sc, int flags)
 1477 {
 1478         struct scsi_vpd_hdr             *pg;
 1479         u_int8_t                        *pages;
 1480         size_t                           len = 0;
 1481         int                              i, rv, score = 0;
 1482 
 1483         pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
 1484             PR_NOWAIT : PR_WAITOK) | PR_ZERO);
 1485         if (pg == NULL)
 1486                 return ENOMEM;
 1487 
 1488         if (ISSET(sc->flags, SDF_DYING)) {
 1489                 rv = ENXIO;
 1490                 goto done;
 1491         }
 1492         rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
 1493             SI_PG_SUPPORTED, flags);
 1494         if (rv != 0)
 1495                 goto done;
 1496 
 1497         len = _2btol(pg->page_length);
 1498 
 1499         dma_free(pg, sizeof(*pg));
 1500         pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
 1501             PR_NOWAIT : PR_WAITOK) | PR_ZERO);
 1502         if (pg == NULL)
 1503                 return ENOMEM;
 1504 
 1505         if (ISSET(sc->flags, SDF_DYING)) {
 1506                 rv = ENXIO;
 1507                 goto done;
 1508         }
 1509         rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
 1510             SI_PG_SUPPORTED, flags);
 1511         if (rv != 0)
 1512                 goto done;
 1513 
 1514         pages = (u_int8_t *)(pg + 1);
 1515         if (pages[0] != SI_PG_SUPPORTED) {
 1516                 rv = EIO;
 1517                 goto done;
 1518         }
 1519 
 1520         for (i = 1; i < len; i++) {
 1521                 switch (pages[i]) {
 1522                 case SI_PG_DISK_LIMITS:
 1523                 case SI_PG_DISK_THIN:
 1524                         score++;
 1525                         break;
 1526                 }
 1527         }
 1528 
 1529         if (score < 2)
 1530                 rv = EOPNOTSUPP;
 1531 
 1532 done:
 1533         dma_free(pg, sizeof(*pg) + len);
 1534         return rv;
 1535 }
 1536 
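/*
 * Read the BLOCK LIMITS VPD page. Only the longer, thin-provisioning
 * variant of the page (SI_PG_DISK_LIMITS_LEN_THIN) carries the maximum
 * UNMAP LBA and descriptor counts; anything shorter is reported as
 * EOPNOTSUPP.
 */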
 1537 int
 1538 sd_vpd_block_limits(struct sd_softc *sc, int flags)
 1539 {
 1540         struct scsi_vpd_disk_limits     *pg;
 1541         int                              rv;
 1542 
 1543         pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
 1544             PR_NOWAIT : PR_WAITOK) | PR_ZERO);
 1545         if (pg == NULL)
 1546                 return ENOMEM;
 1547 
 1548         if (ISSET(sc->flags, SDF_DYING)) {
 1549                 rv = ENXIO;
 1550                 goto done;
 1551         }
 1552         rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
 1553             SI_PG_DISK_LIMITS, flags);
 1554         if (rv != 0)
 1555                 goto done;
 1556 
 1557         if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
 1558                 sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
 1559                 sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
 1560         } else
 1561                 rv = EOPNOTSUPP;
 1562 
 1563 done:
 1564         dma_free(pg, sizeof(*pg));
 1565         return rv;
 1566 }
 1567 
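/*
 * Read the LOGICAL BLOCK PROVISIONING VPD page. The "notyet" block below
 * would pick the deletion method, either UNMAP or WRITE SAME(16) (which
 * can only take a single descriptor), once that support is enabled.
 */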
 1568 int
 1569 sd_vpd_thin(struct sd_softc *sc, int flags)
 1570 {
 1571         struct scsi_vpd_disk_thin       *pg;
 1572         int                              rv;
 1573 
 1574         pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
 1575             PR_NOWAIT : PR_WAITOK) | PR_ZERO);
 1576         if (pg == NULL)
 1577                 return ENOMEM;
 1578 
 1579         if (ISSET(sc->flags, SDF_DYING)) {
 1580                 rv = ENXIO;
 1581                 goto done;
 1582         }
 1583         rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
 1584             SI_PG_DISK_THIN, flags);
 1585         if (rv != 0)
 1586                 goto done;
 1587 
 1588 #ifdef notyet
 1589         if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
 1590                 sc->sc_delete = sd_unmap;
 1591         else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
 1592                 sc->sc_delete = sd_write_same_16;
 1593                 sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
 1594         } else
 1595                 rv = EOPNOTSUPP;
 1596 #endif /* notyet */
 1597 
 1598 done:
 1599         dma_free(pg, sizeof(*pg));
 1600         return rv;
 1601 }
 1602 
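/*
 * Gather all thin-provisioning parameters. Any failure propagates to the
 * caller, which then clears SDF_THIN and treats the disk as fully
 * provisioned.
 */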
 1603 int
 1604 sd_thin_params(struct sd_softc *sc, int flags)
 1605 {
 1606         int rv;
 1607 
 1608         rv = sd_thin_pages(sc, flags);
 1609         if (rv != 0)
 1610                 return rv;
 1611 
 1612         rv = sd_vpd_block_limits(sc, flags);
 1613         if (rv != 0)
 1614                 return rv;
 1615 
 1616         rv = sd_vpd_thin(sc, flags);
 1617         if (rv != 0)
 1618                 return rv;
 1619 
 1620         return 0;
 1621 }
 1622 
 1623 /*
 1624  * Fill out the disk parameter structure. Return 0 if the structure is correctly
 1625  * filled in, otherwise return -1.
 1626  *
 1627  * The caller is responsible for clearing the SDEV_MEDIA_LOADED flag if the
 1628  * structure cannot be completed.
 1629  */
 1630 int
 1631 sd_get_parms(struct sd_softc *sc, int flags)
 1632 {
 1633         struct disk_parms                dp;
 1634         struct scsi_link                *link = sc->sc_link;
 1635         union scsi_mode_sense_buf       *buf = NULL;
 1636         struct page_rigid_geometry      *rigid = NULL;
 1637         struct page_flex_geometry       *flex = NULL;
 1638         struct page_reduced_geometry    *reduced = NULL;
 1639         u_char                          *page0 = NULL;
 1640         int                              big, err = 0;
 1641 
 1642         if (sd_read_cap(sc, flags) != 0)
 1643                 return -1;
 1644 
 1645         if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
 1646                 /* Unmap limits unknown, so thin provisioning can't be used. */
 1647                 CLR(sc->flags, SDF_THIN);
 1648         }
 1649 
 1650         /*
 1651          * Work on a copy of the values initialized by sd_read_cap() and
 1652          * sd_thin_params().
 1653          */
 1654         dp = sc->params;
 1655 
 1656         buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
 1657         if (buf == NULL)
 1658                 goto validate;
 1659 
 1660         if (ISSET(sc->flags, SDF_DYING))
 1661                 goto die;
 1662 
 1663         /*
 1664          * Ask for page 0 (vendor specific) mode sense data to find the
 1665          * write-protect bit; it is the only mode page asked of USB devices.
 1666          *
 1667          * page0 == NULL is a valid situation.
 1668          */
 1669         err = scsi_do_mode_sense(link, 0, buf, (void **)&page0, 1,
 1670             flags | SCSI_SILENT, &big);
 1671         if (ISSET(sc->flags, SDF_DYING))
 1672                 goto die;
 1673         if (err == 0) {
 1674                 if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
 1675                         SET(link->flags, SDEV_READONLY);
 1676                 else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
 1677                         SET(link->flags, SDEV_READONLY);
 1678                 else
 1679                         CLR(link->flags, SDEV_READONLY);
 1680         }
 1681 
 1682         /*
 1683          * Many UMASS devices choke when asked about their geometry. Most
 1684          * don't have a meaningful geometry anyway, so just fake it if
 1685          * sd_read_cap() worked.
 1686          */
 1687         if (ISSET(link->flags, SDEV_UMASS) && dp.disksize > 0)
 1688                 goto validate;
 1689 
 1690         switch (link->inqdata.device & SID_TYPE) {
 1691         case T_OPTICAL:
 1692                 /* No more information needed or available. */
 1693                 break;
 1694 
 1695         case T_RDIRECT:
 1696                 /* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
 1697                 err = scsi_do_mode_sense(link, PAGE_REDUCED_GEOMETRY, buf,
 1698                     (void **)&reduced, sizeof(*reduced), flags | SCSI_SILENT,
 1699                     &big);
 1700                 if (err == 0) {
 1701                         scsi_parse_blkdesc(link, buf, big, NULL, NULL,
 1702                             &dp.secsize);
 1703                         if (reduced != NULL) {
 1704                                 if (dp.disksize == 0)
 1705                                         dp.disksize = _5btol(reduced->sectors);
 1706                                 if (dp.secsize == 0)
 1707                                         dp.secsize = _2btol(reduced->bytes_s);
 1708                         }
 1709                 }
 1710                 break;
 1711 
 1712         default:
 1713                 /*
 1714                  * NOTE: Some devices leave off the last four bytes of
 1715                  * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
 1716                  * The only information in those four bytes is RPM information
 1717                  * so accept the page. The extra bytes will be zero and RPM will
 1718                  * end up with the default value of 3600.
 1719                  */
 1720                 err = 0;
 1721                 if (!ISSET(link->flags, SDEV_ATAPI) ||
 1722                     !ISSET(link->flags, SDEV_REMOVABLE))
 1723                         err = scsi_do_mode_sense(link, PAGE_RIGID_GEOMETRY, buf,
 1724                             (void **)&rigid, sizeof(*rigid) - 4,
 1725                             flags | SCSI_SILENT, &big);
 1726                 if (err == 0) {
 1727                         scsi_parse_blkdesc(link, buf, big, NULL, NULL,
 1728                             &dp.secsize);
 1729                         if (rigid != NULL) {
 1730                                 dp.heads = rigid->nheads;
 1731                                 dp.cyls = _3btol(rigid->ncyl);
 1732                                 if (dp.heads * dp.cyls > 0)
 1733                                         dp.sectors = dp.disksize / (dp.heads *
 1734                                             dp.cyls);
 1735                         }
 1736                 } else {
 1737                         if (ISSET(sc->flags, SDF_DYING))
 1738                                 goto die;
 1739                         err = scsi_do_mode_sense(link, PAGE_FLEX_GEOMETRY, buf,
 1740                             (void **)&flex, sizeof(*flex) - 4,
 1741                             flags | SCSI_SILENT, &big);
 1742                         if (err == 0) {
 1743                                 scsi_parse_blkdesc(link, buf, big, NULL, NULL,
 1744                                     &dp.secsize);
 1745                                 if (flex != NULL) {
 1746                                         dp.sectors = flex->ph_sec_tr;
 1747                                         dp.heads = flex->nheads;
 1748                                         dp.cyls = _2btol(flex->ncyl);
 1749                                         if (dp.secsize == 0)
 1750                                                 dp.secsize =
 1751                                                     _2btol(flex->bytes_s);
 1752                                         if (dp.disksize == 0)
 1753                                                 dp.disksize =
 1754                                                     (u_int64_t)dp.cyls *
 1755                                                     dp.heads * dp.sectors;
 1756                                 }
 1757                         }
 1758                 }
 1759                 break;
 1760         }
 1761 
 1762 validate:
 1763         if (buf) {
 1764                 dma_free(buf, sizeof(*buf));
 1765                 buf = NULL;
 1766         }
 1767 
 1768         if (dp.disksize == 0)
 1769                 goto die;
 1770 
 1771         /*
 1772          * Restrict secsize values to powers of two between 512 and 64k.
 1773          */
 1774         switch (dp.secsize) {
 1775         case 0:
 1776                 dp.secsize = DEV_BSIZE;
 1777                 break;
 1778         case 0x200:     /* == 512, == DEV_BSIZE on all architectures. */
 1779         case 0x400:
 1780         case 0x800:
 1781         case 0x1000:
 1782         case 0x2000:
 1783         case 0x4000:
 1784         case 0x8000:
 1785         case 0x10000:
 1786                 break;
 1787         default:
 1788                 SC_DEBUG(sc->sc_link, SDEV_DB1,
 1789                     ("sd_get_parms: bad secsize: %#x\n", dp.secsize));
 1790                 return -1;
 1791         }
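        /*
         * Equivalent sketch of the accepted non-zero values: secsize is
         * kept only if powerof2(dp.secsize) holds and it lies in
         * [512, 65536]; the explicit case list above is simply easier to
         * audit.
         */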
 1792 
 1793         /*
 1794          * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
 1795          * cyls is <= disk_size can lead to wasted space. We need a more
 1796          * careful calculation/validation to make everything work out
 1797          * optimally.
 1798          */
 1799         if (dp.disksize > 0xffffffff && (dp.heads * dp.sectors) < 0xffff) {
 1800                 dp.heads = 511;
 1801                 dp.sectors = 255;
 1802                 dp.cyls = 0;
 1803         }
 1804 
 1805         /*
 1806          * Use standard geometry values for anything we still don't
 1807          * know.
 1808          */
 1809         if (dp.heads == 0)
 1810                 dp.heads = 255;
 1811         if (dp.sectors == 0)
 1812                 dp.sectors = 63;
 1813         if (dp.cyls == 0) {
 1814                 dp.cyls = dp.disksize / (dp.heads * dp.sectors);
 1815                 if (dp.cyls == 0) {
 1816                         /* Put everything into one cylinder. */
 1817                         dp.heads = dp.cyls = 1;
 1818                         dp.sectors = dp.disksize;
 1819                 }
 1820         }
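        /*
         * Worked example: a disk of 2097152 512-byte sectors (1 GiB) with
         * no reported geometry ends up with heads = 255, sectors = 63 and
         * cyls = 2097152 / (255 * 63) = 130; the 8702 leftover sectors
         * simply fall outside the synthetic C/H/S view.
         */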
 1821 
 1822 #ifdef SCSIDEBUG
 1823         if (dp.disksize != (u_int64_t)dp.cyls * dp.heads * dp.sectors) {
 1824                 sc_print_addr(sc->sc_link);
 1825                 printf("disksize (%llu) != cyls (%u) * heads (%u) * "
 1826                     "sectors/track (%u) (%llu)\n", dp.disksize, dp.cyls,
 1827                     dp.heads, dp.sectors,
 1828                     (u_int64_t)dp.cyls * dp.heads * dp.sectors);
 1829         }
 1830 #endif /* SCSIDEBUG */
 1831 
 1832         sc->params = dp;
 1833         return 0;
 1834 
 1835 die:
 1836         dma_free(buf, sizeof(*buf));
 1837         return -1;
 1838 }
 1839 
 1840 int
 1841 sd_flush(struct sd_softc *sc, int flags)
 1842 {
 1843         struct scsi_link                *link;
 1844         struct scsi_xfer                *xs;
 1845         struct scsi_synchronize_cache   *cmd;
 1846         int                              error;
 1847 
 1848         if (ISSET(sc->flags, SDF_DYING))
 1849                 return ENXIO;
 1850         link = sc->sc_link;
 1851 
 1852         if (ISSET(link->quirks, SDEV_NOSYNCCACHE))
 1853                 return 0;
 1854 
 1855         /*
 1856          * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
 1857          * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
 1858          * that the command is not supported by the device.
 1859          */
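        /*
         * CDB sketch: SYNCHRONIZE CACHE(10) is opcode 0x35 with the
         * starting LBA in bytes 2-5 and the block count in bytes 7-8, so
         * leaving everything but the opcode zeroed, as below, requests a
         * flush of the whole cache.
         */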
 1860 
 1861         xs = scsi_xs_get(link, flags);
 1862         if (xs == NULL) {
 1863                 SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
 1864                 return EIO;
 1865         }
 1866 
 1867         cmd = (struct scsi_synchronize_cache *)&xs->cmd;
 1868         cmd->opcode = SYNCHRONIZE_CACHE;
 1869 
 1870         xs->cmdlen = sizeof(*cmd);
 1871         xs->timeout = 100000;
 1872         SET(xs->flags, SCSI_IGNORE_ILLEGAL_REQUEST);
 1873 
 1874         error = scsi_xs_sync(xs);
 1875 
 1876         scsi_xs_put(xs);
 1877 
 1878         if (error)
 1879                 SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));
 1880         else
 1881                 CLR(sc->flags, SDF_DIRTY);
 1882 
 1883         return error;
 1884 }
