FreeBSD/Linux Kernel Cross Reference
sys/cam/scsi/scsi_da.c


    1 /*
    2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
    3  *
    4  * Copyright (c) 1997 Justin T. Gibbs.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions, and the following disclaimer,
   12  *    without modification, immediately at the beginning of the file.
   13  * 2. The name of the author may not be used to endorse or promote products
   14  *    derived from this software without specific prior written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  * $FreeBSD$
   29  */
   30 
   31 #include "opt_hw_wdog.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/buf.h>
   37 #include <sys/devicestat.h>
   38 #include <sys/dkbad.h>
   39 #include <sys/disklabel.h>
   40 #include <sys/diskslice.h>
   41 #include <sys/malloc.h>
   42 #include <sys/conf.h>
   43 
   44 #include <machine/cons.h>
   45 #include <machine/md_var.h>
   46 
   47 #include <vm/vm.h>
   48 #include <vm/vm_prot.h>
   49 #include <vm/pmap.h>
   50 
   51 #include <cam/cam.h>
   52 #include <cam/cam_ccb.h>
   53 #include <cam/cam_extend.h>
   54 #include <cam/cam_periph.h>
   55 #include <cam/cam_xpt_periph.h>
   56 
   57 #include <cam/scsi/scsi_message.h>
   58 
   59 typedef enum {
   60         DA_STATE_PROBE,
   61         DA_STATE_NORMAL
   62 } da_state;
   63 
   64 typedef enum {
   65         DA_FLAG_PACK_INVALID    = 0x001,
   66         DA_FLAG_NEW_PACK        = 0x002,
   67         DA_FLAG_PACK_LOCKED     = 0x004,
   68         DA_FLAG_PACK_REMOVABLE  = 0x008,
   69         DA_FLAG_TAGGED_QUEUING  = 0x010,
   70         DA_FLAG_NEED_OTAG       = 0x020,
   71         DA_FLAG_WENT_IDLE       = 0x040,
   72         DA_FLAG_RETRY_UA        = 0x080,
   73         DA_FLAG_OPEN            = 0x100
   74 } da_flags;
   75 
   76 typedef enum {
   77         DA_Q_NONE               = 0x00,
   78         DA_Q_NO_SYNC_CACHE      = 0x01,
   79         DA_Q_NO_6_BYTE          = 0x02
   80 } da_quirks;
   81 
   82 typedef enum {
   83         DA_CCB_PROBE            = 0x01,
   84         DA_CCB_BUFFER_IO        = 0x02,
   85         DA_CCB_WAITING          = 0x03,
   86         DA_CCB_DUMP             = 0x04,
   87         DA_CCB_TYPE_MASK        = 0x0F,
   88         DA_CCB_RETRY_UA         = 0x10
   89 } da_ccb_state;
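      /*
       * The low nibble of ccb_state holds one of the DA_CCB_* type values
       * above, while DA_CCB_RETRY_UA is OR'd in on top of it as a flag;
       * dadone() switches on (ccb_state & DA_CCB_TYPE_MASK) and checks
       * DA_CCB_RETRY_UA separately.
       */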
   90 
   91 /* Offsets into our private area for storing information */
   92 #define ccb_state       ppriv_field0
   93 #define ccb_bp          ppriv_ptr1
   94 
   95 struct disk_params {
   96         u_int8_t  heads;
   97         u_int16_t cylinders;
   98         u_int8_t  secs_per_track;
   99         u_int32_t secsize;      /* Number of bytes/sector */
  100         u_int32_t sectors;      /* total number sectors */
  101 };
  102 
  103 struct da_softc {
  104         struct   buf_queue_head buf_queue;
  105         struct   devstat device_stats;
  106         SLIST_ENTRY(da_softc) links;
  107         LIST_HEAD(, ccb_hdr) pending_ccbs;
  108         da_state state;
  109         da_flags flags; 
  110         da_quirks quirks;
  111         int      minimum_cmd_size;
  112         int      ordered_tag_count;
  113         struct   disk_params params;
  114         struct   diskslices *dk_slices; /* virtual drives */
  115         union    ccb saved_ccb;
  116 };
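      /*
       * pending_ccbs tracks buffer I/O CCBs that are currently outstanding:
       * dastart() links each one in (at splcam) before calling xpt_action(),
       * dadone() unlinks it on completion, and daasync() walks the list
       * after a bus or device reset to mark each entry DA_CCB_RETRY_UA.
       */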
  117 
  118 struct da_quirk_entry {
  119         struct scsi_inquiry_pattern inq_pat;
  120         da_quirks quirks;
  121 };
  122 
  123 static const char quantum[] = "QUANTUM";
  124 static const char microp[] = "MICROP";
  125 
  126 static struct da_quirk_entry da_quirk_table[] =
  127 {
  128         {
  129                 /*
  130                  * This particular Fujitsu drive doesn't like the
  131                  * synchronize cache command.
  132                  * Reported by: Tom Jackson <toj@gorilla.net>
  133                  */
  134                 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
  135                 /*quirks*/ DA_Q_NO_SYNC_CACHE
  136         
  137         },
  138         {
  139                 /*
  140                  * This drive doesn't like the synchronize cache command
  141                  * either.  Reported by: Matthew Jacob <mjacob@feral.com>
  142                  * in NetBSD PR kern/6027, August 24, 1998.
  143                  */
  144                 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
  145                 /*quirks*/ DA_Q_NO_SYNC_CACHE
  146         },
  147         {
  148                 /*
  149                  * This drive doesn't like the synchronize cache command
  150                  * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
  151                  * (PR 8882).
  152                  */
  153                 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
  154                 /*quirks*/ DA_Q_NO_SYNC_CACHE
  155         },
  156         {
  157                 /*
  158                  * Doesn't like the synchronize cache command.
  159                  * Reported by: Blaz Zupan <blaz@gold.amis.net>
  160                  */
  161                 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
  162                 /*quirks*/ DA_Q_NO_SYNC_CACHE
  163         },
  164         {
  165                 /*
  166                  * Doesn't like the synchronize cache command.
  167                  */
  168                 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
  169                 /*quirks*/ DA_Q_NO_SYNC_CACHE
  170         },
  171         {
  172                 /*
  173                  * Doesn't like the synchronize cache command.
  174                  */
  175                 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
  176                 /*quirks*/ DA_Q_NO_SYNC_CACHE
  177         },
  178         {
  179                 /*
  180                  * Doesn't work correctly with 6 byte reads/writes.
  181                  * Returns illegal request, and points to byte 9 of the
  182                  * 6-byte CDB.
  183                  * Reported by:  Adam McDougall <bsdx@spawnet.com>
  184                  */
  185                 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
  186                 /*quirks*/ DA_Q_NO_6_BYTE
  187         },
  188         {
  189                 /*
  190                  * See above.
  191                  */
  192                 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
  193                 /*quirks*/ DA_Q_NO_6_BYTE
  194         }
  195 };
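      /*
       * The entries above are matched against a device's INQUIRY data by
       * cam_quirkmatch()/scsi_inquiry_match() in daregister(); the vendor,
       * product, and revision fields are pattern strings in which '*' acts
       * as a wildcard.  A hypothetical new entry would follow the same
       * shape, e.g. { {T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "MODEL*", "*"},
       * DA_Q_NO_SYNC_CACHE }, with "VENDOR" and "MODEL*" replaced by the
       * strings the drive actually reports.
       */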
  196 
  197 static  d_open_t        daopen;
  198 static  d_read_t        daread;
  199 static  d_write_t       dawrite;
  200 static  d_close_t       daclose;
  201 static  d_strategy_t    dastrategy;
  202 static  d_ioctl_t       daioctl;
  203 static  d_dump_t        dadump;
  204 static  d_psize_t       dasize;
  205 static  periph_init_t   dainit;
  206 static  void            daasync(void *callback_arg, u_int32_t code,
  207                                 struct cam_path *path, void *arg);
  208 static  periph_ctor_t   daregister;
  209 static  periph_dtor_t   dacleanup;
  210 static  periph_start_t  dastart;
  211 static  periph_oninv_t  daoninvalidate;
  212 static  void            dadone(struct cam_periph *periph,
  213                                union ccb *done_ccb);
  214 static  int             daerror(union ccb *ccb, u_int32_t cam_flags,
  215                                 u_int32_t sense_flags);
  216 static void             daprevent(struct cam_periph *periph, int action);
  217 static void             dasetgeom(struct cam_periph *periph,
  218                                   struct scsi_read_capacity_data * rdcap);
  219 static timeout_t        dasendorderedtag;
  220 static void             dashutdown(int howto, void *arg);
  221 
  222 #ifndef DA_DEFAULT_TIMEOUT
  223 #define DA_DEFAULT_TIMEOUT 60   /* Timeout in seconds */
  224 #endif
  225 
  226 /*
  227  * DA_ORDEREDTAG_INTERVAL determines how often, relative
  228  * to the default timeout, we check to see whether an ordered
  229  * tagged transaction is appropriate to prevent simple tag
  230  * starvation.  Since we'd like to ensure that there is at least
  231  * 1/2 of the timeout length left for a starved transaction to
  232  * complete after we've sent an ordered tag, we must poll at least
  233  * four times in every timeout period.  This takes care of the worst
   234  * case where a starved transaction starts during an interval that
   235  * still passes the "don't send an ordered tag" test, so it takes
   236  * us two intervals to determine that a tag must be sent.
  237  */
  238 #ifndef DA_ORDEREDTAG_INTERVAL
  239 #define DA_ORDEREDTAG_INTERVAL 4
  240 #endif
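      /*
       * For example, dainit() schedules dasendorderedtag() every
       * (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL ticks.  With the
       * default 60 second timeout and a divisor of 4 that is one poll every
       * 15 seconds, so in the worst case two polls (30 seconds) pass before
       * an ordered tag is issued, leaving a starved command at least half
       * of the timeout in which to complete.
       */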
  241 
  242 static struct periph_driver dadriver =
  243 {
  244         dainit, "da",
  245         TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
  246 };
  247 
  248 DATA_SET(periphdriver_set, dadriver);
  249 
  250 #define DA_CDEV_MAJOR 13
  251 #define DA_BDEV_MAJOR 4
  252 
  253 /* For 2.2-stable support */
  254 #ifndef D_DISK
  255 #define D_DISK 0
  256 #endif
  257 
  258 static struct cdevsw da_cdevsw = 
  259 {
  260         /*d_open*/      daopen,
  261         /*d_close*/     daclose,
  262         /*d_read*/      daread,
  263         /*d_write*/     dawrite,
  264         /*d_ioctl*/     daioctl,
  265         /*d_stop*/      nostop,
  266         /*d_reset*/     noreset,
  267         /*d_devtotty*/  nodevtotty,
  268         /*d_poll*/      seltrue,
  269         /*d_mmap*/      nommap,
  270         /*d_strategy*/  dastrategy,
  271         /*d_name*/      "da",
  272         /*d_spare*/     NULL,
  273         /*d_maj*/       -1,
  274         /*d_dump*/      dadump,
  275         /*d_psize*/     dasize,
  276         /*d_flags*/     D_DISK,
  277         /*d_maxio*/     0,
  278         /*b_maj*/       -1
  279 };
  280 
  281 static SLIST_HEAD(,da_softc) softc_list;
  282 static struct extend_array *daperiphs;
  283 
  284 static int
  285 daopen(dev_t dev, int flags, int fmt, struct proc *p)
  286 {
  287         struct cam_periph *periph;
  288         struct da_softc *softc;
  289         struct disklabel label; 
  290         int unit;
  291         int part;
  292         int error;
  293         int s;
  294 
  295         unit = dkunit(dev);
  296         part = dkpart(dev);
  297         periph = cam_extend_get(daperiphs, unit);
  298         if (periph == NULL)
  299                 return (ENXIO); 
  300 
  301         softc = (struct da_softc *)periph->softc;
  302 
  303         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
  304             ("daopen: dev=0x%lx (unit %d , partition %d)\n", (long) dev,
  305              unit, part));
  306 
  307         if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
  308                 return (error); /* error code from tsleep */
  309         }
  310 
  311         if ((softc->flags & DA_FLAG_OPEN) == 0) {
  312                 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
  313                         return(ENXIO);
  314                 softc->flags |= DA_FLAG_OPEN;
  315         }
  316 
  317         s = splsoftcam();
  318         if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
  319                 /*
   320                  * If any partition is still open even though the
   321                  * disk has been invalidated, disallow further opens.
  322                  */
  323                 if (dsisopen(softc->dk_slices)) {
  324                         splx(s);
  325                         cam_periph_unlock(periph);
  326                         return (ENXIO);
  327                 }
  328 
  329                 /* Invalidate our pack information. */
  330                 dsgone(&softc->dk_slices);
  331                 softc->flags &= ~DA_FLAG_PACK_INVALID;
  332         }
  333         splx(s);
  334 
  335         /* Do a read capacity */
  336         {
  337                 struct scsi_read_capacity_data *rcap;
  338                 union  ccb *ccb;
  339 
  340                 rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
  341                                                                 M_TEMP,
  342                                                                 M_WAITOK);
  343                 
  344                 ccb = cam_periph_getccb(periph, /*priority*/1);
  345                 scsi_read_capacity(&ccb->csio,
  346                                    /*retries*/1,
   347                                    /*cbfcnp*/dadone,
  348                                    MSG_SIMPLE_Q_TAG,
  349                                    rcap,
  350                                    SSD_FULL_SIZE,
  351                                    /*timeout*/60000);
  352                 ccb->ccb_h.ccb_bp = NULL;
  353 
  354                 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
  355                                           /*sense_flags*/SF_RETRY_UA |
  356                                                          SF_RETRY_SELTO,
  357                                           &softc->device_stats);
  358 
  359                 xpt_release_ccb(ccb);
  360 
  361                 if (error == 0) {
  362                         dasetgeom(periph, rcap);
  363                 }
  364 
  365                 free(rcap, M_TEMP);
  366         }
  367 
  368         if (error == 0) {
  369                 struct ccb_getdev cgd;
  370 
  371                 /* Build label for whole disk. */
  372                 bzero(&label, sizeof(label));
  373                 label.d_type = DTYPE_SCSI;
  374 
  375                 /*
  376                  * Grab the inquiry data to get the vendor and product names.
  377                  * Put them in the typename and packname for the label.
  378                  */
  379                 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
  380                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
  381                 xpt_action((union ccb *)&cgd);
  382 
  383                 strncpy(label.d_typename, cgd.inq_data.vendor,
  384                         min(SID_VENDOR_SIZE, sizeof(label.d_typename)));
  385                 strncpy(label.d_packname, cgd.inq_data.product,
  386                         min(SID_PRODUCT_SIZE, sizeof(label.d_packname)));
  387                 
  388                 label.d_secsize = softc->params.secsize;
  389                 label.d_nsectors = softc->params.secs_per_track;
  390                 label.d_ntracks = softc->params.heads;
  391                 label.d_ncylinders = softc->params.cylinders;
  392                 label.d_secpercyl = softc->params.heads
  393                                   * softc->params.secs_per_track;
  394                 label.d_secperunit = softc->params.sectors;
  395 
  396                 if ((dsisopen(softc->dk_slices) == 0)
  397                     && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
  398                         daprevent(periph, PR_PREVENT);
  399                 }
  400         
  401                 /* Initialize slice tables. */
  402                 error = dsopen("da", dev, fmt, 0, &softc->dk_slices, &label,
  403                                dastrategy, (ds_setgeom_t *)NULL,
  404                                &da_cdevsw);
  405 
  406                 /*
  407                  * Check to see whether or not the blocksize is set yet.
  408                  * If it isn't, set it and then clear the blocksize
  409                  * unavailable flag for the device statistics.
  410                  */
  411                 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
  412                         softc->device_stats.block_size = softc->params.secsize;
  413                         softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
  414                 }
  415         }
  416         
  417         if (error != 0) {
  418                 if ((dsisopen(softc->dk_slices) == 0)
  419                  && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
  420                         daprevent(periph, PR_ALLOW);
  421                 }
  422         }
  423         cam_periph_unlock(periph);
  424         return (error);
  425 }
  426 
  427 static int
  428 daclose(dev_t dev, int flag, int fmt, struct proc *p)
  429 {
  430         struct  cam_periph *periph;
  431         struct  da_softc *softc;
  432         int     unit;
  433         int     error;
  434 
  435         unit = dkunit(dev);
  436         periph = cam_extend_get(daperiphs, unit);
  437         if (periph == NULL)
  438                 return (ENXIO); 
  439 
  440         softc = (struct da_softc *)periph->softc;
  441 
  442         if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
  443                 return (error); /* error code from tsleep */
  444         }
  445 
  446         dsclose(dev, fmt, softc->dk_slices);
  447         if (dsisopen(softc->dk_slices)) {
  448                 cam_periph_unlock(periph);
  449                 return (0);
  450         }
  451 
  452         if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
  453                 union   ccb *ccb;
  454 
  455                 ccb = cam_periph_getccb(periph, /*priority*/1);
  456 
  457                 scsi_synchronize_cache(&ccb->csio,
  458                                        /*retries*/1,
  459                                        /*cbfcnp*/dadone,
  460                                        MSG_SIMPLE_Q_TAG,
  461                                        /*begin_lba*/0,/* Cover the whole disk */
  462                                        /*lb_count*/0,
  463                                        SSD_FULL_SIZE,
  464                                        5 * 60 * 1000);
  465 
  466                 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
  467                                   /*sense_flags*/SF_RETRY_UA,
  468                                   &softc->device_stats);
  469 
  470                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
  471                         if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
  472                              CAM_SCSI_STATUS_ERROR) {
  473                                 int asc, ascq;
  474                                 int sense_key, error_code;
  475 
  476                                 scsi_extract_sense(&ccb->csio.sense_data,
  477                                                    &error_code,
  478                                                    &sense_key, 
  479                                                    &asc, &ascq);
  480                                 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
  481                                         scsi_sense_print(&ccb->csio);
  482                         } else {
  483                                 xpt_print_path(periph->path);
  484                                 printf("Synchronize cache failed, status "
  485                                        "== 0x%x, scsi status == 0x%x\n",
  486                                        ccb->csio.ccb_h.status,
  487                                        ccb->csio.scsi_status);
  488                         }
  489                 }
  490 
  491                 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
  492                         cam_release_devq(ccb->ccb_h.path,
  493                                          /*relsim_flags*/0,
  494                                          /*reduction*/0,
  495                                          /*timeout*/0,
  496                                          /*getcount_only*/0);
  497 
  498                 xpt_release_ccb(ccb);
  499 
  500         }
  501 
  502         if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
  503                 daprevent(periph, PR_ALLOW);
  504                 /*
   505                  * If we've got removable media, mark the blocksize as
  506                  * unavailable, since it could change when new media is
  507                  * inserted.
  508                  */
  509                 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
  510         }
  511 
  512         softc->flags &= ~DA_FLAG_OPEN;
  513         cam_periph_unlock(periph);
  514         cam_periph_release(periph);
  515         return (0);     
  516 }
  517 
  518 static int
  519 daread(dev_t dev, struct uio *uio, int ioflag)
  520 {
  521         return(physio(dastrategy, NULL, dev, 1, minphys, uio));
  522 }
  523 
  524 static int
  525 dawrite(dev_t dev, struct uio *uio, int ioflag)
  526 {
  527         return(physio(dastrategy, NULL, dev, 0, minphys, uio));
  528 }
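      /*
       * daread() and dawrite() serve the raw (character) device: physio()
       * builds bufs directly from the caller's uio and feeds them to
       * dastrategy(), so raw I/O follows the same path as block I/O below.
       */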
  529 
  530 /*
  531  * Actually translate the requested transfer into one the physical driver
  532  * can understand.  The transfer is described by a buf and will include
  533  * only one physical transfer.
  534  */
  535 static void
  536 dastrategy(struct buf *bp)
  537 {
  538         struct cam_periph *periph;
  539         struct da_softc *softc;
  540         u_int  unit;
  541         u_int  part;
  542         int    s;
  543         
  544         unit = dkunit(bp->b_dev);
  545         part = dkpart(bp->b_dev);
  546         periph = cam_extend_get(daperiphs, unit);
  547         if (periph == NULL) {
  548                 bp->b_error = ENXIO;
  549                 goto bad;               
  550         }
  551         softc = (struct da_softc *)periph->softc;
  552 #if 0
  553         /*
  554          * check it's not too big a transfer for our adapter
  555          */
  556         scsi_minphys(bp,&sd_switch);
  557 #endif
  558 
  559         /*
   560          * Do bounds checking, adjust transfer, set b_cylin and b_pblkno.
  561          */
  562         if (dscheck(bp, softc->dk_slices) <= 0)
  563                 goto done;
  564 
  565         /*
  566          * Mask interrupts so that the pack cannot be invalidated until
  567          * after we are in the queue.  Otherwise, we might not properly
  568          * clean up one of the buffers.
  569          */
  570         s = splbio();
  571         
  572         /*
  573          * If the device has been made invalid, error out
  574          */
  575         if ((softc->flags & DA_FLAG_PACK_INVALID)) {
  576                 splx(s);
  577                 bp->b_error = ENXIO;
  578                 goto bad;
  579         }
  580         
  581         /*
  582          * Place it in the queue of disk activities for this disk
  583          */
  584         bufqdisksort(&softc->buf_queue, bp);
  585 
  586         splx(s);
  587         
  588         /*
  589          * Schedule ourselves for performing the work.
  590          */
  591         xpt_schedule(periph, /* XXX priority */1);
  592 
  593         return;
  594 bad:
  595         bp->b_flags |= B_ERROR;
  596 done:
  597 
  598         /*
  599          * Correctly set the buf to indicate a completed xfer
  600          */
  601         bp->b_resid = bp->b_bcount;
  602         biodone(bp);
  603         return;
  604 }
  605 
  606 /* For 2.2-stable support */
  607 #ifndef ENOIOCTL
  608 #define ENOIOCTL -1
  609 #endif
  610 
  611 static int
  612 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
  613 {
  614         struct cam_periph *periph;
  615         struct da_softc *softc;
  616         int unit;
  617         int error;
  618 
  619         unit = dkunit(dev);
  620         periph = cam_extend_get(daperiphs, unit);
  621         if (periph == NULL)
  622                 return (ENXIO); 
  623 
  624         softc = (struct da_softc *)periph->softc;
  625 
  626         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
  627 
  628         if (cmd == DIOCSBAD)
  629                 return (EINVAL);        /* XXX */
  630 
  631         if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
  632                 return (error); /* error code from tsleep */
  633         }       
  634 
  635         error = dsioctl("da", dev, cmd, addr, flag, &softc->dk_slices,
  636                         dastrategy, (ds_setgeom_t *)NULL);
  637 
  638         if (error == ENOIOCTL)
  639                 error = cam_periph_ioctl(periph, cmd, addr, daerror);
  640 
  641         cam_periph_unlock(periph);
  642         
  643         return (error);
  644 }
  645 
  646 static int
  647 dadump(dev_t dev)
  648 {
  649         struct      cam_periph *periph;
  650         struct      da_softc *softc;
  651         struct      disklabel *lp;
  652         u_int       unit;
  653         u_int       part;
  654         long        num;        /* number of sectors to write */
  655         long        blkoff;
  656         long        blknum;
  657         long        blkcnt;
  658         vm_offset_t addr;       
  659         static  int dadoingadump = 0;
  660         struct      ccb_scsiio csio;
  661 
  662         /* toss any characters present prior to dump */
  663         while (cncheckc() != -1)
  664                 ;
  665 
  666         unit = dkunit(dev);
  667         part = dkpart(dev);
  668         periph = cam_extend_get(daperiphs, unit);
  669         if (periph == NULL) {
  670                 return (ENXIO);
  671         }
  672         softc = (struct da_softc *)periph->softc;
  673         
  674         if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
  675          || (softc->dk_slices == NULL)
  676          || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
  677                 return (ENXIO);
  678 
  679         /* Size of memory to dump, in disk sectors. */
  680         /* XXX Fix up for non DEV_BSIZE sectors!!! */
  681         num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;
  682 
  683         blkoff = lp->d_partitions[part].p_offset;
  684         blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;
  685 
  686         /* check transfer bounds against partition size */
  687         if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
  688                 return (EINVAL);
  689 
  690         if (dadoingadump != 0)
  691                 return (EFAULT);
  692 
  693         dadoingadump = 1;
  694 
  695         blknum = dumplo + blkoff;
  696         blkcnt = PAGE_SIZE / softc->params.secsize;
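              /*
               * E.g. with 4096-byte pages and 512-byte sectors this is 8
               * sectors per request, so each pass through the loop below
               * writes one page of physical memory (addr advances by
               * blkcnt * secsize == PAGE_SIZE per iteration).
               */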
  697 
  698         addr = 0;       /* starting address */
  699 
  700         while (num > 0) {
  701 
  702                 if (is_physical_memory(addr)) {
  703                         pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
  704                                    trunc_page(addr), VM_PROT_READ, TRUE);
  705                 } else {
  706                         pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
  707                                    trunc_page(0), VM_PROT_READ, TRUE);
  708                 }
  709 
  710                 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
  711                 csio.ccb_h.ccb_state = DA_CCB_DUMP;
  712                 scsi_read_write(&csio,
  713                                 /*retries*/1,
  714                                 dadone,
  715                                 MSG_ORDERED_Q_TAG,
  716                                 /*read*/FALSE,
  717                                 /*byte2*/0,
  718                                 /*minimum_cmd_size*/ softc->minimum_cmd_size,
  719                                 blknum,
  720                                 blkcnt,
  721                                 /*data_ptr*/CADDR1,
  722                                 /*dxfer_len*/blkcnt * softc->params.secsize,
  723                                 /*sense_len*/SSD_FULL_SIZE,
  724                                 DA_DEFAULT_TIMEOUT * 1000);             
  725                 xpt_polled_action((union ccb *)&csio);
  726 
  727                 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
  728                         printf("Aborting dump due to I/O error.\n");
  729                         if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
  730                              CAM_SCSI_STATUS_ERROR)
  731                                 scsi_sense_print(&csio);
  732                         else
  733                                 printf("status == 0x%x, scsi status == 0x%x\n",
  734                                        csio.ccb_h.status, csio.scsi_status);
  735                         return(EIO);
  736                 }
  737                 
  738                 if (addr % (1024 * 1024) == 0) {
  739 #ifdef  HW_WDOG
  740                         if (wdog_tickler)
  741                                 (*wdog_tickler)();
  742 #endif /* HW_WDOG */
  743                         /* Count in MB of data left to write */
  744                         printf("%ld ", (num  * softc->params.secsize)
  745                                      / (1024 * 1024));
  746                 }
  747                 
  748                 /* update block count */
  749                 num -= blkcnt;
  750                 blknum += blkcnt;
  751                 addr += blkcnt * softc->params.secsize;
  752 
  753                 /* operator aborting dump? */
  754                 if (cncheckc() != -1)
  755                         return (EINTR);
  756         }
  757 
  758         /*
  759          * Sync the disk cache contents to the physical media.
  760          */
  761         if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
  762 
  763                 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
  764                 csio.ccb_h.ccb_state = DA_CCB_DUMP;
  765                 scsi_synchronize_cache(&csio,
  766                                        /*retries*/1,
  767                                        /*cbfcnp*/dadone,
  768                                        MSG_SIMPLE_Q_TAG,
  769                                        /*begin_lba*/0,/* Cover the whole disk */
  770                                        /*lb_count*/0,
  771                                        SSD_FULL_SIZE,
  772                                        5 * 60 * 1000);
  773                 xpt_polled_action((union ccb *)&csio);
  774 
  775                 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
  776                         if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
  777                              CAM_SCSI_STATUS_ERROR) {
  778                                 int asc, ascq;
  779                                 int sense_key, error_code;
  780 
  781                                 scsi_extract_sense(&csio.sense_data,
  782                                                    &error_code,
  783                                                    &sense_key, 
  784                                                    &asc, &ascq);
  785                                 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
  786                                         scsi_sense_print(&csio);
  787                         } else {
  788                                 xpt_print_path(periph->path);
  789                                 printf("Synchronize cache failed, status "
  790                                        "== 0x%x, scsi status == 0x%x\n",
  791                                        csio.ccb_h.status, csio.scsi_status);
  792                         }
  793                 }
  794         }
  795         return (0);
  796 }
  797 
  798 static int
  799 dasize(dev_t dev)
  800 {
  801         struct cam_periph *periph;
  802         struct da_softc *softc; 
  803 
  804         periph = cam_extend_get(daperiphs, dkunit(dev));
  805         if (periph == NULL)
  806                 return (ENXIO);
  807         
  808         softc = (struct da_softc *)periph->softc;
  809         
  810         return (dssize(dev, &softc->dk_slices, daopen, daclose));       
  811 }
  812 
  813 static void
  814 dainit(void)
  815 {
  816         cam_status status;
  817         struct cam_path *path;
  818 
  819         /*
  820          * Create our extend array for storing the devices we attach to.
  821          */
  822         daperiphs = cam_extend_new();
  823         SLIST_INIT(&softc_list);
  824         if (daperiphs == NULL) {
  825                 printf("da: Failed to alloc extend array!\n");
  826                 return;
  827         }
  828         
  829         /*
  830          * Install a global async callback.  This callback will
  831          * receive async callbacks like "new device found".
  832          */
  833         status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
  834                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
  835 
  836         if (status == CAM_REQ_CMP) {
  837                 struct ccb_setasync csa;
  838 
  839                 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
  840                 csa.ccb_h.func_code = XPT_SASYNC_CB;
  841                 csa.event_enable = AC_FOUND_DEVICE;
  842                 csa.callback = daasync;
  843                 csa.callback_arg = NULL;
  844                 xpt_action((union ccb *)&csa);
  845                 status = csa.ccb_h.status;
  846                 xpt_free_path(path);
  847         }
  848 
  849         if (status != CAM_REQ_CMP) {
  850                 printf("da: Failed to attach master async callback "
  851                        "due to status 0x%x!\n", status);
  852         } else {
  853                 int err;
  854 
   855                 /* If we were successful, register our devsw */
  856                 cdevsw_add_generic(DA_BDEV_MAJOR, DA_CDEV_MAJOR, &da_cdevsw);
  857 
  858                 /*
   859                  * Schedule a periodic event to occasionally send an
  860                  * ordered tag to a device.
  861                  */
  862                 timeout(dasendorderedtag, NULL,
  863                         (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
  864 
  865                 if ((err = at_shutdown(dashutdown, NULL,
  866                                        SHUTDOWN_POST_SYNC)) != 0)
  867                         printf("dainit: at_shutdown returned %d!\n", err);
  868         }
  869 }
  870 
  871 static void
  872 daoninvalidate(struct cam_periph *periph)
  873 {
  874         int s;
  875         struct da_softc *softc;
  876         struct buf *q_bp;
  877         struct ccb_setasync csa;
  878 
  879         softc = (struct da_softc *)periph->softc;
  880 
  881         /*
  882          * De-register any async callbacks.
  883          */
  884         xpt_setup_ccb(&csa.ccb_h, periph->path,
  885                       /* priority */ 5);
  886         csa.ccb_h.func_code = XPT_SASYNC_CB;
  887         csa.event_enable = 0;
  888         csa.callback = daasync;
  889         csa.callback_arg = periph;
  890         xpt_action((union ccb *)&csa);
  891 
  892         softc->flags |= DA_FLAG_PACK_INVALID;
  893 
  894         /*
  895          * Although the oninvalidate() routines are always called at
  896          * splsoftcam, we need to be at splbio() here to keep the buffer
  897          * queue from being modified while we traverse it.
  898          */
  899         s = splbio();
  900 
  901         /*
  902          * Return all queued I/O with ENXIO.
  903          * XXX Handle any transactions queued to the card
  904          *     with XPT_ABORT_CCB.
  905          */
  906         while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
  907                 bufq_remove(&softc->buf_queue, q_bp);
  908                 q_bp->b_resid = q_bp->b_bcount;
  909                 q_bp->b_error = ENXIO;
  910                 q_bp->b_flags |= B_ERROR;
  911                 biodone(q_bp);
  912         }
  913         splx(s);
  914 
  915         SLIST_REMOVE(&softc_list, softc, da_softc, links);
  916 
  917         xpt_print_path(periph->path);
  918         printf("lost device\n");
  919 }
  920 
  921 static void
  922 dacleanup(struct cam_periph *periph)
  923 {
  924         struct da_softc *softc;
  925 
  926         softc = (struct da_softc *)periph->softc;
  927 
  928         devstat_remove_entry(&softc->device_stats);
  929         cam_extend_release(daperiphs, periph->unit_number);
  930         xpt_print_path(periph->path);
  931         printf("removing device entry\n");
  932         free(softc, M_DEVBUF);
  933 }
  934 
  935 static void
  936 daasync(void *callback_arg, u_int32_t code,
  937         struct cam_path *path, void *arg)
  938 {
  939         struct cam_periph *periph;
  940 
  941         periph = (struct cam_periph *)callback_arg;
  942         switch (code) {
  943         case AC_FOUND_DEVICE:
  944         {
  945                 struct ccb_getdev *cgd;
  946                 cam_status status;
  947  
  948                 cgd = (struct ccb_getdev *)arg;
  949 
  950                 if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
  951                         break;
  952 
  953                 /*
  954                  * Allocate a peripheral instance for
  955                  * this device and start the probe
  956                  * process.
  957                  */
  958                 status = cam_periph_alloc(daregister, daoninvalidate,
  959                                           dacleanup, dastart,
  960                                           "da", CAM_PERIPH_BIO,
  961                                           cgd->ccb_h.path, daasync,
  962                                           AC_FOUND_DEVICE, cgd);
  963 
  964                 if (status != CAM_REQ_CMP
  965                  && status != CAM_REQ_INPROG)
  966                         printf("daasync: Unable to attach to new device "
  967                                 "due to status 0x%x\n", status);
  968                 break;
  969         }
  970         case AC_SENT_BDR:
  971         case AC_BUS_RESET:
  972         {
  973                 struct da_softc *softc;
  974                 struct ccb_hdr *ccbh;
  975                 int s;
  976 
  977                 softc = (struct da_softc *)periph->softc;
  978                 s = splsoftcam();
  979                 /*
  980                  * Don't fail on the expected unit attention
  981                  * that will occur.
  982                  */
  983                 softc->flags |= DA_FLAG_RETRY_UA;
  984                 for (ccbh = LIST_FIRST(&softc->pending_ccbs);
  985                      ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
  986                         ccbh->ccb_state |= DA_CCB_RETRY_UA;
  987                 splx(s);
  988                 /* FALLTHROUGH*/
  989         }
  990         default:
  991                 cam_periph_async(periph, code, path, arg);
  992                 break;
  993         }
  994 }
  995 
  996 static cam_status
  997 daregister(struct cam_periph *periph, void *arg)
  998 {
  999         int s;
 1000         struct da_softc *softc;
 1001         struct ccb_setasync csa;
 1002         struct ccb_getdev *cgd;
 1003         caddr_t match;
 1004 
 1005         cgd = (struct ccb_getdev *)arg;
 1006         if (periph == NULL) {
 1007                 printf("daregister: periph was NULL!!\n");
 1008                 return(CAM_REQ_CMP_ERR);
 1009         }
 1010 
 1011         if (cgd == NULL) {
 1012                 printf("daregister: no getdev CCB, can't register device\n");
 1013                 return(CAM_REQ_CMP_ERR);
 1014         }
 1015 
 1016         softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
 1017 
 1018         if (softc == NULL) {
 1019                 printf("daregister: Unable to probe new device. "
 1020                        "Unable to allocate softc\n");                           
 1021                 return(CAM_REQ_CMP_ERR);
 1022         }
 1023 
 1024         bzero(softc, sizeof(*softc));
 1025         LIST_INIT(&softc->pending_ccbs);
 1026         softc->state = DA_STATE_PROBE;
 1027         bufq_init(&softc->buf_queue);
 1028         if (SID_IS_REMOVABLE(&cgd->inq_data))
 1029                 softc->flags |= DA_FLAG_PACK_REMOVABLE;
 1030         if ((cgd->inq_data.flags & SID_CmdQue) != 0)
 1031                 softc->flags |= DA_FLAG_TAGGED_QUEUING;
 1032 
 1033         periph->softc = softc;
 1034         
 1035         cam_extend_set(daperiphs, periph->unit_number, periph);
 1036 
 1037         /*
 1038          * See if this device has any quirks.
 1039          */
 1040         match = cam_quirkmatch((caddr_t)&cgd->inq_data,
 1041                                (caddr_t)da_quirk_table,
 1042                                sizeof(da_quirk_table)/sizeof(*da_quirk_table),
 1043                                sizeof(*da_quirk_table), scsi_inquiry_match);
 1044 
 1045         if (match != NULL)
 1046                 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
 1047         else
 1048                 softc->quirks = DA_Q_NONE;
 1049 
 1050         if (softc->quirks & DA_Q_NO_6_BYTE)
 1051                 softc->minimum_cmd_size = 10;
 1052         else
 1053                 softc->minimum_cmd_size = 6;
 1054 
 1055         /*
 1056          * Block our timeout handler while we
 1057          * add this softc to the dev list.
 1058          */
 1059         s = splsoftclock();
 1060         SLIST_INSERT_HEAD(&softc_list, softc, links);
 1061         splx(s);
 1062 
 1063         /*
 1064          * The DA driver supports a blocksize, but
 1065          * we don't know the blocksize until we do 
 1066          * a read capacity.  So, set a flag to
 1067          * indicate that the blocksize is 
 1068          * unavailable right now.  We'll clear the
 1069          * flag as soon as we've done a read capacity.
 1070          */
 1071         devstat_add_entry(&softc->device_stats, "da", 
 1072                           periph->unit_number, 0,
 1073                           DEVSTAT_BS_UNAVAILABLE,
 1074                           cgd->pd_type | DEVSTAT_TYPE_IF_SCSI,
 1075                           DEVSTAT_PRIORITY_DA);
 1076 
 1077         /*
 1078          * Add async callbacks for bus reset and
 1079          * bus device reset calls.  I don't bother
 1080          * checking if this fails as, in most cases,
 1081          * the system will function just fine without
 1082          * them and the only alternative would be to
 1083          * not attach the device on failure.
 1084          */
 1085         xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
 1086         csa.ccb_h.func_code = XPT_SASYNC_CB;
 1087         csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
 1088         csa.callback = daasync;
 1089         csa.callback_arg = periph;
 1090         xpt_action((union ccb *)&csa);
 1091         /*
  1092          * Lock this peripheral until we are set up.
  1093          * This first call can't block.
 1094          */
 1095         (void)cam_periph_lock(periph, PRIBIO);
 1096         xpt_schedule(periph, /*priority*/5);
 1097 
 1098         return(CAM_REQ_CMP);
 1099 }
 1100 
 1101 static void
 1102 dastart(struct cam_periph *periph, union ccb *start_ccb)
 1103 {
 1104         struct da_softc *softc;
 1105 
 1106         softc = (struct da_softc *)periph->softc;
 1107 
 1108         
 1109         switch (softc->state) {
 1110         case DA_STATE_NORMAL:
 1111         {
 1112                 /* Pull a buffer from the queue and get going on it */          
 1113                 struct buf *bp;
 1114                 int s;
 1115 
 1116                 /*
 1117                  * See if there is a buf with work for us to do..
  1118          * See if there is a buf with work for us to do.
 1119                 s = splbio();
 1120                 bp = bufq_first(&softc->buf_queue);
 1121                 if (periph->immediate_priority <= periph->pinfo.priority) {
 1122                         CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
 1123                                         ("queuing for immediate ccb\n"));
 1124                         start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
 1125                         SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
 1126                                           periph_links.sle);
 1127                         periph->immediate_priority = CAM_PRIORITY_NONE;
 1128                         splx(s);
 1129                         wakeup(&periph->ccb_list);
 1130                 } else if (bp == NULL) {
 1131                         splx(s);
 1132                         xpt_release_ccb(start_ccb);
 1133                 } else {
 1134                         int oldspl;
 1135                         u_int8_t tag_code;
 1136 
 1137                         bufq_remove(&softc->buf_queue, bp);
 1138 
 1139                         devstat_start_transaction(&softc->device_stats);
 1140 
 1141                         if ((bp->b_flags & B_ORDERED) != 0
 1142                          || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
 1143                                 softc->flags &= ~DA_FLAG_NEED_OTAG;
 1144                                 softc->ordered_tag_count++;
 1145                                 tag_code = MSG_ORDERED_Q_TAG;
 1146                         } else {
 1147                                 tag_code = MSG_SIMPLE_Q_TAG;
 1148                         }
 1149                         scsi_read_write(&start_ccb->csio,
 1150                                         /*retries*/4,
 1151                                         dadone,
 1152                                         tag_code,
 1153                                         bp->b_flags & B_READ,
 1154                                         /*byte2*/0,
 1155                                         softc->minimum_cmd_size,
 1156                                         bp->b_pblkno,
 1157                                         bp->b_bcount / softc->params.secsize,
 1158                                         bp->b_data,
 1159                                         bp->b_bcount,
 1160                                         /*sense_len*/SSD_FULL_SIZE,
 1161                                         DA_DEFAULT_TIMEOUT * 1000);
 1162                         start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
 1163 
 1164                         /*
  1165                          * Block out any asynchronous callbacks
 1166                          * while we touch the pending ccb list.
 1167                          */
 1168                         oldspl = splcam();
 1169                         LIST_INSERT_HEAD(&softc->pending_ccbs,
 1170                                          &start_ccb->ccb_h, periph_links.le);
 1171                         splx(oldspl);
 1172 
 1173                         /* We expect a unit attention from this device */
 1174                         if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
 1175                                 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
 1176                                 softc->flags &= ~DA_FLAG_RETRY_UA;
 1177                         }
 1178 
 1179                         start_ccb->ccb_h.ccb_bp = bp;
 1180                         bp = bufq_first(&softc->buf_queue);
 1181                         splx(s);
 1182 
 1183                         xpt_action(start_ccb);
 1184                 }
 1185                 
 1186                 if (bp != NULL) {
 1187                         /* Have more work to do, so ensure we stay scheduled */
 1188                         xpt_schedule(periph, /* XXX priority */1);
 1189                 }
 1190                 break;
 1191         }
 1192         case DA_STATE_PROBE:
 1193         {
 1194                 struct ccb_scsiio *csio;
 1195                 struct scsi_read_capacity_data *rcap;
 1196 
 1197                 rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
 1198                                                                 M_TEMP,
 1199                                                                 M_NOWAIT);
 1200                 if (rcap == NULL) {
 1201                         printf("dastart: Couldn't malloc read_capacity data\n");
 1202                         /* da_free_periph??? */
 1203                         break;
 1204                 }
 1205                 csio = &start_ccb->csio;
 1206                 scsi_read_capacity(csio,
 1207                                    /*retries*/4,
 1208                                    dadone,
 1209                                    MSG_SIMPLE_Q_TAG,
 1210                                    rcap,
 1211                                    SSD_FULL_SIZE,
 1212                                    /*timeout*/5000);
 1213                 start_ccb->ccb_h.ccb_bp = NULL;
 1214                 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
 1215                 xpt_action(start_ccb);
 1216                 break;
 1217         }
 1218         }
 1219 }
 1220 
 1221 
 1222 static void
 1223 dadone(struct cam_periph *periph, union ccb *done_ccb)
 1224 {
 1225         struct da_softc *softc;
 1226         struct ccb_scsiio *csio;
 1227 
 1228         softc = (struct da_softc *)periph->softc;
 1229         csio = &done_ccb->csio;
 1230         switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
 1231         case DA_CCB_BUFFER_IO:
 1232         {
 1233                 struct buf *bp;
 1234                 int    oldspl;
 1235 
 1236                 bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
 1237                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1238                         int error;
 1239                         int s;
 1240                         int sf;
 1241                         
 1242                         if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
 1243                                 sf = SF_RETRY_UA;
 1244                         else
 1245                                 sf = 0;
 1246 
 1247                         /* Retry selection timeouts */
 1248                         sf |= SF_RETRY_SELTO;
 1249 
 1250                         if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
 1251                                 /*
  1252                                  * A retry was scheduled, so
 1253                                  * just return.
 1254                                  */
 1255                                 return;
 1256                         }
 1257                         if (error != 0) {
 1258                                 struct buf *q_bp;
 1259 
 1260                                 s = splbio();
 1261 
 1262                                 if (error == ENXIO) {
 1263                                         /*
 1264                                          * Catastrophic error.  Mark our pack as
 1265                                          * invalid.
 1266                                          */
 1267                                         /* XXX See if this is really a media
 1268                                          *     change first.
 1269                                          */
 1270                                         xpt_print_path(periph->path);
 1271                                         printf("Invalidating pack\n");
 1272                                         softc->flags |= DA_FLAG_PACK_INVALID;
 1273                                 }
 1274 
 1275                                 /*
 1276                                  * return all queued I/O with EIO, so that
 1277                                  * the client can retry these I/Os in the
 1278                                  * proper order should it attempt to recover.
 1279                                  */
 1280                                 while ((q_bp = bufq_first(&softc->buf_queue))
 1281                                         != NULL) {
 1282                                         bufq_remove(&softc->buf_queue, q_bp);
 1283                                         q_bp->b_resid = q_bp->b_bcount;
 1284                                         q_bp->b_error = EIO;
 1285                                         q_bp->b_flags |= B_ERROR;
 1286                                         biodone(q_bp);
 1287                                 }
 1288                                 splx(s);
 1289                                 bp->b_error = error;
 1290                                 bp->b_resid = bp->b_bcount;
 1291                                 bp->b_flags |= B_ERROR;
 1292                         } else {
 1293                                 bp->b_resid = csio->resid;
 1294                                 bp->b_error = 0;
 1295                                 if (bp->b_resid != 0) {
 1296                                         /* Short transfer ??? */
 1297                                         bp->b_flags |= B_ERROR;
 1298                                 }
 1299                         }
 1300                         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 1301                                 cam_release_devq(done_ccb->ccb_h.path,
 1302                                                  /*relsim_flags*/0,
 1303                                                  /*reduction*/0,
 1304                                                  /*timeout*/0,
 1305                                                  /*getcount_only*/0);
 1306                 } else {
 1307                         bp->b_resid = csio->resid;
 1308                         if (csio->resid > 0)
 1309                                 bp->b_flags |= B_ERROR;
 1310                 }
 1311 
 1312                 /*
  1313                  * Block out any asynchronous callbacks
 1314                  * while we touch the pending ccb list.
 1315                  */
 1316                 oldspl = splcam();
 1317                 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
 1318                 splx(oldspl);
 1319 
 1320                 devstat_end_transaction(&softc->device_stats,
 1321                                         bp->b_bcount - bp->b_resid,
 1322                                         done_ccb->csio.tag_action & 0xf, 
 1323                                         (bp->b_flags & B_READ) ? DEVSTAT_READ
 1324                                                                : DEVSTAT_WRITE);
 1325 
 1326                 if (softc->device_stats.busy_count == 0)
 1327                         softc->flags |= DA_FLAG_WENT_IDLE;
 1328 
 1329                 biodone(bp);
 1330                 break;
 1331         }
 1332         case DA_CCB_PROBE:
 1333         {
 1334                 struct     scsi_read_capacity_data *rdcap;
 1335                 char       announce_buf[80];
 1336 
 1337                 rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
 1338                 
 1339                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 1340                         struct disk_params *dp;
 1341 
 1342                         dasetgeom(periph, rdcap);
 1343                         dp = &softc->params;
 1344                         snprintf(announce_buf, sizeof(announce_buf),
 1345                                 "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
 1346                                 (unsigned long) (((u_int64_t)dp->secsize *
 1347                                 dp->sectors) / (1024*1024)), dp->sectors,
 1348                                 dp->secsize, dp->heads, dp->secs_per_track,
 1349                                 dp->cylinders);
 1350                 } else {
 1351                         int     error;
 1352 
 1353                         announce_buf[0] = '\0';
 1354 
 1355                         /*
 1356                          * Retry any UNIT ATTENTION type errors.  They
 1357                          * are expected at boot.
 1358                          */
 1359                         error = daerror(done_ccb, 0, SF_RETRY_UA |
 1360                                         SF_RETRY_SELTO | SF_NO_PRINT);
 1361                         if (error == ERESTART) {
 1362                                 /*
 1363                                  * A retry was scheduled, so
 1364                                  * just return.
 1365                                  */
 1366                                 return;
 1367                         } else if (error != 0) {
 1368                                 struct scsi_sense_data *sense;
 1369                                 int asc, ascq;
 1370                                 int sense_key, error_code;
 1371                                 int have_sense;
 1372                                 cam_status status;
 1373                                 struct ccb_getdev cgd;
 1374 
 1375                                 /* Don't wedge this device's queue */
 1376                                 cam_release_devq(done_ccb->ccb_h.path,
 1377                                                  /*relsim_flags*/0,
 1378                                                  /*reduction*/0,
 1379                                                  /*timeout*/0,
 1380                                                  /*getcount_only*/0);
 1381 
 1382                                 status = done_ccb->ccb_h.status;
 1383 
 1384                                 xpt_setup_ccb(&cgd.ccb_h, 
 1385                                               done_ccb->ccb_h.path,
 1386                                               /* priority */ 1);
 1387                                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 1388                                 xpt_action((union ccb *)&cgd);
 1389 
 1390                                 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
 1391                                  || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
 1392                                  || ((status & CAM_AUTOSNS_VALID) == 0))
 1393                                         have_sense = FALSE;
 1394                                 else
 1395                                         have_sense = TRUE;
 1396 
 1397                                 if (have_sense) {
 1398                                         sense = &csio->sense_data;
 1399                                         scsi_extract_sense(sense, &error_code,
 1400                                                            &sense_key, 
 1401                                                            &asc, &ascq);
 1402                                 }
 1403                                 /*
 1404                                  * Attach to anything that claims to be a
 1405                                  * direct access or optical disk device,
 1406                                  * as long as it doesn't return a "Logical
 1407                                  * unit not supported" (0x25) error.
 1408                                  */
 1409                                 if ((have_sense) && (asc != 0x25)
 1410                                  && (error_code == SSD_CURRENT_ERROR))
 1411                                         snprintf(announce_buf,
 1412                                             sizeof(announce_buf),
 1413                                                 "Attempt to query device "
 1414                                                 "size failed: %s, %s",
 1415                                                 scsi_sense_key_text[sense_key],
 1416                                                 scsi_sense_desc(asc,ascq,
 1417                                                                 &cgd.inq_data));
 1418                                 else { 
 1419                                         if (have_sense)
 1420                                                 scsi_sense_print(
 1421                                                         &done_ccb->csio);
 1422                                         else {
 1423                                                 xpt_print_path(periph->path);
 1424                                                 printf("got CAM status %#x\n",
 1425                                                        done_ccb->ccb_h.status);
 1426                                         }
 1427 
 1428                                         xpt_print_path(periph->path);
 1429                                         printf("fatal error, failed" 
 1430                                                " to attach to device\n");
 1431 
 1432                                         /*
 1433                                          * Free up resources.
 1434                                          */
 1435                                         cam_periph_invalidate(periph);
 1436                                 } 
 1437                         }
 1438                 }
 1439                 free(rdcap, M_TEMP);
 1440                 if (announce_buf[0] != '\0')
 1441                         xpt_announce_periph(periph, announce_buf);
 1442                 softc->state = DA_STATE_NORMAL;         
 1443                 /*
 1444                  * Since our peripheral may be invalidated by an error
 1445                  * above or an external event, we must release our CCB
 1446                  * before releasing the probe lock on the peripheral.
 1447                  * The peripheral will only go away once the last lock
 1448                  * is removed, and we need it around for the CCB release
 1449                  * operation.
 1450                  */
 1451                 xpt_release_ccb(done_ccb);
 1452                 cam_periph_unlock(periph);
 1453                 return;
 1454         }
 1455         case DA_CCB_WAITING:
 1456         {
 1457                 /* Caller will release the CCB */
 1458                 wakeup(&done_ccb->ccb_h.cbfcnp);
 1459                 return;
 1460         }
 1461         case DA_CCB_DUMP:
 1462                 /* No-op.  We're polling */
 1463                 return;
 1464         default:
 1465                 break;
 1466         }
 1467         xpt_release_ccb(done_ccb);
 1468 }
 1469 
 1470 static int
 1471 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 1472 {
 1473         struct da_softc   *softc;
 1474         struct cam_periph *periph;
 1475 
 1476         periph = xpt_path_periph(ccb->ccb_h.path);
 1477         softc = (struct da_softc *)periph->softc;
 1478 
 1479         /*
 1480          * XXX
 1481          * Until we have a better way of doing pack validation,
 1482          * don't treat UAs as errors.
 1483          */
 1484         sense_flags |= SF_RETRY_UA;
 1485         return(cam_periph_error(ccb, cam_flags, sense_flags,
 1486                                 &softc->saved_ccb));
 1487 }
 1488 
 1489 static void
 1490 daprevent(struct cam_periph *periph, int action)
 1491 {
 1492         struct  da_softc *softc;
 1493         union   ccb *ccb;               
 1494         int     error;
 1495                 
 1496         softc = (struct da_softc *)periph->softc;
 1497 
 1498         if (((action == PR_ALLOW)
 1499           && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
 1500          || ((action == PR_PREVENT)
 1501           && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
 1502                 return;
 1503         }
 1504 
 1505         ccb = cam_periph_getccb(periph, /*priority*/1);
 1506 
 1507         scsi_prevent(&ccb->csio,
 1508                      /*retries*/1,
 1509                      /*cbfcnp*/dadone,
 1510                      MSG_SIMPLE_Q_TAG,
 1511                      action,
 1512                      SSD_FULL_SIZE,
 1513                      5000);
 1514 
 1515         error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
 1516                                   /*sense_flags*/0, &softc->device_stats);
 1517 
 1518         if (error == 0) {
 1519                 if (action == PR_ALLOW)
 1520                         softc->flags &= ~DA_FLAG_PACK_LOCKED;
 1521                 else
 1522                         softc->flags |= DA_FLAG_PACK_LOCKED;
 1523         }
 1524 
 1525         xpt_release_ccb(ccb);
 1526 }
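
/*
 * Editor's note: an illustrative usage sketch, not part of the original
 * file.  A removable-media open/close path would typically pin and later
 * release the media through daprevent().  The helper name
 * da_media_lock_example() is hypothetical; the fragment assumes only the
 * PR_PREVENT/PR_ALLOW actions and the DA_FLAG_PACK_REMOVABLE flag already
 * used elsewhere in this driver.
 */
#if 0
static void
da_media_lock_example(struct cam_periph *periph, struct da_softc *softc,
                      int opening)
{
        /* Only removable media needs a PREVENT/ALLOW round trip. */
        if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
                return;

        if (opening) {
                /* While the device is open, keep the media in the drive. */
                daprevent(periph, PR_PREVENT);
        } else {
                /* On final close, let the user eject the media again. */
                daprevent(periph, PR_ALLOW);
        }
}
#endif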
 1527 
 1528 static void
 1529 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
 1530 {
 1531         struct ccb_calc_geometry ccg;
 1532         struct da_softc *softc;
 1533         struct disk_params *dp;
 1534 
 1535         softc = (struct da_softc *)periph->softc;
 1536 
 1537         dp = &softc->params;
 1538         dp->secsize = scsi_4btoul(rdcap->length);
 1539         dp->sectors = scsi_4btoul(rdcap->addr) + 1;
 1540         /*
 1541          * Have the controller provide us with a geometry
 1542          * for this disk.  The only time the geometry
 1543          * matters is when we boot and the controller
 1544          * is the only one knowledgeable enough to come
 1545          * up with something that will make this a bootable
 1546          * device.
 1547          */
 1548         xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
 1549         ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
 1550         ccg.block_size = dp->secsize;
 1551         ccg.volume_size = dp->sectors;
 1552         ccg.heads = 0;
 1553         ccg.secs_per_track = 0;
 1554         ccg.cylinders = 0;
 1555         xpt_action((union ccb*)&ccg);
 1556         dp->heads = ccg.heads;
 1557         dp->secs_per_track = ccg.secs_per_track;
 1558         dp->cylinders = ccg.cylinders;
 1559 }
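
/*
 * Editor's note: a minimal sketch, not part of the original driver, showing
 * how the READ CAPACITY(10) payload decoded in dasetgeom() above maps to a
 * total capacity in bytes.  It assumes only scsi_4btoul() and the
 * struct scsi_read_capacity_data layout already used above; the helper name
 * da_capacity_bytes() is hypothetical.
 */
#if 0
static u_int64_t
da_capacity_bytes(struct scsi_read_capacity_data *rdcap)
{
        u_int32_t last_lba;     /* last addressable LBA (0-based) */
        u_int32_t secsize;      /* logical block length in bytes */

        last_lba = scsi_4btoul(rdcap->addr);
        secsize = scsi_4btoul(rdcap->length);

        /* The device reports its last LBA, so the sector count is +1. */
        return ((u_int64_t)(last_lba + 1) * secsize);
}
#endif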
 1560 
 1561 static void
 1562 dasendorderedtag(void *arg)
 1563 {
 1564         struct da_softc *softc;
 1565         int s;
 1566 
 1567         for (softc = SLIST_FIRST(&softc_list);
 1568              softc != NULL;
 1569              softc = SLIST_NEXT(softc, links)) {
 1570                 s = splsoftcam();
 1571                 if ((softc->ordered_tag_count == 0) 
 1572                  && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
 1573                         softc->flags |= DA_FLAG_NEED_OTAG;
 1574                 }
 1575                 if (softc->device_stats.busy_count > 0)
 1576                         softc->flags &= ~DA_FLAG_WENT_IDLE;
 1577 
 1578                 softc->ordered_tag_count = 0;
 1579                 splx(s);
 1580         }
 1581         /* Queue us up again */
 1582         timeout(dasendorderedtag, NULL,
 1583                 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
 1584 }
 1585 
 1586 /*
 1587  * Step through all DA peripheral drivers, and if the device is still open,
 1588  * sync the disk cache to physical media.
 1589  */
 1590 static void
 1591 dashutdown(int howto, void *arg)
 1592 {
 1593         struct cam_periph *periph;
 1594         struct da_softc *softc;
 1595 
 1596         for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
 1597              periph = TAILQ_NEXT(periph, unit_links)) {
 1598                 union ccb ccb;
 1599                 softc = (struct da_softc *)periph->softc;
 1600 
 1601                 /*
 1602                  * We only sync the cache if the drive is still open, and
 1603                  * if the drive is capable of it.
 1604                  */
 1605                 if (((softc->flags & DA_FLAG_OPEN) == 0)
 1606                  || (softc->quirks & DA_Q_NO_SYNC_CACHE))
 1607                         continue;
 1608 
 1609                 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
 1610 
 1611                 ccb.ccb_h.ccb_state = DA_CCB_DUMP;
 1612                 scsi_synchronize_cache(&ccb.csio,
 1613                                        /*retries*/1,
 1614                                        /*cbfcnp*/dadone,
 1615                                        MSG_SIMPLE_Q_TAG,
 1616                                        /*begin_lba*/0, /* whole disk */
 1617                                        /*lb_count*/0,
 1618                                        SSD_FULL_SIZE,
 1619                                        5 * 60 * 1000);
 1620 
 1621                 xpt_polled_action(&ccb);
 1622 
 1623                 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1624                         if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
 1625                              CAM_SCSI_STATUS_ERROR)
 1626                          && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
 1627                                 int error_code, sense_key, asc, ascq;
 1628 
 1629                                 scsi_extract_sense(&ccb.csio.sense_data,
 1630                                                    &error_code, &sense_key,
 1631                                                    &asc, &ascq);
 1632 
 1633                                 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
 1634                                         scsi_sense_print(&ccb.csio);
 1635                         } else {
 1636                                 xpt_print_path(periph->path);
 1637                                 printf("Synchronize cache failed, status "
 1638                                        "== 0x%x, scsi status == 0x%x\n",
 1639                                        ccb.ccb_h.status, ccb.csio.scsi_status);
 1640                         }
 1641                 }
 1642 
 1643                 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
 1644                         cam_release_devq(ccb.ccb_h.path,
 1645                                          /*relsim_flags*/0,
 1646                                          /*reduction*/0,
 1647                                          /*timeout*/0,
 1648                                          /*getcount_only*/0);
 1649 
 1650         }
 1651 }
