FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_periph.c

    1 /*-
    2  * Common functions for CAM "type" (peripheral) drivers.
    3  *
    4  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    5  *
    6  * Copyright (c) 1997, 1998 Justin T. Gibbs.
    7  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
    8  * All rights reserved.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions, and the following disclaimer,
   15  *    without modification, immediately at the beginning of the file.
   16  * 2. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/12.0/sys/cam/cam_periph.c 336590 2018-07-21 21:34:10Z mav $");
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/types.h>
   38 #include <sys/malloc.h>
   39 #include <sys/kernel.h>
   40 #include <sys/bio.h>
   41 #include <sys/conf.h>
   42 #include <sys/lock.h>
   43 #include <sys/mutex.h>
   44 #include <sys/buf.h>
   45 #include <sys/proc.h>
   46 #include <sys/devicestat.h>
   47 #include <sys/bus.h>
   48 #include <sys/sbuf.h>
   49 #include <vm/vm.h>
   50 #include <vm/vm_extern.h>
   51 
   52 #include <cam/cam.h>
   53 #include <cam/cam_ccb.h>
   54 #include <cam/cam_queue.h>
   55 #include <cam/cam_xpt_periph.h>
   56 #include <cam/cam_periph.h>
   57 #include <cam/cam_debug.h>
   58 #include <cam/cam_sim.h>
   59 
   60 #include <cam/scsi/scsi_all.h>
   61 #include <cam/scsi/scsi_message.h>
   62 #include <cam/scsi/scsi_pass.h>
   63 
   64 static  u_int           camperiphnextunit(struct periph_driver *p_drv,
   65                                           u_int newunit, int wired,
   66                                           path_id_t pathid, target_id_t target,
   67                                           lun_id_t lun);
   68 static  u_int           camperiphunit(struct periph_driver *p_drv,
   69                                       path_id_t pathid, target_id_t target,
   70                                       lun_id_t lun); 
   71 static  void            camperiphdone(struct cam_periph *periph, 
   72                                         union ccb *done_ccb);
   73 static  void            camperiphfree(struct cam_periph *periph);
   74 static int              camperiphscsistatuserror(union ccb *ccb,
   75                                                 union ccb **orig_ccb,
   76                                                  cam_flags camflags,
   77                                                  u_int32_t sense_flags,
   78                                                  int *openings,
   79                                                  u_int32_t *relsim_flags,
   80                                                  u_int32_t *timeout,
   81                                                  u_int32_t  *action,
   82                                                  const char **action_string);
   83 static  int             camperiphscsisenseerror(union ccb *ccb,
   84                                                 union ccb **orig_ccb,
   85                                                 cam_flags camflags,
   86                                                 u_int32_t sense_flags,
   87                                                 int *openings,
   88                                                 u_int32_t *relsim_flags,
   89                                                 u_int32_t *timeout,
   90                                                 u_int32_t *action,
   91                                                 const char **action_string);
   92 static void             cam_periph_devctl_notify(union ccb *ccb);
   93 
   94 static int nperiph_drivers;
   95 static int initialized = 0;
   96 struct periph_driver **periph_drivers;
   97 
   98 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
   99 
  100 static int periph_selto_delay = 1000;
  101 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
  102 static int periph_noresrc_delay = 500;
  103 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
  104 static int periph_busy_delay = 500;
  105 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
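
/*
 * All three delays are loader tunables (in milliseconds) and can be
 * overridden at boot, e.g. in /boot/loader.conf (illustrative values):
 *
 *	kern.cam.periph_selto_delay="2000"
 *	kern.cam.periph_noresrc_delay="500"
 *	kern.cam.periph_busy_delay="500"
 */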
  106 
  107 
  108 void
  109 periphdriver_register(void *data)
  110 {
  111         struct periph_driver *drv = (struct periph_driver *)data;
  112         struct periph_driver **newdrivers, **old;
  113         int ndrivers;
  114 
  115 again:
  116         ndrivers = nperiph_drivers + 2;
  117         newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
  118                             M_WAITOK);
  119         xpt_lock_buses();
  120         if (ndrivers != nperiph_drivers + 2) {
  121                 /*
   122                  * Lost an allocation race with another registration; go around.
  123                  */
  124                 xpt_unlock_buses();
  125                 free(newdrivers, M_CAMPERIPH);
  126                 goto again;
  127         }
  128         if (periph_drivers)
  129                 bcopy(periph_drivers, newdrivers,
  130                       sizeof(*newdrivers) * nperiph_drivers);
  131         newdrivers[nperiph_drivers] = drv;
  132         newdrivers[nperiph_drivers + 1] = NULL;
  133         old = periph_drivers;
  134         periph_drivers = newdrivers;
  135         nperiph_drivers++;
  136         xpt_unlock_buses();
  137         if (old)
  138                 free(old, M_CAMPERIPH);
   139         /* If the driver is marked as early, or it is late now, initialize it. */
  140         if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
  141             initialized > 1)
  142                 (*drv->init)();
  143 }
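
/*
 * Drivers normally reach periphdriver_register() through the
 * PERIPHDRIVER_DECLARE() macro from cam_periph.h rather than calling it
 * directly.  A minimal sketch for a hypothetical "xx" driver:
 *
 *	static periph_init_t xxinit;
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(xx, xxdriver);
 *
 * The macro generates a module event handler that calls
 * periphdriver_register() on MOD_LOAD and periphdriver_unregister()
 * on MOD_UNLOAD.
 */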
  144 
  145 int
  146 periphdriver_unregister(void *data)
  147 {
  148         struct periph_driver *drv = (struct periph_driver *)data;
  149         int error, n;
  150 
   151         /* If the driver is marked as early, or it is late now, deinitialize it. */
  152         if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
  153             initialized > 1) {
  154                 if (drv->deinit == NULL) {
  155                         printf("CAM periph driver '%s' doesn't have deinit.\n",
  156                             drv->driver_name);
  157                         return (EOPNOTSUPP);
  158                 }
  159                 error = drv->deinit();
  160                 if (error != 0)
  161                         return (error);
  162         }
  163 
  164         xpt_lock_buses();
  165         for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
  166                 ;
  167         KASSERT(n < nperiph_drivers,
  168             ("Periph driver '%s' was not registered", drv->driver_name));
  169         for (; n + 1 < nperiph_drivers; n++)
  170                 periph_drivers[n] = periph_drivers[n + 1];
  171         periph_drivers[n + 1] = NULL;
  172         nperiph_drivers--;
  173         xpt_unlock_buses();
  174         return (0);
  175 }
  176 
  177 void
  178 periphdriver_init(int level)
  179 {
  180         int     i, early;
  181 
  182         initialized = max(initialized, level);
  183         for (i = 0; periph_drivers[i] != NULL; i++) {
  184                 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
  185                 if (early == initialized)
  186                         (*periph_drivers[i]->init)();
  187         }
  188 }
  189 
  190 cam_status
  191 cam_periph_alloc(periph_ctor_t *periph_ctor,
  192                  periph_oninv_t *periph_oninvalidate,
  193                  periph_dtor_t *periph_dtor, periph_start_t *periph_start,
  194                  char *name, cam_periph_type type, struct cam_path *path,
  195                  ac_callback_t *ac_callback, ac_code code, void *arg)
  196 {
  197         struct          periph_driver **p_drv;
  198         struct          cam_sim *sim;
  199         struct          cam_periph *periph;
  200         struct          cam_periph *cur_periph;
  201         path_id_t       path_id;
  202         target_id_t     target_id;
  203         lun_id_t        lun_id;
  204         cam_status      status;
  205         u_int           init_level;
  206 
  207         init_level = 0;
  208         /*
  209          * Handle Hot-Plug scenarios.  If there is already a peripheral
  210          * of our type assigned to this path, we are likely waiting for
  211          * final close on an old, invalidated, peripheral.  If this is
  212          * the case, queue up a deferred call to the peripheral's async
  213          * handler.  If it looks like a mistaken re-allocation, complain.
  214          */
  215         if ((periph = cam_periph_find(path, name)) != NULL) {
  216 
  217                 if ((periph->flags & CAM_PERIPH_INVALID) != 0
  218                  && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
  219                         periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
  220                         periph->deferred_callback = ac_callback;
  221                         periph->deferred_ac = code;
  222                         return (CAM_REQ_INPROG);
  223                 } else {
  224                         printf("cam_periph_alloc: attempt to re-allocate "
  225                                "valid device %s%d rejected flags %#x "
  226                                "refcount %d\n", periph->periph_name,
  227                                periph->unit_number, periph->flags,
  228                                periph->refcount);
  229                 }
  230                 return (CAM_REQ_INVALID);
  231         }
  232         
  233         periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
  234                                              M_NOWAIT|M_ZERO);
  235 
  236         if (periph == NULL)
  237                 return (CAM_RESRC_UNAVAIL);
  238         
  239         init_level++;
  240 
  241 
  242         sim = xpt_path_sim(path);
  243         path_id = xpt_path_path_id(path);
  244         target_id = xpt_path_target_id(path);
  245         lun_id = xpt_path_lun_id(path);
  246         periph->periph_start = periph_start;
  247         periph->periph_dtor = periph_dtor;
  248         periph->periph_oninval = periph_oninvalidate;
  249         periph->type = type;
  250         periph->periph_name = name;
  251         periph->scheduled_priority = CAM_PRIORITY_NONE;
  252         periph->immediate_priority = CAM_PRIORITY_NONE;
  253         periph->refcount = 1;           /* Dropped by invalidation. */
  254         periph->sim = sim;
  255         SLIST_INIT(&periph->ccb_list);
  256         status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
  257         if (status != CAM_REQ_CMP)
  258                 goto failure;
  259         periph->path = path;
  260 
  261         xpt_lock_buses();
  262         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
  263                 if (strcmp((*p_drv)->driver_name, name) == 0)
  264                         break;
  265         }
  266         if (*p_drv == NULL) {
  267                 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
  268                 xpt_unlock_buses();
  269                 xpt_free_path(periph->path);
  270                 free(periph, M_CAMPERIPH);
  271                 return (CAM_REQ_INVALID);
  272         }
  273         periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
  274         cur_periph = TAILQ_FIRST(&(*p_drv)->units);
  275         while (cur_periph != NULL
  276             && cur_periph->unit_number < periph->unit_number)
  277                 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
  278         if (cur_periph != NULL) {
  279                 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
  280                 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
  281         } else {
  282                 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
  283                 (*p_drv)->generation++;
  284         }
  285         xpt_unlock_buses();
  286 
  287         init_level++;
  288 
  289         status = xpt_add_periph(periph);
  290         if (status != CAM_REQ_CMP)
  291                 goto failure;
  292 
  293         init_level++;
  294         CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
  295 
  296         status = periph_ctor(periph, arg);
  297 
  298         if (status == CAM_REQ_CMP)
  299                 init_level++;
  300 
  301 failure:
  302         switch (init_level) {
  303         case 4:
  304                 /* Initialized successfully */
  305                 break;
  306         case 3:
  307                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
  308                 xpt_remove_periph(periph);
  309                 /* FALLTHROUGH */
  310         case 2:
  311                 xpt_lock_buses();
  312                 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
  313                 xpt_unlock_buses();
  314                 xpt_free_path(periph->path);
  315                 /* FALLTHROUGH */
  316         case 1:
  317                 free(periph, M_CAMPERIPH);
  318                 /* FALLTHROUGH */
  319         case 0:
  320                 /* No cleanup to perform. */
  321                 break;
  322         default:
  323                 panic("%s: Unknown init level", __func__);
  324         }
  325         return(status);
  326 }
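
/*
 * A typical caller is a peripheral driver's async callback reacting to
 * AC_FOUND_DEVICE; the sketch below is modeled on da(4) (the names are
 * that driver's) with error handling elided:
 *
 *	status = cam_periph_alloc(daregister, daoninvalidate, dacleanup,
 *	    dastart, "da", CAM_PERIPH_BIO, path, daasync,
 *	    AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("daasync: Unable to attach to new device "
 *		    "due to status 0x%x\n", status);
 */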
  327 
  328 /*
  329  * Find a peripheral structure with the specified path, target, lun, 
  330  * and (optionally) type.  If the name is NULL, this function will return
  331  * the first peripheral driver that matches the specified path.
  332  */
  333 struct cam_periph *
  334 cam_periph_find(struct cam_path *path, char *name)
  335 {
  336         struct periph_driver **p_drv;
  337         struct cam_periph *periph;
  338 
  339         xpt_lock_buses();
  340         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
  341 
  342                 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
  343                         continue;
  344 
  345                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
  346                         if (xpt_path_comp(periph->path, path) == 0) {
  347                                 xpt_unlock_buses();
  348                                 cam_periph_assert(periph, MA_OWNED);
  349                                 return(periph);
  350                         }
  351                 }
  352                 if (name != NULL) {
  353                         xpt_unlock_buses();
  354                         return(NULL);
  355                 }
  356         }
  357         xpt_unlock_buses();
  358         return(NULL);
  359 }
  360 
  361 /*
  362  * Find peripheral driver instances attached to the specified path.
  363  */
  364 int
  365 cam_periph_list(struct cam_path *path, struct sbuf *sb)
  366 {
  367         struct sbuf local_sb;
  368         struct periph_driver **p_drv;
  369         struct cam_periph *periph;
  370         int count;
  371         int sbuf_alloc_len;
  372 
  373         sbuf_alloc_len = 16;
  374 retry:
  375         sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
  376         count = 0;
  377         xpt_lock_buses();
  378         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
  379 
  380                 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
  381                         if (xpt_path_comp(periph->path, path) != 0)
  382                                 continue;
  383 
  384                         if (sbuf_len(&local_sb) != 0)
  385                                 sbuf_cat(&local_sb, ",");
  386 
  387                         sbuf_printf(&local_sb, "%s%d", periph->periph_name,
  388                                     periph->unit_number);
  389 
  390                         if (sbuf_error(&local_sb) == ENOMEM) {
  391                                 sbuf_alloc_len *= 2;
  392                                 xpt_unlock_buses();
  393                                 sbuf_delete(&local_sb);
  394                                 goto retry;
  395                         }
  396                         count++;
  397                 }
  398         }
  399         xpt_unlock_buses();
  400         sbuf_finish(&local_sb);
  401         sbuf_cpy(sb, sbuf_data(&local_sb));
  402         sbuf_delete(&local_sb);
  403         return (count);
  404 }
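
/*
 * The names are accumulated into the caller-supplied sbuf; a minimal,
 * hypothetical caller:
 *
 *	struct sbuf sb;
 *	char buf[64];
 *
 *	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
 *	if (cam_periph_list(path, &sb) > 0) {
 *		sbuf_finish(&sb);
 *		printf("attached: %s\n", sbuf_data(&sb));
 *	}
 *	sbuf_delete(&sb);
 */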
  405 
  406 int
  407 cam_periph_acquire(struct cam_periph *periph)
  408 {
  409         int status;
  410 
  411         if (periph == NULL)
  412                 return (EINVAL);
  413 
  414         status = ENOENT;
  415         xpt_lock_buses();
  416         if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
  417                 periph->refcount++;
  418                 status = 0;
  419         }
  420         xpt_unlock_buses();
  421 
  422         return (status);
  423 }
  424 
  425 void
  426 cam_periph_doacquire(struct cam_periph *periph)
  427 {
  428 
  429         xpt_lock_buses();
  430         KASSERT(periph->refcount >= 1,
  431             ("cam_periph_doacquire() with refcount == %d", periph->refcount));
  432         periph->refcount++;
  433         xpt_unlock_buses();
  434 }
  435 
  436 void
  437 cam_periph_release_locked_buses(struct cam_periph *periph)
  438 {
  439 
  440         cam_periph_assert(periph, MA_OWNED);
  441         KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
  442         if (--periph->refcount == 0)
  443                 camperiphfree(periph);
  444 }
  445 
  446 void
  447 cam_periph_release_locked(struct cam_periph *periph)
  448 {
  449 
  450         if (periph == NULL)
  451                 return;
  452 
  453         xpt_lock_buses();
  454         cam_periph_release_locked_buses(periph);
  455         xpt_unlock_buses();
  456 }
  457 
  458 void
  459 cam_periph_release(struct cam_periph *periph)
  460 {
  461         struct mtx *mtx;
  462 
  463         if (periph == NULL)
  464                 return;
  465         
  466         cam_periph_assert(periph, MA_NOTOWNED);
  467         mtx = cam_periph_mtx(periph);
  468         mtx_lock(mtx);
  469         cam_periph_release_locked(periph);
  470         mtx_unlock(mtx);
  471 }
  472 
  473 int
  474 cam_periph_hold(struct cam_periph *periph, int priority)
  475 {
  476         int error;
  477 
  478         /*
  479          * Increment the reference count on the peripheral
  480          * while we wait for our lock attempt to succeed
  481          * to ensure the peripheral doesn't disappear out
   482  * from under us while we sleep.
  483          */
  484 
  485         if (cam_periph_acquire(periph) != 0)
  486                 return (ENXIO);
  487 
  488         cam_periph_assert(periph, MA_OWNED);
  489         while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
  490                 periph->flags |= CAM_PERIPH_LOCK_WANTED;
  491                 if ((error = cam_periph_sleep(periph, periph, priority,
  492                     "caplck", 0)) != 0) {
  493                         cam_periph_release_locked(periph);
  494                         return (error);
  495                 }
  496                 if (periph->flags & CAM_PERIPH_INVALID) {
  497                         cam_periph_release_locked(periph);
  498                         return (ENXIO);
  499                 }
  500         }
  501 
  502         periph->flags |= CAM_PERIPH_LOCKED;
  503         return (0);
  504 }
  505 
  506 void
  507 cam_periph_unhold(struct cam_periph *periph)
  508 {
  509 
  510         cam_periph_assert(periph, MA_OWNED);
  511 
  512         periph->flags &= ~CAM_PERIPH_LOCKED;
  513         if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
  514                 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
  515                 wakeup(periph);
  516         }
  517 
  518         cam_periph_release_locked(periph);
  519 }
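
/*
 * The acquire/lock/hold dance is the usual pattern in a peripheral
 * driver's open routine; a sketch with the unwind paths abbreviated:
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return (ENXIO);
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		cam_periph_release(periph);
 *		return (error);
 *	}
 *	... probe or configure the device ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */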
  520 
  521 /*
  522  * Look for the next unit number that is not currently in use for this
  523  * peripheral type starting at "newunit".  Also exclude unit numbers that
   524  * are reserved for future "hardwiring" unless we already know that this
  525  * is a potential wired device.  Only assume that the device is "wired" the
  526  * first time through the loop since after that we'll be looking at unit
  527  * numbers that did not match a wiring entry.
  528  */
  529 static u_int
  530 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
  531                   path_id_t pathid, target_id_t target, lun_id_t lun)
  532 {
  533         struct  cam_periph *periph;
  534         char    *periph_name;
  535         int     i, val, dunit, r;
  536         const char *dname, *strval;
  537 
  538         periph_name = p_drv->driver_name;
  539         for (;;newunit++) {
  540 
  541                 for (periph = TAILQ_FIRST(&p_drv->units);
  542                      periph != NULL && periph->unit_number != newunit;
  543                      periph = TAILQ_NEXT(periph, unit_links))
  544                         ;
  545 
  546                 if (periph != NULL && periph->unit_number == newunit) {
  547                         if (wired != 0) {
  548                                 xpt_print(periph->path, "Duplicate Wired "
  549                                     "Device entry!\n");
  550                                 xpt_print(periph->path, "Second device (%s "
  551                                     "device at scbus%d target %d lun %d) will "
  552                                     "not be wired\n", periph_name, pathid,
  553                                     target, lun);
  554                                 wired = 0;
  555                         }
  556                         continue;
  557                 }
  558                 if (wired)
  559                         break;
  560 
  561                 /*
  562                  * Don't match entries like "da 4" as a wired down
  563                  * device, but do match entries like "da 4 target 5"
  564                  * or even "da 4 scbus 1". 
  565                  */
  566                 i = 0;
  567                 dname = periph_name;
  568                 for (;;) {
  569                         r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
  570                         if (r != 0)
  571                                 break;
  572                         /* if no "target" and no specific scbus, skip */
  573                         if (resource_int_value(dname, dunit, "target", &val) &&
  574                             (resource_string_value(dname, dunit, "at",&strval)||
  575                              strcmp(strval, "scbus") == 0))
  576                                 continue;
  577                         if (newunit == dunit)
  578                                 break;
  579                 }
  580                 if (r != 0)
  581                         break;
  582         }
  583         return (newunit);
  584 }
  585 
  586 static u_int
  587 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
  588               target_id_t target, lun_id_t lun)
  589 {
  590         u_int   unit;
  591         int     wired, i, val, dunit;
  592         const char *dname, *strval;
  593         char    pathbuf[32], *periph_name;
  594 
  595         periph_name = p_drv->driver_name;
  596         snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
  597         unit = 0;
  598         i = 0;
  599         dname = periph_name;
  600         for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
  601              wired = 0) {
  602                 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
  603                         if (strcmp(strval, pathbuf) != 0)
  604                                 continue;
  605                         wired++;
  606                 }
  607                 if (resource_int_value(dname, dunit, "target", &val) == 0) {
  608                         if (val != target)
  609                                 continue;
  610                         wired++;
  611                 }
  612                 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
  613                         if (val != lun)
  614                                 continue;
  615                         wired++;
  616                 }
  617                 if (wired != 0) {
  618                         unit = dunit;
  619                         break;
  620                 }
  621         }
  622 
  623         /*
  624          * Either start from 0 looking for the next unit or from
  625          * the unit number given in the resource config.  This way,
  626          * if we have wildcard matches, we don't return the same
  627          * unit number twice.
  628          */
  629         unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
  630 
  631         return (unit);
  632 }
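
/*
 * The wiring entries consulted above come from the kernel environment;
 * for example, these /boot/device.hints lines (illustrative values)
 * reserve unit da4 for the device at scbus1, target 5, lun 0:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */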
  633 
  634 void
  635 cam_periph_invalidate(struct cam_periph *periph)
  636 {
  637 
  638         cam_periph_assert(periph, MA_OWNED);
  639         /*
  640          * We only call this routine the first time a peripheral is
  641          * invalidated.
  642          */
  643         if ((periph->flags & CAM_PERIPH_INVALID) != 0)
  644                 return;
  645 
  646         CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
  647         if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
  648                 struct sbuf sb;
  649                 char buffer[160];
  650 
  651                 sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
  652                 xpt_denounce_periph_sbuf(periph, &sb);
  653                 sbuf_finish(&sb);
  654                 sbuf_putbuf(&sb);
  655         }
  656         periph->flags |= CAM_PERIPH_INVALID;
  657         periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
  658         if (periph->periph_oninval != NULL)
  659                 periph->periph_oninval(periph);
  660         cam_periph_release_locked(periph);
  661 }
  662 
  663 static void
  664 camperiphfree(struct cam_periph *periph)
  665 {
  666         struct periph_driver **p_drv;
  667         struct periph_driver *drv;
  668 
  669         cam_periph_assert(periph, MA_OWNED);
  670         KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
  671             periph->periph_name, periph->unit_number));
  672         for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
  673                 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
  674                         break;
  675         }
  676         if (*p_drv == NULL) {
   677                 printf("camperiphfree: attempt to free non-existent periph\n");
  678                 return;
  679         }
  680         /*
  681          * Cache a pointer to the periph_driver structure.  If a
  682          * periph_driver is added or removed from the array (see
   683  * periphdriver_register()) while we drop the topology lock
  684          * below, p_drv may change.  This doesn't protect against this
  685          * particular periph_driver going away.  That will require full
  686          * reference counting in the periph_driver infrastructure.
  687          */
  688         drv = *p_drv;
  689 
  690         /*
  691          * We need to set this flag before dropping the topology lock, to
   692  * let anyone who is traversing the list know that this peripheral is
   693  * about to be freed and that there will be no more reference count
  694          * checks.
  695          */
  696         periph->flags |= CAM_PERIPH_FREE;
  697 
  698         /*
  699          * The peripheral destructor semantics dictate calling with only the
  700          * SIM mutex held.  Since it might sleep, it should not be called
  701          * with the topology lock held.
  702          */
  703         xpt_unlock_buses();
  704 
  705         /*
  706          * We need to call the peripheral destructor prior to removing the
  707          * peripheral from the list.  Otherwise, we risk running into a
  708          * scenario where the peripheral unit number may get reused
  709          * (because it has been removed from the list), but some resources
  710          * used by the peripheral are still hanging around.  In particular,
  711          * the devfs nodes used by some peripherals like the pass(4) driver
  712          * aren't fully cleaned up until the destructor is run.  If the
  713          * unit number is reused before the devfs instance is fully gone,
  714          * devfs will panic.
  715          */
  716         if (periph->periph_dtor != NULL)
  717                 periph->periph_dtor(periph);
  718 
  719         /*
  720          * The peripheral list is protected by the topology lock.
  721          */
  722         xpt_lock_buses();
  723 
  724         TAILQ_REMOVE(&drv->units, periph, unit_links);
  725         drv->generation++;
  726 
  727         xpt_remove_periph(periph);
  728 
  729         xpt_unlock_buses();
  730         if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
  731                 xpt_print(periph->path, "Periph destroyed\n");
  732         else
  733                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
  734 
  735         if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
  736                 union ccb ccb;
  737                 void *arg;
  738 
  739                 switch (periph->deferred_ac) {
  740                 case AC_FOUND_DEVICE:
  741                         ccb.ccb_h.func_code = XPT_GDEV_TYPE;
  742                         xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
  743                         xpt_action(&ccb);
  744                         arg = &ccb;
  745                         break;
  746                 case AC_PATH_REGISTERED:
  747                         xpt_path_inq(&ccb.cpi, periph->path);
  748                         arg = &ccb;
  749                         break;
  750                 default:
  751                         arg = NULL;
  752                         break;
  753                 }
  754                 periph->deferred_callback(NULL, periph->deferred_ac,
  755                                           periph->path, arg);
  756         }
  757         xpt_free_path(periph->path);
  758         free(periph, M_CAMPERIPH);
  759         xpt_lock_buses();
  760 }
  761 
  762 /*
  763  * Map user virtual pointers into kernel virtual address space, so we can
  764  * access the memory.  This is now a generic function that centralizes most
  765  * of the sanity checks on the data flags, if any.
  766  * This also only works for up to MAXPHYS memory.  Since we use
  767  * buffers to map stuff in and out, we're limited to the buffer size.
  768  */
  769 int
  770 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
  771     u_int maxmap)
  772 {
  773         int numbufs, i, j;
  774         int flags[CAM_PERIPH_MAXMAPS];
  775         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
  776         u_int32_t lengths[CAM_PERIPH_MAXMAPS];
  777         u_int32_t dirs[CAM_PERIPH_MAXMAPS];
  778 
  779         if (maxmap == 0)
  780                 maxmap = DFLTPHYS;      /* traditional default */
  781         else if (maxmap > MAXPHYS)
  782                 maxmap = MAXPHYS;       /* for safety */
  783         switch(ccb->ccb_h.func_code) {
  784         case XPT_DEV_MATCH:
  785                 if (ccb->cdm.match_buf_len == 0) {
  786                         printf("cam_periph_mapmem: invalid match buffer "
  787                                "length 0\n");
  788                         return(EINVAL);
  789                 }
  790                 if (ccb->cdm.pattern_buf_len > 0) {
  791                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
  792                         lengths[0] = ccb->cdm.pattern_buf_len;
  793                         dirs[0] = CAM_DIR_OUT;
  794                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
  795                         lengths[1] = ccb->cdm.match_buf_len;
  796                         dirs[1] = CAM_DIR_IN;
  797                         numbufs = 2;
  798                 } else {
  799                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
  800                         lengths[0] = ccb->cdm.match_buf_len;
  801                         dirs[0] = CAM_DIR_IN;
  802                         numbufs = 1;
  803                 }
  804                 /*
  805                  * This request will not go to the hardware, no reason
  806                  * to be so strict. vmapbuf() is able to map up to MAXPHYS.
  807                  */
  808                 maxmap = MAXPHYS;
  809                 break;
  810         case XPT_SCSI_IO:
  811         case XPT_CONT_TARGET_IO:
  812                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
  813                         return(0);
  814                 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
  815                         return (EINVAL);
  816                 data_ptrs[0] = &ccb->csio.data_ptr;
  817                 lengths[0] = ccb->csio.dxfer_len;
  818                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
  819                 numbufs = 1;
  820                 break;
  821         case XPT_ATA_IO:
  822                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
  823                         return(0);
  824                 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
  825                         return (EINVAL);
  826                 data_ptrs[0] = &ccb->ataio.data_ptr;
  827                 lengths[0] = ccb->ataio.dxfer_len;
  828                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
  829                 numbufs = 1;
  830                 break;
  831         case XPT_MMC_IO:
  832                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
  833                         return(0);
  834                 /* Two mappings: one for cmd->data and one for cmd->data->data */
  835                 data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
  836                 lengths[0] = sizeof(struct mmc_data *);
  837                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
  838                 data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
  839                 lengths[1] = ccb->mmcio.cmd.data->len;
  840                 dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
  841                 numbufs = 2;
  842                 break;
  843         case XPT_SMP_IO:
  844                 data_ptrs[0] = &ccb->smpio.smp_request;
  845                 lengths[0] = ccb->smpio.smp_request_len;
  846                 dirs[0] = CAM_DIR_OUT;
  847                 data_ptrs[1] = &ccb->smpio.smp_response;
  848                 lengths[1] = ccb->smpio.smp_response_len;
  849                 dirs[1] = CAM_DIR_IN;
  850                 numbufs = 2;
  851                 break;
  852         case XPT_NVME_IO:
  853         case XPT_NVME_ADMIN:
  854                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
  855                         return (0);
  856                 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
  857                         return (EINVAL);
  858                 data_ptrs[0] = &ccb->nvmeio.data_ptr;
  859                 lengths[0] = ccb->nvmeio.dxfer_len;
  860                 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
  861                 numbufs = 1;
  862                 break;
  863         case XPT_DEV_ADVINFO:
  864                 if (ccb->cdai.bufsiz == 0)
  865                         return (0);
  866 
  867                 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
  868                 lengths[0] = ccb->cdai.bufsiz;
  869                 dirs[0] = CAM_DIR_IN;
  870                 numbufs = 1;
  871 
  872                 /*
  873                  * This request will not go to the hardware, no reason
  874                  * to be so strict. vmapbuf() is able to map up to MAXPHYS.
  875                  */
  876                 maxmap = MAXPHYS;
  877                 break;
  878         default:
  879                 return(EINVAL);
  880                 break; /* NOTREACHED */
  881         }
  882 
  883         /*
  884          * Check the transfer length and permissions first, so we don't
  885          * have to unmap any previously mapped buffers.
  886          */
  887         for (i = 0; i < numbufs; i++) {
  888 
  889                 flags[i] = 0;
  890 
  891                 /*
  892                  * The userland data pointer passed in may not be page
  893                  * aligned.  vmapbuf() truncates the address to a page
  894                  * boundary, so if the address isn't page aligned, we'll
  895                  * need enough space for the given transfer length, plus
  896                  * whatever extra space is necessary to make it to the page
  897                  * boundary.
  898                  */
  899                 if ((lengths[i] +
  900                     (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
  901                         printf("cam_periph_mapmem: attempt to map %lu bytes, "
  902                                "which is greater than %lu\n",
  903                                (long)(lengths[i] +
  904                                (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
  905                                (u_long)maxmap);
  906                         return(E2BIG);
  907                 }
  908 
  909                 if (dirs[i] & CAM_DIR_OUT) {
  910                         flags[i] = BIO_WRITE;
  911                 }
  912 
  913                 if (dirs[i] & CAM_DIR_IN) {
  914                         flags[i] = BIO_READ;
  915                 }
  916 
  917         }
  918 
  919         /*
   920          * This keeps the kernel stack of the current thread from getting
  921          * swapped.  In low-memory situations where the kernel stack might
  922          * otherwise get swapped out, this holds it and allows the thread
  923          * to make progress and release the kernel mapped pages sooner.
  924          *
  925          * XXX KDM should I use P_NOSWAP instead?
  926          */
  927         PHOLD(curproc);
  928 
  929         for (i = 0; i < numbufs; i++) {
  930                 /*
  931                  * Get the buffer.
  932                  */
  933                 mapinfo->bp[i] = getpbuf(NULL);
  934 
  935                 /* put our pointer in the data slot */
  936                 mapinfo->bp[i]->b_data = *data_ptrs[i];
  937 
  938                 /* save the user's data address */
  939                 mapinfo->bp[i]->b_caller1 = *data_ptrs[i];
  940 
  941                 /* set the transfer length, we know it's < MAXPHYS */
  942                 mapinfo->bp[i]->b_bufsize = lengths[i];
  943 
  944                 /* set the direction */
  945                 mapinfo->bp[i]->b_iocmd = flags[i];
  946 
  947                 /*
  948                  * Map the buffer into kernel memory.
  949                  *
   950                  * Note that useracc() alone is not a sufficient test.
  951                  * vmapbuf() can still fail due to a smaller file mapped
  952                  * into a larger area of VM, or if userland races against
  953                  * vmapbuf() after the useracc() check.
  954                  */
  955                 if (vmapbuf(mapinfo->bp[i], 1) < 0) {
  956                         for (j = 0; j < i; ++j) {
  957                                 *data_ptrs[j] = mapinfo->bp[j]->b_caller1;
  958                                 vunmapbuf(mapinfo->bp[j]);
  959                                 relpbuf(mapinfo->bp[j], NULL);
  960                         }
  961                         relpbuf(mapinfo->bp[i], NULL);
  962                         PRELE(curproc);
  963                         return(EACCES);
  964                 }
  965 
  966                 /* set our pointer to the new mapped area */
  967                 *data_ptrs[i] = mapinfo->bp[i]->b_data;
  968 
  969                 mapinfo->num_bufs_used++;
  970         }
  971 
  972         /*
  973          * Now that we've gotten this far, change ownership to the kernel
  974          * of the buffers so that we don't run afoul of returning to user
  975          * space with locks (on the buffer) held.
  976          */
  977         for (i = 0; i < numbufs; i++) {
  978                 BUF_KERNPROC(mapinfo->bp[i]);
  979         }
  980 
  981 
  982         return(0);
  983 }
  984 
  985 /*
  986  * Unmap memory segments mapped into kernel virtual address space by
  987  * cam_periph_mapmem().
  988  */
  989 void
  990 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
  991 {
  992         int numbufs, i;
  993         u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
  994 
  995         if (mapinfo->num_bufs_used <= 0) {
  996                 /* nothing to free and the process wasn't held. */
  997                 return;
  998         }
  999 
 1000         switch (ccb->ccb_h.func_code) {
 1001         case XPT_DEV_MATCH:
 1002                 numbufs = min(mapinfo->num_bufs_used, 2);
 1003 
 1004                 if (numbufs == 1) {
 1005                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
 1006                 } else {
 1007                         data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
 1008                         data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
 1009                 }
 1010                 break;
 1011         case XPT_SCSI_IO:
 1012         case XPT_CONT_TARGET_IO:
 1013                 data_ptrs[0] = &ccb->csio.data_ptr;
 1014                 numbufs = min(mapinfo->num_bufs_used, 1);
 1015                 break;
 1016         case XPT_ATA_IO:
 1017                 data_ptrs[0] = &ccb->ataio.data_ptr;
 1018                 numbufs = min(mapinfo->num_bufs_used, 1);
 1019                 break;
 1020         case XPT_SMP_IO:
 1021                 numbufs = min(mapinfo->num_bufs_used, 2);
 1022                 data_ptrs[0] = &ccb->smpio.smp_request;
 1023                 data_ptrs[1] = &ccb->smpio.smp_response;
 1024                 break;
 1025         case XPT_DEV_ADVINFO:
 1026                 numbufs = min(mapinfo->num_bufs_used, 1);
 1027                 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
 1028                 break;
 1029         case XPT_NVME_IO:
 1030         case XPT_NVME_ADMIN:
 1031                 data_ptrs[0] = &ccb->nvmeio.data_ptr;
 1032                 numbufs = min(mapinfo->num_bufs_used, 1);
 1033                 break;
 1034         default:
 1035                 /* allow ourselves to be swapped once again */
 1036                 PRELE(curproc);
 1037                 return;
 1038                 break; /* NOTREACHED */ 
 1039         }
 1040 
 1041         for (i = 0; i < numbufs; i++) {
 1042                 /* Set the user's pointer back to the original value */
 1043                 *data_ptrs[i] = mapinfo->bp[i]->b_caller1;
 1044 
 1045                 /* unmap the buffer */
 1046                 vunmapbuf(mapinfo->bp[i]);
 1047 
 1048                 /* release the buffer */
 1049                 relpbuf(mapinfo->bp[i], NULL);
 1050         }
 1051 
 1052         /* allow ourselves to be swapped once again */
 1053         PRELE(curproc);
 1054 }
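
/*
 * cam_periph_mapmem() and cam_periph_unmapmem() bracket the dispatch of
 * a user-initiated CCB; a sketch modeled on the pass(4) ioctl path
 * (softc fields and passerror() are that driver's, error handling
 * trimmed):
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, passerror,
 *		    CAM_RETRY_SELTO, SF_RETRY_UA, softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */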
 1055 
 1056 int
 1057 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
 1058                  int (*error_routine)(union ccb *ccb, 
 1059                                       cam_flags camflags,
 1060                                       u_int32_t sense_flags))
 1061 {
 1062         union ccb            *ccb;
 1063         int                  error;
 1064         int                  found;
 1065 
 1066         error = found = 0;
 1067 
 1068         switch(cmd){
 1069         case CAMGETPASSTHRU:
 1070                 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 1071                 xpt_setup_ccb(&ccb->ccb_h,
 1072                               ccb->ccb_h.path,
 1073                               CAM_PRIORITY_NORMAL);
 1074                 ccb->ccb_h.func_code = XPT_GDEVLIST;
 1075 
 1076                 /*
 1077                  * Basically, the point of this is that we go through
  1078                  * getting the list of devices until we find a passthrough
 1079                  * device.  In the current version of the CAM code, the
 1080                  * only way to determine what type of device we're dealing
 1081                  * with is by its name.
 1082                  */
 1083                 while (found == 0) {
 1084                         ccb->cgdl.index = 0;
 1085                         ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
 1086                         while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
 1087 
 1088                                 /* we want the next device in the list */
 1089                                 xpt_action(ccb);
 1090                                 if (strncmp(ccb->cgdl.periph_name, 
 1091                                     "pass", 4) == 0){
 1092                                         found = 1;
 1093                                         break;
 1094                                 }
 1095                         }
 1096                         if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
 1097                             (found == 0)) {
 1098                                 ccb->cgdl.periph_name[0] = '\0';
 1099                                 ccb->cgdl.unit_number = 0;
 1100                                 break;
 1101                         }
 1102                 }
 1103 
 1104                 /* copy the result back out */  
 1105                 bcopy(ccb, addr, sizeof(union ccb));
 1106 
 1107                 /* and release the ccb */
 1108                 xpt_release_ccb(ccb);
 1109 
 1110                 break;
 1111         default:
 1112                 error = ENOTTY;
 1113                 break;
 1114         }
 1115         return(error);
 1116 }
 1117 
 1118 static void
 1119 cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
 1120 {
 1121 
 1122         panic("%s: already done with ccb %p", __func__, done_ccb);
 1123 }
 1124 
 1125 static void
 1126 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
 1127 {
 1128 
 1129         /* Caller will release the CCB */
 1130         xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
 1131         done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
 1132         wakeup(&done_ccb->ccb_h.cbfcnp);
 1133 }
 1134 
 1135 static void
 1136 cam_periph_ccbwait(union ccb *ccb)
 1137 {
 1138 
 1139         if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
 1140                 while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
 1141                         xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
 1142                             PRIBIO, "cbwait", 0);
 1143         }
 1144         KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
 1145             (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
 1146             ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
 1147              "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
 1148              ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
 1149 }
 1150 
 1151 /*
 1152  * Dispatch a CCB and wait for it to complete.  If the CCB has set a
 1153  * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
 1154  */
 1155 int
 1156 cam_periph_runccb(union ccb *ccb,
 1157                   int (*error_routine)(union ccb *ccb,
 1158                                        cam_flags camflags,
 1159                                        u_int32_t sense_flags),
 1160                   cam_flags camflags, u_int32_t sense_flags,
 1161                   struct devstat *ds)
 1162 {
 1163         struct bintime *starttime;
 1164         struct bintime ltime;
 1165         int error;
 1166         bool must_poll;
 1167         uint32_t timeout = 1;
 1168 
 1169         starttime = NULL;
 1170         xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
 1171         KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
 1172             ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
 1173              ccb->ccb_h.func_code, ccb->ccb_h.flags));
 1174 
 1175         /*
 1176          * If the user has supplied a stats structure, and if we understand
 1177          * this particular type of ccb, record the transaction start.
 1178          */
 1179         if (ds != NULL &&
 1180             (ccb->ccb_h.func_code == XPT_SCSI_IO ||
 1181             ccb->ccb_h.func_code == XPT_ATA_IO ||
 1182             ccb->ccb_h.func_code == XPT_NVME_IO)) {
 1183                 starttime = &ltime;
 1184                 binuptime(starttime);
 1185                 devstat_start_transaction(ds, starttime);
 1186         }
 1187 
 1188         /*
 1189          * We must poll the I/O while we're dumping. The scheduler is normally
 1190          * stopped for dumping, except when we call doadump from ddb. While the
 1191          * scheduler is running in this case, we still need to poll the I/O to
 1192          * avoid sleeping waiting for the ccb to complete.
 1193          *
  1194          * A panic-triggered dump stops the scheduler; any callback from the
  1195          * shutdown_post_sync event will run with the scheduler stopped, but
  1196          * before we're officially dumping.  To avoid hanging in adashutdown-
  1197          * initiated commands (or other similar situations), we have to test
  1198          * for SCHEDULER_STOPPED() here as well.
 1199          *
 1200          * To avoid locking problems, dumping/polling callers must call
 1201          * without a periph lock held.
 1202          */
 1203         must_poll = dumping || SCHEDULER_STOPPED();
 1204         ccb->ccb_h.cbfcnp = cam_periph_done;
 1205 
 1206         /*
 1207          * If we're polling, then we need to ensure that we have ample resources
 1208          * in the periph.  cam_periph_error can reschedule the ccb by calling
 1209          * xpt_action and returning ERESTART, so we have to effect the polling
 1210          * in the do loop below.
 1211          */
 1212         if (must_poll) {
 1213                 timeout = xpt_poll_setup(ccb);
 1214         }
 1215 
 1216         if (timeout == 0) {
 1217                 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
 1218                 error = EBUSY;
 1219         } else {
 1220                 xpt_action(ccb);
 1221                 do {
 1222                         if (must_poll) {
 1223                                 xpt_pollwait(ccb, timeout);
 1224                                 timeout = ccb->ccb_h.timeout * 10;
 1225                         } else {
 1226                                 cam_periph_ccbwait(ccb);
 1227                         }
 1228                         if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
 1229                                 error = 0;
 1230                         else if (error_routine != NULL) {
 1231                                 ccb->ccb_h.cbfcnp = cam_periph_done;
 1232                                 error = (*error_routine)(ccb, camflags, sense_flags);
 1233                         } else
 1234                                 error = 0;
 1235                 } while (error == ERESTART);
 1236         }
 1237 
 1238         if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
 1239                 cam_release_devq(ccb->ccb_h.path,
 1240                                  /* relsim_flags */0,
 1241                                  /* openings */0,
 1242                                  /* timeout */0,
 1243                                  /* getcount_only */ FALSE);
 1244                 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 1245         }
 1246 
 1247         if (ds != NULL) {
 1248                 uint32_t bytes;
 1249                 devstat_tag_type tag;
 1250                 bool valid = true;
 1251 
 1252                 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
 1253                         bytes = ccb->csio.dxfer_len - ccb->csio.resid;
 1254                         tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
 1255                 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
 1256                         bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
 1257                         tag = (devstat_tag_type)0;
 1258                 } else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
  1259                         bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
 1260                         tag = (devstat_tag_type)0;
 1261                 } else {
 1262                         valid = false;
 1263                 }
 1264                 if (valid)
 1265                         devstat_end_transaction(ds, bytes, tag,
 1266                             ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
 1267                             DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
 1268                             DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
 1269         }
 1270 
 1271         return(error);
 1272 }
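
/*
 * A synchronous TEST UNIT READY is a representative use of
 * cam_periph_runccb(); a sketch assuming the periph lock is held, with
 * error handling elided:
 *
 *	union ccb *ccb;
 *	int error;
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 4, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, NULL, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, NULL);
 *	xpt_release_ccb(ccb);
 */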
 1273 
 1274 void
 1275 cam_freeze_devq(struct cam_path *path)
 1276 {
 1277         struct ccb_hdr ccb_h;
 1278 
 1279         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
 1280         xpt_setup_ccb(&ccb_h, path, /*priority*/1);
 1281         ccb_h.func_code = XPT_NOOP;
 1282         ccb_h.flags = CAM_DEV_QFREEZE;
 1283         xpt_action((union ccb *)&ccb_h);
 1284 }
 1285 
 1286 u_int32_t
 1287 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
 1288                  u_int32_t openings, u_int32_t arg,
 1289                  int getcount_only)
 1290 {
 1291         struct ccb_relsim crs;
 1292 
 1293         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
 1294             relsim_flags, openings, arg, getcount_only));
 1295         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
 1296         crs.ccb_h.func_code = XPT_REL_SIMQ;
 1297         crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
 1298         crs.release_flags = relsim_flags;
 1299         crs.openings = openings;
 1300         crs.release_timeout = arg;
 1301         xpt_action((union ccb *)&crs);
 1302         return (crs.qfrozen_cnt);
 1303 }
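
/*
 * Illustrative sketch (not part of this file): cam_freeze_devq() and
 * cam_release_devq() are commonly paired to hold off a device queue
 * for a fixed interval; cam_periph_freeze_after_event() below uses
 * exactly this idiom with an interval computed from an event
 * timestamp.  The helper name is hypothetical, and the path is
 * assumed valid with the appropriate CAM locks held.
 */
static void
xx_settle_for(struct cam_path *path, u_int32_t ms)
{
        cam_freeze_devq(path);
        cam_release_devq(path,
                         RELSIM_RELEASE_AFTER_TIMEOUT,
                         /*openings*/0,
                         /*timeout*/ms,
                         /*getcount_only*/0);
}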
 1304 
 1305 #define saved_ccb_ptr ppriv_ptr0
 1306 static void
 1307 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
 1308 {
 1309         union ccb      *saved_ccb;
 1310         cam_status      status;
 1311         struct scsi_start_stop_unit *scsi_cmd;
 1312         int             error = 0, error_code, sense_key, asc, ascq;
 1313 
 1314         scsi_cmd = (struct scsi_start_stop_unit *)
 1315             &done_ccb->csio.cdb_io.cdb_bytes;
 1316         status = done_ccb->ccb_h.status;
 1317 
 1318         if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 1319                 if (scsi_extract_sense_ccb(done_ccb,
 1320                     &error_code, &sense_key, &asc, &ascq)) {
 1321                         /*
 1322                          * If the error is "invalid field in CDB",
 1323                          * and the load/eject flag is set, turn the
 1324                          * flag off and try again.  This is just in
 1325                          * case the drive in question barfs on the
 1326                          * load/eject flag.  The CAM code should set
 1327                          * the load/eject flag by default for
 1328                          * removable media.
 1329                          */
 1330                         if ((scsi_cmd->opcode == START_STOP_UNIT) &&
 1331                             ((scsi_cmd->how & SSS_LOEJ) != 0) &&
 1332                              (asc == 0x24) && (ascq == 0x00)) {
 1333                                 scsi_cmd->how &= ~SSS_LOEJ;
 1334                                 if (status & CAM_DEV_QFRZN) {
 1335                                         cam_release_devq(done_ccb->ccb_h.path,
 1336                                             0, 0, 0, 0);
 1337                                         done_ccb->ccb_h.status &=
 1338                                             ~CAM_DEV_QFRZN;
 1339                                 }
 1340                                 xpt_action(done_ccb);
 1341                                 goto out;
 1342                         }
 1343                 }
 1344                 error = cam_periph_error(done_ccb, 0,
 1345                     SF_RETRY_UA | SF_NO_PRINT);
 1346                 if (error == ERESTART)
 1347                         goto out;
 1348                 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
 1349                         cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
 1350                         done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 1351                 }
 1352         } else {
 1353                 /*
 1354                  * If we have successfully taken a device from the not
 1355                  * ready to ready state, re-scan the device and re-get
 1356                  * the inquiry information.  Many devices (mostly disks)
 1357                  * don't properly report their inquiry information unless
 1358                  * they are spun up.
 1359                  */
 1360                 if (scsi_cmd->opcode == START_STOP_UNIT)
 1361                         xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
 1362         }
 1363 
 1364         /*
 1365          * After the recovery action(s) have completed, return to the original
 1366          * CCB.  If the recovery CCB itself has failed, despite its own possible
 1367          * retries and recovery, assume we are back in the state we started
 1368          * from, but with no recovery options left.  In that case, after the
 1369          * final attempt below, we cancel any further retries, which also blocks
 1370          * any new recovery attempts for this CCB, and the result is the final
 1371          * one returned to the CCB owner.
 1372          */
 1373         saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
 1374         bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
 1375         xpt_free_ccb(saved_ccb);
 1376         if (done_ccb->ccb_h.cbfcnp != camperiphdone)
 1377                 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
 1378         if (error != 0)
 1379                 done_ccb->ccb_h.retry_count = 0;
 1380         xpt_action(done_ccb);
 1381 
 1382 out:
 1383         /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
 1384         cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
 1385 }
 1386 
 1387 /*
 1388  * Generic Async Event handler.  Peripheral drivers usually
 1389  * filter out the events that require personal attention,
 1390  * and leave the rest to this function.
 1391  */
 1392 void
 1393 cam_periph_async(struct cam_periph *periph, u_int32_t code,
 1394                  struct cam_path *path, void *arg)
 1395 {
 1396         switch (code) {
 1397         case AC_LOST_DEVICE:
 1398                 cam_periph_invalidate(periph);
 1399                 break; 
 1400         default:
 1401                 break;
 1402         }
 1403 }
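
/*
 * Illustrative sketch (not part of this file): a peripheral driver's
 * async callback, as registered with xpt_register_async(), usually
 * handles the codes it cares about itself and delegates everything
 * else to cam_periph_async() above.  The "xx" driver name is
 * hypothetical.
 */
static void
xxasync(void *callback_arg, u_int32_t code, struct cam_path *path,
    void *arg)
{
        struct cam_periph *periph;

        periph = (struct cam_periph *)callback_arg;
        switch (code) {
        case AC_UNIT_ATTENTION:
                /* Driver-specific handling would go here. */
                break;
        default:
                cam_periph_async(periph, code, path, arg);
                break;
        }
}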
 1404 
 1405 void
 1406 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
 1407 {
 1408         struct ccb_getdevstats cgds;
 1409 
 1410         xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 1411         cgds.ccb_h.func_code = XPT_GDEV_STATS;
 1412         xpt_action((union ccb *)&cgds);
 1413         cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
 1414 }
 1415 
 1416 void
 1417 cam_periph_freeze_after_event(struct cam_periph *periph,
 1418                               struct timeval* event_time, u_int duration_ms)
 1419 {
 1420         struct timeval delta;
 1421         struct timeval duration_tv;
 1422 
 1423         if (!timevalisset(event_time))
 1424                 return;
 1425 
 1426         microtime(&delta);
 1427         timevalsub(&delta, event_time);
 1428         duration_tv.tv_sec = duration_ms / 1000;
 1429         duration_tv.tv_usec = (duration_ms % 1000) * 1000;
 1430         if (timevalcmp(&delta, &duration_tv, <)) {
 1431                 timevalsub(&duration_tv, &delta);
 1432 
 1433                 duration_ms = duration_tv.tv_sec * 1000;
 1434                 duration_ms += duration_tv.tv_usec / 1000;
 1435                 cam_freeze_devq(periph->path); 
 1436                 cam_release_devq(periph->path,
 1437                                 RELSIM_RELEASE_AFTER_TIMEOUT,
 1438                                 /*reduction*/0,
 1439                                 /*timeout*/duration_ms,
 1440                                 /*getcount_only*/0);
 1441         }
 1442 
 1443 }
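
/*
 * Illustrative sketch (not part of this file): a driver can record the
 * time of the last reset in its softc from the async path and then use
 * cam_periph_freeze_after_event() to keep new commands off the queue
 * until a settle period has elapsed since that event, much as
 * cam_periph_bus_settle() above does with the SIM's last_reset time.
 * The softc layout and the 5000ms figure are hypothetical.
 */
struct xx_softc {
        struct timeval last_reset;      /* updated on, e.g., AC_SENT_BDR */
};

static void
xx_settle_after_reset(struct cam_periph *periph)
{
        struct xx_softc *softc;

        softc = (struct xx_softc *)periph->softc;
        cam_periph_freeze_after_event(periph, &softc->last_reset,
            /*duration_ms*/5000);
}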
 1444 
 1445 static int
 1446 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
 1447     cam_flags camflags, u_int32_t sense_flags,
 1448     int *openings, u_int32_t *relsim_flags,
 1449     u_int32_t *timeout, u_int32_t *action, const char **action_string)
 1450 {
 1451         int error;
 1452 
 1453         switch (ccb->csio.scsi_status) {
 1454         case SCSI_STATUS_OK:
 1455         case SCSI_STATUS_COND_MET:
 1456         case SCSI_STATUS_INTERMED:
 1457         case SCSI_STATUS_INTERMED_COND_MET:
 1458                 error = 0;
 1459                 break;
 1460         case SCSI_STATUS_CMD_TERMINATED:
 1461         case SCSI_STATUS_CHECK_COND:
 1462                 error = camperiphscsisenseerror(ccb, orig_ccb,
 1463                                                 camflags,
 1464                                                 sense_flags,
 1465                                                 openings,
 1466                                                 relsim_flags,
 1467                                                 timeout,
 1468                                                 action,
 1469                                                 action_string);
 1470                 break;
 1471         case SCSI_STATUS_QUEUE_FULL:
 1472         {
 1473                 /* The retry count is not decremented for QUEUE FULL. */
 1474                 struct ccb_getdevstats cgds;
 1475 
 1476                 /*
 1477                  * First off, find out what the current
 1478                  * transaction counts are.
 1479                  */
 1480                 xpt_setup_ccb(&cgds.ccb_h,
 1481                               ccb->ccb_h.path,
 1482                               CAM_PRIORITY_NORMAL);
 1483                 cgds.ccb_h.func_code = XPT_GDEV_STATS;
 1484                 xpt_action((union ccb *)&cgds);
 1485 
 1486                 /*
 1487                  * If we were the only transaction active, treat
 1488                  * the QUEUE FULL as if it were a BUSY condition.
 1489                  */
 1490                 if (cgds.dev_active != 0) {
 1491                         int total_openings;
 1492 
 1493                         /*
 1494                          * Reduce the number of openings to
 1495                          * be 1 less than the amount it took
 1496                          * to get a queue full bounded by the
 1497                          * minimum allowed tag count for this
 1498                          * device.
 1499                          */
 1500                         total_openings = cgds.dev_active + cgds.dev_openings;
 1501                         *openings = cgds.dev_active;
 1502                         if (*openings < cgds.mintags)
 1503                                 *openings = cgds.mintags;
 1504                         if (*openings < total_openings)
 1505                                 *relsim_flags = RELSIM_ADJUST_OPENINGS;
 1506                         else {
 1507                                 /*
 1508                                  * Some devices report queue full for
 1509                                  * temporary resource shortages.  For
 1510                                  * this reason, we allow a minimum
 1511                                  * tag count to be entered via a
 1512                                  * quirk entry to prevent the queue
 1513                                  * count on these devices from falling
 1514                                  * to a pessimistically low value.  We
 1515                                  * still wait for the next successful
 1516                                  * completion, however, before queueing
 1517                                  * more transactions to the device.
 1518                                  */
 1519                                 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
 1520                         }
 1521                         *timeout = 0;
 1522                         error = ERESTART;
 1523                         *action &= ~SSQ_PRINT_SENSE;
 1524                         break;
 1525                 }
 1526                 /* FALLTHROUGH */
 1527         }
 1528         case SCSI_STATUS_BUSY:
 1529                 /*
 1530                  * Restart the queue after either another
 1531                  * command completes or a 1 second timeout.
 1532                  */
 1533                 if ((sense_flags & SF_RETRY_BUSY) != 0 ||
 1534                     (ccb->ccb_h.retry_count--) > 0) {
 1535                         error = ERESTART;
 1536                         *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
 1537                                       | RELSIM_RELEASE_AFTER_CMDCMPLT;
 1538                         *timeout = 1000;
 1539                 } else {
 1540                         error = EIO;
 1541                 }
 1542                 break;
 1543         case SCSI_STATUS_RESERV_CONFLICT:
 1544         default:
 1545                 error = EIO;
 1546                 break;
 1547         }
 1548         return (error);
 1549 }
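
/*
 * Worked example for the QUEUE FULL handling above (illustrative
 * numbers): suppose a QUEUE FULL arrives while cgds.dev_active is 30,
 * cgds.dev_openings is 2, and cgds.mintags is 2.  Then total_openings
 * is 32 and *openings is reduced to 30; since 30 < 32,
 * RELSIM_ADJUST_OPENINGS is set and the command is requeued via
 * ERESTART.  If instead clamping *openings up to mintags had made it
 * reach total_openings, the queue would be released only after the
 * next successful completion (RELSIM_RELEASE_AFTER_CMDCMPLT).
 */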
 1550 
 1551 static int
 1552 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
 1553     cam_flags camflags, u_int32_t sense_flags,
 1554     int *openings, u_int32_t *relsim_flags,
 1555     u_int32_t *timeout, u_int32_t *action, const char **action_string)
 1556 {
 1557         struct cam_periph *periph;
 1558         union ccb *orig_ccb = ccb;
 1559         int error, recoveryccb;
 1560 
 1561 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
 1562         if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
 1563                 biotrack(ccb->csio.bio, __func__);
 1564 #endif
 1565 
 1566         periph = xpt_path_periph(ccb->ccb_h.path);
 1567         recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
 1568         if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
 1569                 /*
 1570                  * If error recovery is already in progress, don't attempt
 1571                  * to process this error, but requeue it unconditionally
 1572                  * and attempt to process it once error recovery has
 1573                  * completed.  This failed command is probably related to
 1574                  * the error that caused the currently active error recovery
 1575                  * action, so our current recovery efforts should also
 1576                  * address this command.  Be aware that the error recovery
 1577                  * code assumes that only one recovery action is in progress
 1578                  * on a particular peripheral instance at any given time
 1579                  * (e.g. only one saved CCB for error recovery) so it is
 1580                  * imperative that we don't violate this assumption.
 1581                  */
 1582                 error = ERESTART;
 1583                 *action &= ~SSQ_PRINT_SENSE;
 1584         } else {
 1585                 scsi_sense_action err_action;
 1586                 struct ccb_getdev cgd;
 1587 
 1588                 /*
 1589                  * Grab the inquiry data for this device.
 1590                  */
 1591                 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
 1592                 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 1593                 xpt_action((union ccb *)&cgd);
 1594 
 1595                 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
 1596                     sense_flags);
 1597                 error = err_action & SS_ERRMASK;
 1598 
 1599                 /*
 1600                  * Do not autostart sequential access devices
 1601                  * to avoid unexpected tape loading.
 1602                  */
 1603                 if ((err_action & SS_MASK) == SS_START &&
 1604                     SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
 1605                         *action_string = "Will not autostart a "
 1606                             "sequential access device";
 1607                         goto sense_error_done;
 1608                 }
 1609 
 1610                 /*
 1611                  * Avoid recovery recursion if recovery action is the same.
 1612                  */
 1613                 if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
 1614                         if (((err_action & SS_MASK) == SS_START &&
 1615                              ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
 1616                             ((err_action & SS_MASK) == SS_TUR &&
 1617                              (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
 1618                                 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
 1619                                 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
 1620                                 *timeout = 500;
 1621                         }
 1622                 }
 1623 
 1624                 /*
 1625                  * If the recovery action will consume a retry,
 1626                  * make sure we actually have retries available.
 1627                  */
 1628                 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
 1629                         if (ccb->ccb_h.retry_count > 0 &&
 1630                             (periph->flags & CAM_PERIPH_INVALID) == 0)
 1631                                 ccb->ccb_h.retry_count--;
 1632                         else {
 1633                                 *action_string = "Retries exhausted";
 1634                                 goto sense_error_done;
 1635                         }
 1636                 }
 1637 
 1638                 if ((err_action & SS_MASK) >= SS_START) {
 1639                         /*
 1640                          * Do common portions of commands that
 1641                          * use recovery CCBs.
 1642                          */
 1643                         orig_ccb = xpt_alloc_ccb_nowait();
 1644                         if (orig_ccb == NULL) {
 1645                                 *action_string = "Can't allocate recovery CCB";
 1646                                 goto sense_error_done;
 1647                         }
 1648                         /*
 1649                          * Clear freeze flag for original request here, as
 1650                          * this freeze will be dropped as part of ERESTART.
 1651                          */
 1652                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 1653                         bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
 1654                 }
 1655 
 1656                 switch (err_action & SS_MASK) {
 1657                 case SS_NOP:
 1658                         *action_string = "No recovery action needed";
 1659                         error = 0;
 1660                         break;
 1661                 case SS_RETRY:
 1662                         *action_string = "Retrying command (per sense data)";
 1663                         error = ERESTART;
 1664                         break;
 1665                 case SS_FAIL:
 1666                         *action_string = "Unretryable error";
 1667                         break;
 1668                 case SS_START:
 1669                 {
 1670                         int le;
 1671 
 1672                         /*
 1673                          * Send a start unit command to the device, and
 1674                          * then retry the command.
 1675                          */
 1676                         *action_string = "Attempting to start unit";
 1677                         periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
 1678 
 1679                         /*
 1680                          * Check for removable media and set
 1681                          * load/eject flag appropriately.
 1682                          */
 1683                         if (SID_IS_REMOVABLE(&cgd.inq_data))
 1684                                 le = TRUE;
 1685                         else
 1686                                 le = FALSE;
 1687 
 1688                         scsi_start_stop(&ccb->csio,
 1689                                         /*retries*/1,
 1690                                         camperiphdone,
 1691                                         MSG_SIMPLE_Q_TAG,
 1692                                         /*start*/TRUE,
 1693                                         /*load/eject*/le,
 1694                                         /*immediate*/FALSE,
 1695                                         SSD_FULL_SIZE,
 1696                                         /*timeout*/50000);
 1697                         break;
 1698                 }
 1699                 case SS_TUR:
 1700                 {
 1701                         /*
 1702                          * Send a Test Unit Ready to the device.
 1703                          * If the 'many' flag is set, we send 120
 1704                          * test unit ready commands, one every half 
 1705                          * second.  Otherwise, we just send one TUR.
 1706                          * We only want to do this if the retry 
 1707                          * count has not been exhausted.
 1708                          */
 1709                         int retries;
 1710 
 1711                         if ((err_action & SSQ_MANY) != 0) {
 1712                                 *action_string = "Polling device for readiness";
 1713                                 retries = 120;
 1714                         } else {
 1715                                 *action_string = "Testing device for readiness";
 1716                                 retries = 1;
 1717                         }
 1718                         periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
 1719                         scsi_test_unit_ready(&ccb->csio,
 1720                                              retries,
 1721                                              camperiphdone,
 1722                                              MSG_SIMPLE_Q_TAG,
 1723                                              SSD_FULL_SIZE,
 1724                                              /*timeout*/5000);
 1725 
 1726                         /*
 1727                          * Accomplish our 500ms delay by deferring
 1728                          * the release of our device queue appropriately.
 1729                          */
 1730                         *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
 1731                         *timeout = 500;
 1732                         break;
 1733                 }
 1734                 default:
 1735                         panic("Unhandled error action %x", err_action);
 1736                 }
 1737                 
 1738                 if ((err_action & SS_MASK) >= SS_START) {
 1739                         /*
 1740                          * Drop the priority, so that the recovery
 1741                          * CCB is the first to execute.  Freeze the queue
 1742                          * after this command is sent so that we can
 1743                          * restore the old csio and have it queued in
 1744                          * the proper order before we release normal 
 1745                          * transactions to the device.
 1746                          */
 1747                         ccb->ccb_h.pinfo.priority--;
 1748                         ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
 1749                         ccb->ccb_h.saved_ccb_ptr = orig_ccb;
 1750                         error = ERESTART;
 1751                         *orig = orig_ccb;
 1752                 }
 1753 
 1754 sense_error_done:
 1755                 *action = err_action;
 1756         }
 1757         return (error);
 1758 }
 1759 
 1760 /*
 1761  * Generic error handler.  Peripheral drivers usually filter
 1762  * out the errors that they handle in a unique manner, then
 1763  * call this function.
 1764  */
 1765 int
 1766 cam_periph_error(union ccb *ccb, cam_flags camflags,
 1767                  u_int32_t sense_flags)
 1768 {
 1769         struct cam_path *newpath;
 1770         union ccb  *orig_ccb, *scan_ccb;
 1771         struct cam_periph *periph;
 1772         const char *action_string;
 1773         cam_status  status;
 1774         int         frozen, error, openings, devctl_err;
 1775         u_int32_t   action, relsim_flags, timeout;
 1776 
 1777         action = SSQ_PRINT_SENSE;
 1778         periph = xpt_path_periph(ccb->ccb_h.path);
 1779         action_string = NULL;
 1780         status = ccb->ccb_h.status;
 1781         frozen = (status & CAM_DEV_QFRZN) != 0;
 1782         status &= CAM_STATUS_MASK;
 1783         devctl_err = openings = relsim_flags = timeout = 0;
 1784         orig_ccb = ccb;
 1785 
 1786         /* Filter the errors that should be reported via devctl */
 1787         switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
 1788         case CAM_CMD_TIMEOUT:
 1789         case CAM_REQ_ABORTED:
 1790         case CAM_REQ_CMP_ERR:
 1791         case CAM_REQ_TERMIO:
 1792         case CAM_UNREC_HBA_ERROR:
 1793         case CAM_DATA_RUN_ERR:
 1794         case CAM_SCSI_STATUS_ERROR:
 1795         case CAM_ATA_STATUS_ERROR:
 1796         case CAM_SMP_STATUS_ERROR:
 1797                 devctl_err++;
 1798                 break;
 1799         default:
 1800                 break;
 1801         }
 1802 
 1803         switch (status) {
 1804         case CAM_REQ_CMP:
 1805                 error = 0;
 1806                 action &= ~SSQ_PRINT_SENSE;
 1807                 break;
 1808         case CAM_SCSI_STATUS_ERROR:
 1809                 error = camperiphscsistatuserror(ccb, &orig_ccb,
 1810                     camflags, sense_flags, &openings, &relsim_flags,
 1811                     &timeout, &action, &action_string);
 1812                 break;
 1813         case CAM_AUTOSENSE_FAIL:
 1814                 error = EIO;    /* we have to kill the command */
 1815                 break;
 1816         case CAM_UA_ABORT:
 1817         case CAM_UA_TERMIO:
 1818         case CAM_MSG_REJECT_REC:
 1819                 /* XXX Don't know that these are correct */
 1820                 error = EIO;
 1821                 break;
 1822         case CAM_SEL_TIMEOUT:
 1823                 if ((camflags & CAM_RETRY_SELTO) != 0) {
 1824                         if (ccb->ccb_h.retry_count > 0 &&
 1825                             (periph->flags & CAM_PERIPH_INVALID) == 0) {
 1826                                 ccb->ccb_h.retry_count--;
 1827                                 error = ERESTART;
 1828 
 1829                                 /*
 1830                                  * Wait a bit to give the device
 1831                                  * time to recover before we try again.
 1832                                  */
 1833                                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
 1834                                 timeout = periph_selto_delay;
 1835                                 break;
 1836                         }
 1837                         action_string = "Retries exhausted";
 1838                 }
 1839                 /* FALLTHROUGH */
 1840         case CAM_DEV_NOT_THERE:
 1841                 error = ENXIO;
 1842                 action = SSQ_LOST;
 1843                 break;
 1844         case CAM_REQ_INVALID:
 1845         case CAM_PATH_INVALID:
 1846         case CAM_NO_HBA:
 1847         case CAM_PROVIDE_FAIL:
 1848         case CAM_REQ_TOO_BIG:
 1849         case CAM_LUN_INVALID:
 1850         case CAM_TID_INVALID:
 1851         case CAM_FUNC_NOTAVAIL:
 1852                 error = EINVAL;
 1853                 break;
 1854         case CAM_SCSI_BUS_RESET:
 1855         case CAM_BDR_SENT:
 1856                 /*
 1857                  * Commands that repeatedly time out and cause these
 1858                  * kinds of error recovery actions should return
 1859                  * CAM_CMD_TIMEOUT, which allows us to safely assume
 1860                  * that this command was an innocent bystander to
 1861                  * these events and should be unconditionally
 1862                  * retried.
 1863                  */
 1864         case CAM_REQUEUE_REQ:
 1865                 /* Unconditional requeue if device is still there */
 1866                 if (periph->flags & CAM_PERIPH_INVALID) {
 1867                         action_string = "Periph was invalidated";
 1868                         error = EIO;
 1869                 } else if (sense_flags & SF_NO_RETRY) {
 1870                         error = EIO;
 1871                         action_string = "Retry was blocked";
 1872                 } else {
 1873                         error = ERESTART;
 1874                         action &= ~SSQ_PRINT_SENSE;
 1875                 }
 1876                 break;
 1877         case CAM_RESRC_UNAVAIL:
 1878                 /* Wait a bit for the resource shortage to abate. */
 1879                 timeout = periph_noresrc_delay;
 1880                 /* FALLTHROUGH */
 1881         case CAM_BUSY:
 1882                 if (timeout == 0) {
 1883                         /* Wait a bit for the busy condition to abate. */
 1884                         timeout = periph_busy_delay;
 1885                 }
 1886                 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
 1887                 /* FALLTHROUGH */
 1888         case CAM_ATA_STATUS_ERROR:
 1889         case CAM_REQ_CMP_ERR:
 1890         case CAM_CMD_TIMEOUT:
 1891         case CAM_UNEXP_BUSFREE:
 1892         case CAM_UNCOR_PARITY:
 1893         case CAM_DATA_RUN_ERR:
 1894         default:
 1895                 if (periph->flags & CAM_PERIPH_INVALID) {
 1896                         error = EIO;
 1897                         action_string = "Periph was invalidated";
 1898                 } else if (ccb->ccb_h.retry_count == 0) {
 1899                         error = EIO;
 1900                         action_string = "Retries exhausted";
 1901                 } else if (sense_flags & SF_NO_RETRY) {
 1902                         error = EIO;
 1903                         action_string = "Retry was blocked";
 1904                 } else {
 1905                         ccb->ccb_h.retry_count--;
 1906                         error = ERESTART;
 1907                 }
 1908                 break;
 1909         }
 1910 
 1911         if ((sense_flags & SF_PRINT_ALWAYS) ||
 1912             CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
 1913                 action |= SSQ_PRINT_SENSE;
 1914         else if (sense_flags & SF_NO_PRINT)
 1915                 action &= ~SSQ_PRINT_SENSE;
 1916         if ((action & SSQ_PRINT_SENSE) != 0)
 1917                 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
 1918         if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
 1919                 if (error != ERESTART) {
 1920                         if (action_string == NULL)
 1921                                 action_string = "Unretryable error";
 1922                         xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
 1923                             error, action_string);
 1924                 } else if (action_string != NULL)
 1925                         xpt_print(ccb->ccb_h.path, "%s\n", action_string);
 1926                 else {
 1927                         xpt_print(ccb->ccb_h.path,
 1928                             "Retrying command, %d more tries remain\n",
 1929                             ccb->ccb_h.retry_count);
 1930                 }
 1931         }
 1932 
 1933         if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
 1934                 cam_periph_devctl_notify(orig_ccb);
 1935 
 1936         if ((action & SSQ_LOST) != 0) {
 1937                 lun_id_t lun_id;
 1938 
 1939                 /*
 1940                  * For a selection timeout, we consider all of the LUNs on
 1941                  * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
 1942                  * then we only get rid of the device(s) specified by the
 1943                  * path in the original CCB.
 1944                  */
 1945                 if (status == CAM_SEL_TIMEOUT)
 1946                         lun_id = CAM_LUN_WILDCARD;
 1947                 else
 1948                         lun_id = xpt_path_lun_id(ccb->ccb_h.path);
 1949 
 1950                 /* Should we do more if we can't create the path? */
 1951                 if (xpt_create_path(&newpath, periph,
 1952                                     xpt_path_path_id(ccb->ccb_h.path),
 1953                                     xpt_path_target_id(ccb->ccb_h.path),
 1954                                     lun_id) == CAM_REQ_CMP) {
 1955 
 1956                         /*
 1957                          * Let peripheral drivers know that this
 1958                          * device has gone away.
 1959                          */
 1960                         xpt_async(AC_LOST_DEVICE, newpath, NULL);
 1961                         xpt_free_path(newpath);
 1962                 }
 1963         }
 1964 
 1965         /* Broadcast UNIT ATTENTIONs to all periphs. */
 1966         if ((action & SSQ_UA) != 0)
 1967                 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
 1968 
 1969         /* Rescan target on "Reported LUNs data has changed" */
 1970         if ((action & SSQ_RESCAN) != 0) {
 1971                 if (xpt_create_path(&newpath, NULL,
 1972                                     xpt_path_path_id(ccb->ccb_h.path),
 1973                                     xpt_path_target_id(ccb->ccb_h.path),
 1974                                     CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
 1975 
 1976                         scan_ccb = xpt_alloc_ccb_nowait();
 1977                         if (scan_ccb != NULL) {
 1978                                 scan_ccb->ccb_h.path = newpath;
 1979                                 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
 1980                                 scan_ccb->crcn.flags = 0;
 1981                                 xpt_rescan(scan_ccb);
 1982                         } else {
 1983                                 xpt_print(newpath,
 1984                                     "Can't allocate CCB to rescan target\n");
 1985                                 xpt_free_path(newpath);
 1986                         }
 1987                 }
 1988         }
 1989 
 1990         /* Attempt a retry */
 1991         if (error == ERESTART || error == 0) {
 1992                 if (frozen != 0)
 1993                         ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 1994                 if (error == ERESTART)
 1995                         xpt_action(ccb);
 1996                 if (frozen != 0)
 1997                         cam_release_devq(ccb->ccb_h.path,
 1998                                          relsim_flags,
 1999                                          openings,
 2000                                          timeout,
 2001                                          /*getcount_only*/0);
 2002         }
 2003 
 2004         return (error);
 2005 }
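
/*
 * Illustrative sketch (not part of this file): peripheral drivers
 * normally wrap cam_periph_error() in a small error routine that first
 * intercepts any errors they handle specially and passes the rest
 * through; that routine is what gets handed to cam_periph_runccb() as
 * the error_routine.  The "xxerror" name is hypothetical.
 */
static int
xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{

        /* Driver-specific filtering (quirks, media checks) would go here. */
        return (cam_periph_error(ccb, cam_flags, sense_flags));
}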
 2006 
 2007 #define CAM_PERIPH_DEVD_MSG_SIZE        256
 2008 
 2009 static void
 2010 cam_periph_devctl_notify(union ccb *ccb)
 2011 {
 2012         struct cam_periph *periph;
 2013         struct ccb_getdev *cgd;
 2014         struct sbuf sb;
 2015         int serr, sk, asc, ascq;
 2016         char *sbmsg, *type;
 2017 
 2018         sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
 2019         if (sbmsg == NULL)
 2020                 return;
 2021 
 2022         sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
 2023 
 2024         periph = xpt_path_periph(ccb->ccb_h.path);
 2025         sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
 2026             periph->unit_number);
 2027 
 2028         sbuf_printf(&sb, "serial=\"");
 2029         if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
 2030                 xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
 2031                     CAM_PRIORITY_NORMAL);
 2032                 cgd->ccb_h.func_code = XPT_GDEV_TYPE;
 2033                 xpt_action((union ccb *)cgd);
 2034 
 2035                 if (cgd->ccb_h.status == CAM_REQ_CMP)
 2036                         sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
 2037                 xpt_free_ccb((union ccb *)cgd);
 2038         }
 2039         sbuf_printf(&sb, "\" ");
 2040         sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
 2041 
 2042         switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
 2043         case CAM_CMD_TIMEOUT:
 2044                 sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
 2045                 type = "timeout";
 2046                 break;
 2047         case CAM_SCSI_STATUS_ERROR:
 2048                 sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
 2049                 if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
 2050                         sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
 2051                             serr, sk, asc, ascq);
 2052                 type = "error";
 2053                 break;
 2054         case CAM_ATA_STATUS_ERROR:
 2055                 sbuf_printf(&sb, "RES=\"");
 2056                 ata_res_sbuf(&ccb->ataio.res, &sb);
 2057                 sbuf_printf(&sb, "\" ");
 2058                 type = "error";
 2059                 break;
 2060         default:
 2061                 type = "error";
 2062                 break;
 2063         }
 2064 
 2065         if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
 2066                 sbuf_printf(&sb, "CDB=\"");
 2067                 scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
 2068                 sbuf_printf(&sb, "\" ");
 2069         } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
 2070                 sbuf_printf(&sb, "ACB=\"");
 2071                 ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
 2072                 sbuf_printf(&sb, "\" ");
 2073         }
 2074 
 2075         if (sbuf_finish(&sb) == 0)
 2076                 devctl_notify("CAM", "periph", type, sbuf_data(&sb));
 2077         sbuf_delete(&sb);
 2078         free(sbmsg, M_CAMPERIPH);
 2079 }
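
/*
 * Illustrative note (not part of this file): the notifications emitted
 * above appear to devd(8) with system "CAM", subsystem "periph", and a
 * type of "error" or "timeout", carrying the key=value pairs built in
 * the sbuf.  A devd.conf rule along these lines (the action shown is
 * hypothetical) could react to them:
 *
 *      notify 10 {
 *              match "system"          "CAM";
 *              match "subsystem"       "periph";
 *              match "type"            "error";
 *              action "logger -p kern.warning 'CAM error on $device'";
 *      };
 */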
 2080 
 2081 /*
 2082  * Sysctl to force an invalidation of the drive right now. Can be
 2083  * called with CTLFLAG_MPSAFE since we take periph lock.
 2084  */
 2085 int
 2086 cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
 2087 {
 2088         struct cam_periph *periph;
 2089         int error, value;
 2090 
 2091         periph = arg1;
 2092         value = 0;
 2093         error = sysctl_handle_int(oidp, &value, 0, req);
 2094         if (error != 0 || req->newptr == NULL || value != 1)
 2095                 return (error);
 2096 
 2097         cam_periph_lock(periph);
 2098         cam_periph_invalidate(periph);
 2099         cam_periph_unlock(periph);
 2100 
 2101         return (0);
 2102 }
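
/*
 * Illustrative sketch (not part of this file): a driver exposes the
 * handler above from its own sysctl tree roughly as follows; the
 * sysctl context and tree are assumed to have been created during
 * attach, and the function name is hypothetical (<sys/sysctl.h>).
 */
static void
xx_sysctl_init(struct cam_periph *periph, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *tree)
{
        SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "invalidate", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
            periph, 0, cam_periph_invalidate_sysctl, "I",
            "Write 1 to invalidate the drive immediately");
}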
