FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/dpt.c

    1 /*      $NetBSD: dpt.c,v 1.62 2008/06/08 12:43:51 tsutsui Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
    9  * Aerospace Simulation Facility, NASA Ames Research Center.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30  * POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 /*
   34  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
   35  * Copyright (c) 2000 Adaptec Corporation
   36  * All rights reserved.
   37  *
   38  * TERMS AND CONDITIONS OF USE
   39  *
   40  * Redistribution and use in source form, with or without modification, are
   41  * permitted provided that redistributions of source code must retain the
   42  * above copyright notice, this list of conditions and the following disclaimer.
   43  *
   44  * This software is provided `as is' by Adaptec and any express or implied
   45  * warranties, including, but not limited to, the implied warranties of
   46  * merchantability and fitness for a particular purpose, are disclaimed. In no
   47  * event shall Adaptec be liable for any direct, indirect, incidental, special,
   48  * exemplary or consequential damages (including, but not limited to,
   49  * procurement of substitute goods or services; loss of use, data, or profits;
   50  * or business interruptions) however caused and on any theory of liability,
   51  * whether in contract, strict liability, or tort (including negligence or
   52  * otherwise) arising in any way out of the use of this driver software, even
   53  * if advised of the possibility of such damage.
   54  */
   55 
   56 /*
   57  * Portions of this code fall under the following copyright:
   58  *
   59  * Originally written by Julian Elischer (julian@tfs.com)
   60  * for TRW Financial Systems for use under the MACH(2.5) operating system.
   61  *
   62  * TRW Financial Systems, in accordance with their agreement with Carnegie
   63  * Mellon University, makes this software available to CMU to distribute
   64  * or use in any manner that they see fit as long as this message is kept with
   65  * the software. For this reason TFS also grants any other persons or
   66  * organisations permission to use or modify this software.
   67  *
   68  * TFS supplies this software to be publicly redistributed
   69  * on the understanding that TFS is not responsible for the correct
   70  * functioning of this software in any circumstances.
   71  */
   72 
   73 #include <sys/cdefs.h>
   74 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.62 2008/06/08 12:43:51 tsutsui Exp $");
   75 
   76 #include <sys/param.h>
   77 #include <sys/systm.h>
   78 #include <sys/device.h>
   79 #include <sys/queue.h>
   80 #include <sys/buf.h>
   81 #include <sys/endian.h>
   82 #include <sys/conf.h>
   83 #include <sys/kauth.h>
   84 #include <sys/proc.h>
   85 
   86 #include <uvm/uvm_extern.h>
   87 
   88 #include <sys/bus.h>
   89 #ifdef i386
   90 #include <machine/pio.h>
   91 #include <machine/cputypes.h>
   92 #endif
   93 
   94 #include <dev/scsipi/scsi_all.h>
   95 #include <dev/scsipi/scsipi_all.h>
   96 #include <dev/scsipi/scsiconf.h>
   97 
   98 #include <dev/ic/dptreg.h>
   99 #include <dev/ic/dptvar.h>
  100 
  101 #include <dev/i2o/dptivar.h>
  102 
  103 #ifdef DEBUG
  104 #define DPRINTF(x)              printf x
  105 #else
  106 #define DPRINTF(x)
  107 #endif
  108 
  109 #define dpt_inb(x, o)           \
  110     bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
  111 #define dpt_outb(x, o, d)       \
  112     bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
  113 
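       /*
        * Table mapping the four-character model codes returned by the EATA
        * inquiry to marketing names.  Entries are (code, name) pairs; the
        * trailing NULL code ends the table, and its name is reported when
        * no code matches.
        */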
  114 static const char * const dpt_cname[] = {
  115         "3334", "SmartRAID IV",
  116         "3332", "SmartRAID IV",
  117         "2144", "SmartCache IV",
  118         "2044", "SmartCache IV",
  119         "2142", "SmartCache IV",
  120         "2042", "SmartCache IV",
  121         "2041", "SmartCache IV",
  122         "3224", "SmartRAID III",
  123         "3222", "SmartRAID III",
  124         "3021", "SmartRAID III",
  125         "2124", "SmartCache III",
  126         "2024", "SmartCache III",
  127         "2122", "SmartCache III",
  128         "2022", "SmartCache III",
  129         "2021", "SmartCache III",
  130         "2012", "SmartCache Plus",
  131         "2011", "SmartCache Plus",
  132         NULL,   "<unknown>",
  133 };
  134 
  135 static void     *dpt_sdh;
  136 
  137 dev_type_open(dptopen);
  138 dev_type_ioctl(dptioctl);
  139 
  140 const struct cdevsw dpt_cdevsw = {
  141         dptopen, nullclose, noread, nowrite, dptioctl,
  142         nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
  143 };
  144 
  145 extern struct cfdriver dpt_cd;
  146 
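       /*
        * Driver signature, copied out to user space by the DPT_SIGNATURE
        * ioctl.  The description string is filled in at attach time by
        * dpt_init().
        */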
  147 static struct dpt_sig dpt_sig = {
  148         { 'd', 'P', 't', 'S', 'i', 'G'},
  149         SIG_VERSION,
  150 #if defined(i386)
  151         PROC_INTEL,
  152 #elif defined(powerpc)
  153         PROC_POWERPC,
  154 #elif defined(alpha)
  155         PROC_ALPHA,
  156 #elif defined(__mips__)
  157         PROC_MIPS,
  158 #elif defined(sparc64)
  159         PROC_ULTRASPARC,
  160 #else
  161         0xff,
  162 #endif
  163 #if defined(i386)
  164         PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
  165 #else
  166         0,
  167 #endif
  168         FT_HBADRVR,
  169         0,
  170         OEM_DPT,
  171         OS_FREE_BSD,    /* XXX */
  172         CAP_ABOVE16MB,
  173         DEV_ALL,
  174         ADF_ALL_EATA,
  175         0,
  176         0,
  177         DPT_VERSION,
  178         DPT_REVISION,
  179         DPT_SUBREVISION,
  180         DPT_MONTH,
  181         DPT_DAY,
  182         DPT_YEAR,
  183         ""              /* Will be filled later */
  184 };
  185 
  186 static void     dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
  187 static void     dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
  188 static int      dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
  189 static int      dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
  190 static void     dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
  191 static int      dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
  192 static void     dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
  193 static void     dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
  194 static void     dpt_minphys(struct buf *);
  195 static int      dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
  196                                 struct lwp *);
  197 static void     dpt_scsipi_request(struct scsipi_channel *,
  198                                    scsipi_adapter_req_t, void *);
  199 static void     dpt_shutdown(void *);
  200 static void     dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
  201 static int      dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);
  202 
  203 static inline struct dpt_ccb    *dpt_ccb_alloc(struct dpt_softc *);
  204 static inline void      dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
  205 
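       /*
        * CCB allocation and freeing.  CCBs live on a simple free list that
        * is protected by splbio().  dpt_ccb_alloc() does not check for an
        * empty list; callers are assumed to be limited by the adapter
        * openings established in dpt_init().
        */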
  206 static inline struct dpt_ccb *
  207 dpt_ccb_alloc(struct dpt_softc *sc)
  208 {
  209         struct dpt_ccb *ccb;
  210         int s;
  211 
  212         s = splbio();
  213         ccb = SLIST_FIRST(&sc->sc_ccb_free);
  214         SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
  215         splx(s);
  216 
  217         return (ccb);
  218 }
  219 
  220 static inline void
  221 dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
  222 {
  223         int s;
  224 
  225         ccb->ccb_flg = 0;
  226         ccb->ccb_savesp = NULL;
  227         s = splbio();
  228         SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
  229         splx(s);
  230 }
  231 
  232 /*
  233  * Handle an interrupt from the HBA.
  234  */
  235 int
  236 dpt_intr(void *cookie)
  237 {
  238         struct dpt_softc *sc;
  239         struct dpt_ccb *ccb;
  240         struct eata_sp *sp;
  241         volatile int junk;
  242         int forus;
  243 
  244         sc = cookie;
  245         sp = sc->sc_stp;
  246         forus = 0;
  247 
  248         for (;;) {
  249                 /*
  250                  * HBA might have interrupted while we were dealing with the
  251                  * last completed command, since we ACK before we deal; keep
  252                  * polling.
  253                  */
  254                 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
  255                         break;
  256                 forus = 1;
  257 
  258                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
  259                     sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);
  260 
   261                 /* Might have looped before the HBA could clear HA_AUX_INTR. */
  262                 if (sp->sp_ccbid == -1) {
  263                         DELAY(50);
  264 
  265                         if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
  266                                 return (0);
  267 
  268                         printf("%s: no status\n", device_xname(&sc->sc_dv));
  269 
  270                         /* Re-sync DMA map */
  271                         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
  272                             sc->sc_stpoff, sizeof(struct eata_sp),
  273                             BUS_DMASYNC_POSTREAD);
  274                 }
  275 
  276                 /* Make sure CCB ID from status packet is realistic. */
  277                 if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
  278                         printf("%s: bogus status (returned CCB id %d)\n",
  279                             device_xname(&sc->sc_dv), sp->sp_ccbid);
  280 
  281                         /* Ack the interrupt */
  282                         sp->sp_ccbid = -1;
  283                         junk = dpt_inb(sc, HA_STATUS);
  284                         continue;
  285                 }
  286 
  287                 /* Sync up DMA map and cache cmd status. */
  288                 ccb = sc->sc_ccbs + sp->sp_ccbid;
  289 
  290                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
  291                     sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
  292 
  293                 ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
  294                 ccb->ccb_scsi_status = sp->sp_scsi_status;
  295                 if (ccb->ccb_savesp != NULL)
  296                         memcpy(ccb->ccb_savesp, sp, sizeof(*sp));
  297 
  298                 /*
  299                  * Ack the interrupt and process the CCB.  If this
  300                  * is a private CCB it's up to dpt_ccb_poll() to
  301                  * notice.
  302                  */
  303                 sp->sp_ccbid = -1;
  304                 ccb->ccb_flg |= CCB_INTR;
  305                 junk = dpt_inb(sc, HA_STATUS);
  306                 if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
  307                         dpt_ccb_done(sc, ccb);
  308                 else if ((ccb->ccb_flg & CCB_WAIT) != 0)
  309                         wakeup(ccb);
  310         }
  311 
  312         return (forus);
  313 }
  314 
  315 /*
  316  * Initialize and attach the HBA.  This is the entry point from bus
  317  * specific probe-and-attach code.
  318  */
  319 void
  320 dpt_init(struct dpt_softc *sc, const char *intrstr)
  321 {
  322         struct scsipi_adapter *adapt;
  323         struct scsipi_channel *chan;
  324         struct eata_inquiry_data *ei;
  325         int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
  326         bus_dma_segment_t seg;
  327         struct eata_cfg *ec;
  328         struct dpt_ccb *ccb;
  329         char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
  330         char vendor[__arraycount(ei->ei_vendor) + 1];
  331 
  332         ec = &sc->sc_ec;
  333         snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
  334             "NetBSD %s DPT driver", osrelease);
  335 
  336         /*
  337          * Allocate the CCB/status packet/scratch DMA map and load.
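                *
                * A single DMA-safe area holds, in this order: the array of
                * sc_nccbs CCBs, one EATA status packet (struct eata_sp) and
                * a scratch buffer of DPT_SCRATCH_SIZE bytes.  sc_stpoff and
                * sc_scroff are the byte offsets of the latter two within
                * the area.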
  338          */
  339         sc->sc_nccbs =
  340             min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
  341         sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
  342         sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
  343         mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
  344             DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
  345 
  346         if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
  347             PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
  348                 aprint_error_dev(&sc->sc_dv, "unable to allocate CCBs, rv = %d\n", rv);
  349                 return;
  350         }
  351 
  352         if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
  353             (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
  354                 aprint_error_dev(&sc->sc_dv, "unable to map CCBs, rv = %d\n",
  355                     rv);
  356                 return;
  357         }
  358 
  359         if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
  360             mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
  361                 aprint_error_dev(&sc->sc_dv, "unable to create CCB DMA map, rv = %d\n", rv);
  362                 return;
  363         }
  364 
  365         if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
  366             sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
  367                 aprint_error_dev(&sc->sc_dv, "unable to load CCB DMA map, rv = %d\n", rv);
  368                 return;
  369         }
  370 
  371         sc->sc_stp = (struct eata_sp *)((char *)sc->sc_ccbs + sc->sc_stpoff);
  372         sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
  373         sc->sc_scr = (char *)sc->sc_ccbs + sc->sc_scroff;
  374         sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
  375         sc->sc_stp->sp_ccbid = -1;
  376 
  377         /*
  378          * Create the CCBs.
  379          */
  380         SLIST_INIT(&sc->sc_ccb_free);
  381         memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);
  382 
  383         for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
  384                 rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
  385                     DPT_SG_SIZE, DPT_MAX_XFER, 0,
  386                     BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
  387                     &ccb->ccb_dmamap_xfer);
  388                 if (rv) {
  389                         aprint_error_dev(&sc->sc_dv, "can't create ccb dmamap (%d)\n", rv);
  390                         break;
  391                 }
  392 
  393                 ccb->ccb_id = i;
  394                 ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
  395                     CCB_OFF(sc, ccb);
  396                 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
  397         }
  398 
  399         if (i == 0) {
  400                 aprint_error_dev(&sc->sc_dv, "unable to create CCBs\n");
  401                 return;
  402         } else if (i != sc->sc_nccbs) {
  403                 aprint_error_dev(&sc->sc_dv, "%d/%d CCBs created!\n",
  404                     i, sc->sc_nccbs);
  405                 sc->sc_nccbs = i;
  406         }
  407 
  408         /* Set shutdownhook before we start any device activity. */
  409         if (dpt_sdh == NULL)
  410                 dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
  411 
  412         /* Get the inquiry data from the HBA. */
  413         dpt_hba_inquire(sc, &ei);
  414 
  415         /*
  416          * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
  417          * dpt0: interrupting at irq 10
  418          * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
  419          */
   420         for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
  421             i++)
  422                 vendor[i] = ei->ei_vendor[i];
  423         vendor[i] = '\0';
  424 
   425         for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
  426             i++)
  427                 model[i] = ei->ei_model[i];
   428         for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
  429             i++, j++)
  430                 model[i] = ei->ei_suffix[j];
  431         model[i] = '\0';
  432 
  433         /* Find the marketing name for the board. */
  434         for (i = 0; dpt_cname[i] != NULL; i += 2)
  435                 if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
  436                         break;
  437 
  438         aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);
  439 
  440         if (intrstr != NULL)
  441                 aprint_normal_dev(&sc->sc_dv, "interrupting at %s\n",
  442                     intrstr);
  443 
  444         maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
  445             EC_F3_MAX_CHANNEL_SHIFT;
  446         maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
  447             EC_F3_MAX_TARGET_SHIFT;
  448 
  449         aprint_normal_dev(&sc->sc_dv, "%d queued commands, %d channel(s), adapter on ID(s)",
  450             sc->sc_nccbs, maxchannel + 1);
  451 
  452         for (i = 0; i <= maxchannel; i++) {
  453                 sc->sc_hbaid[i] = ec->ec_hba[3 - i];
  454                 aprint_normal(" %d", sc->sc_hbaid[i]);
  455         }
  456         aprint_normal("\n");
  457 
  458         /*
  459          * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
  460          * this for each bus?
  461          */
  462         if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
  463                 panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));
  464 
  465         /* Fill in the scsipi_adapter. */
  466         adapt = &sc->sc_adapt;
  467         memset(adapt, 0, sizeof(*adapt));
  468         adapt->adapt_dev = &sc->sc_dv;
  469         adapt->adapt_nchannels = maxchannel + 1;
  470         adapt->adapt_openings = sc->sc_nccbs - 1;
  471         adapt->adapt_max_periph = sc->sc_nccbs - 1;
  472         adapt->adapt_request = dpt_scsipi_request;
  473         adapt->adapt_minphys = dpt_minphys;
  474 
  475         for (i = 0; i <= maxchannel; i++) {
  476                 /* Fill in the scsipi_channel. */
  477                 chan = &sc->sc_chans[i];
  478                 memset(chan, 0, sizeof(*chan));
  479                 chan->chan_adapter = adapt;
  480                 chan->chan_bustype = &scsi_bustype;
  481                 chan->chan_channel = i;
  482                 chan->chan_ntargets = maxtarget + 1;
  483                 chan->chan_nluns = ec->ec_maxlun + 1;
  484                 chan->chan_id = sc->sc_hbaid[i];
  485                 config_found(&sc->sc_dv, chan, scsiprint);
  486         }
  487 }
  488 
  489 /*
  490  * Read the EATA configuration from the HBA and perform some sanity checks.
  491  */
  492 int
  493 dpt_readcfg(struct dpt_softc *sc)
  494 {
  495         struct eata_cfg *ec;
  496         int i, j, stat;
  497         u_int16_t *p;
  498 
  499         ec = &sc->sc_ec;
  500 
  501         /* Older firmware may puke if we talk to it too soon after reset. */
  502         dpt_outb(sc, HA_COMMAND, CP_RESET);
  503         DELAY(750000);
  504 
  505         for (i = 1000; i; i--) {
  506                 if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
  507                         break;
  508                 DELAY(2000);
  509         }
  510 
  511         if (i == 0) {
  512                 printf("%s: HBA not ready after reset (hba status:%02x)\n",
  513                     device_xname(&sc->sc_dv), dpt_inb(sc, HA_STATUS));
  514                 return (-1);
  515         }
  516 
  517         while((((stat = dpt_inb(sc, HA_STATUS))
  518             != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
  519             && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
  520             && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
  521             || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
  522                 /* RAID drives still spinning up? */
  523                 if(dpt_inb(sc, HA_ERROR) != 'D' ||
  524                    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
  525                    dpt_inb(sc, HA_ERROR + 2) != 'T') {
  526                         printf("%s: HBA not ready\n", device_xname(&sc->sc_dv));
  527                         return (-1);
  528                 }
  529         }
  530 
  531         /*
  532          * Issue the read-config command and wait for the data to appear.
  533          *
   534  * Apparently certain firmware revisions won't DMA later on if we
  535          * request the config data using PIO, but it makes it a lot easier
  536          * as no DMA setup is required.
  537          */
  538         dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
  539         memset(ec, 0, sizeof(*ec));
  540         i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
  541             sizeof(ec->ec_cfglen)) >> 1;
  542         p = (u_int16_t *)ec;
  543 
  544         if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
  545                 printf("%s: cfg data didn't appear (hba status:%02x)\n",
  546                     device_xname(&sc->sc_dv), dpt_inb(sc, HA_STATUS));
  547                 return (-1);
  548         }
  549 
  550         /* Begin reading. */
  551         while (i--)
  552                 *p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
  553 
  554         if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
  555             - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
  556             - sizeof(ec->ec_cfglen)))
  557                 i = sizeof(struct eata_cfg)
  558                   - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
  559                   - sizeof(ec->ec_cfglen);
  560 
  561         j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
  562             sizeof(ec->ec_cfglen);
  563         i >>= 1;
  564 
  565         while (i--)
  566                 *p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
  567 
  568         /* Flush until we have read 512 bytes. */
  569         i = (512 - j + 1) >> 1;
  570         while (i--)
  571                 (void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
  572 
  573         /* Defaults for older firmware... */
  574         if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
  575                 ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;
  576 
  577         if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
  578                 aprint_error_dev(&sc->sc_dv, "HBA error\n");
  579                 return (-1);
  580         }
  581 
  582         if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
  583                 aprint_error_dev(&sc->sc_dv, "EATA signature mismatch\n");
  584                 return (-1);
  585         }
  586 
  587         if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
  588                 aprint_error_dev(&sc->sc_dv, "ec_hba field invalid\n");
  589                 return (-1);
  590         }
  591 
  592         if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
  593                 aprint_error_dev(&sc->sc_dv, "DMA not supported\n");
  594                 return (-1);
  595         }
  596 
  597         return (0);
  598 }
  599 
  600 /*
  601  * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
   602  * data from its cache and mark array groups as clean.
  603  *
  604  * XXX This doesn't always work (i.e., the HBA may still be flushing after
  605  * we tell root that it's safe to power off).
  606  */
  607 static void
  608 dpt_shutdown(void *cookie)
  609 {
  610         extern struct cfdriver dpt_cd;
  611         struct dpt_softc *sc;
  612         int i;
  613 
  614         printf("shutting down dpt devices...");
  615 
  616         for (i = 0; i < dpt_cd.cd_ndevs; i++) {
  617                 if ((sc = device_lookup_private(&dpt_cd, i)) == NULL)
  618                         continue;
  619                 dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
  620         }
  621 
  622         delay(10000*1000);
  623         printf(" done\n");
  624 }
  625 
  626 /*
  627  * Send an EATA command to the HBA.
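        *
        * After waiting for the HBA to go non-busy, the physical address of
        * the CCB (or zero for an immediate command) is written a byte at a
        * time, least-significant byte first, to the HA_DMA_BASE registers;
        * any immediate sub-command goes to HA_ICMD, and the EATA command
        * code is then written to HA_COMMAND.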
  628  */
  629 static int
  630 dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
  631 {
  632         u_int32_t pa;
  633         int i, s;
  634 
  635         s = splbio();
  636 
  637         for (i = 20000; i != 0; i--) {
  638                 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
  639                         break;
  640                 DELAY(50);
  641         }
  642         if (i == 0) {
  643                 splx(s);
  644                 return (-1);
  645         }
  646 
  647         pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
  648         dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
  649         dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
  650         dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
  651         dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);
  652 
  653         if (eatacmd == CP_IMMEDIATE)
  654                 dpt_outb(sc, HA_ICMD, icmd);
  655 
  656         dpt_outb(sc, HA_COMMAND, eatacmd);
  657 
  658         splx(s);
  659         return (0);
  660 }
  661 
  662 /*
  663  * Wait for the HBA status register to reach a specific state.
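        * The timeout is given in milliseconds and the register is polled in
        * 100us steps.  Returns 0 once the masked status matches, -1 on
        * timeout.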
  664  */
  665 static int
  666 dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
  667 {
  668 
  669         for (ms *= 10; ms != 0; ms--) {
  670                 if ((dpt_inb(sc, HA_STATUS) & mask) == state)
  671                         return (0);
  672                 DELAY(100);
  673         }
  674 
  675         return (-1);
  676 }
  677 
  678 /*
  679  * Spin waiting for a command to finish.  The timeout value from the CCB is
   680  * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
  681  * recycled before we get a look at it.
  682  */
  683 static int
  684 dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
  685 {
  686         int i, s;
  687 
  688 #ifdef DEBUG
  689         if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
  690                 panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
  691 #endif
  692 
  693         s = splbio();
  694 
  695         if ((ccb->ccb_flg & CCB_INTR) != 0) {
  696                 splx(s);
  697                 return (0);
  698         }
  699 
  700         for (i = ccb->ccb_timeout * 20; i != 0; i--) {
  701                 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
  702                         dpt_intr(sc);
  703                 if ((ccb->ccb_flg & CCB_INTR) != 0)
  704                         break;
  705                 DELAY(50);
  706         }
  707 
  708         splx(s);
  709         return (i == 0);
  710 }
  711 
  712 /*
  713  * We have a command which has been processed by the HBA, so now we look to
  714  * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
  715  * by dpt_intr().
  716  */
  717 static void
  718 dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
  719 {
  720         struct scsipi_xfer *xs;
  721 
  722         xs = ccb->ccb_xs;
  723 
  724         SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));
  725 
  726         /*
  727          * If we were a data transfer, unload the map that described the
  728          * data buffer.
  729          */
  730         if (xs->datalen != 0)
  731                 dpt_ccb_unmap(sc, ccb);
  732 
  733         if (xs->error == XS_NOERROR) {
  734                 if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
  735                         switch (ccb->ccb_hba_status) {
  736                         case SP_HBA_ERROR_SEL_TO:
  737                                 xs->error = XS_SELTIMEOUT;
  738                                 break;
  739                         case SP_HBA_ERROR_RESET:
  740                                 xs->error = XS_RESET;
  741                                 break;
  742                         default:
  743                                 printf("%s: HBA status %x\n",
  744                                     device_xname(&sc->sc_dv), ccb->ccb_hba_status);
  745                                 xs->error = XS_DRIVER_STUFFUP;
  746                                 break;
  747                         }
  748                 } else if (ccb->ccb_scsi_status != SCSI_OK) {
  749                         switch (ccb->ccb_scsi_status) {
  750                         case SCSI_CHECK:
  751                                 memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
  752                                     sizeof(xs->sense.scsi_sense));
  753                                 xs->error = XS_SENSE;
  754                                 break;
  755                         case SCSI_BUSY:
  756                         case SCSI_QUEUE_FULL:
  757                                 xs->error = XS_BUSY;
  758                                 break;
  759                         default:
  760                                 scsipi_printaddr(xs->xs_periph);
  761                                 printf("SCSI status %x\n",
  762                                     ccb->ccb_scsi_status);
  763                                 xs->error = XS_DRIVER_STUFFUP;
  764                                 break;
  765                         }
  766                 } else
  767                         xs->resid = 0;
  768 
  769                 xs->status = ccb->ccb_scsi_status;
  770         }
  771 
  772         /* Free up the CCB and mark the command as done. */
  773         dpt_ccb_free(sc, ccb);
  774         scsipi_done(xs);
  775 }
  776 
  777 /*
  778  * Specified CCB has timed out, abort it.
  779  */
  780 static void
  781 dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
  782 {
  783         struct scsipi_periph *periph;
  784         struct scsipi_xfer *xs;
  785         int s;
  786 
  787         xs = ccb->ccb_xs;
  788         periph = xs->xs_periph;
  789 
  790         scsipi_printaddr(periph);
  791         printf("timed out (status:%02x aux status:%02x)",
  792             dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));
  793 
  794         s = splbio();
  795 
  796         if ((ccb->ccb_flg & CCB_ABORT) != 0) {
  797                 /* Abort timed out, reset the HBA */
  798                 printf(" AGAIN, resetting HBA\n");
  799                 dpt_outb(sc, HA_COMMAND, CP_RESET);
  800                 DELAY(750000);
  801         } else {
  802                 /* Abort the operation that has timed out */
  803                 printf("\n");
  804                 xs->error = XS_TIMEOUT;
  805                 ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
  806                 ccb->ccb_flg |= CCB_ABORT;
  807                 /* Start the abort */
  808                 if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
  809                         aprint_error_dev(&sc->sc_dv, "dpt_cmd failed\n");
  810         }
  811 
  812         splx(s);
  813 }
  814 
  815 /*
  816  * Map a data transfer.
  817  */
  818 static int
  819 dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
  820 {
  821         struct scsipi_xfer *xs;
  822         bus_dmamap_t xfer;
  823         bus_dma_segment_t *ds;
  824         struct eata_sg *sg;
  825         struct eata_cp *cp;
  826         int rv, i;
  827 
  828         xs = ccb->ccb_xs;
  829         xfer = ccb->ccb_dmamap_xfer;
  830         cp = &ccb->ccb_eata_cp;
  831 
  832         rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
  833             ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
  834             BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
  835             ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
  836 
  837         switch (rv) {
  838         case 0:
  839                 break;
  840         case ENOMEM:
  841         case EAGAIN:
  842                 xs->error = XS_RESOURCE_SHORTAGE;
  843                 break;
  844         default:
  845                 xs->error = XS_DRIVER_STUFFUP;
  846                 printf("%s: error %d loading map\n", device_xname(&sc->sc_dv), rv);
  847                 break;
  848         }
  849 
  850         if (xs->error != XS_NOERROR) {
  851                 dpt_ccb_free(sc, ccb);
  852                 scsipi_done(xs);
  853                 return (-1);
  854         }
  855 
  856         bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
  857             (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
  858             BUS_DMASYNC_PREWRITE);
  859 
  860         /* Don't bother using scatter/gather for just 1 seg */
  861         if (xfer->dm_nsegs == 1) {
  862                 cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
  863                 cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
  864         } else {
  865                 /*
  866                  * Load the hardware scatter/gather map with
  867                  * the contents of the DMA map.
  868                  */
  869                 sg = ccb->ccb_sg;
  870                 ds = xfer->dm_segs;
  871                 for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
  872                         sg->sg_addr = htobe32(ds->ds_addr);
  873                         sg->sg_len =  htobe32(ds->ds_len);
  874                 }
  875                 cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
  876                     sc->sc_dmamap->dm_segs[0].ds_addr +
  877                     offsetof(struct dpt_ccb, ccb_sg));
  878                 cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
  879                 cp->cp_ctl0 |= CP_C0_SCATTER;
  880         }
  881 
  882         return (0);
  883 }
  884 
  885 /*
  886  * Unmap a transfer.
  887  */
  888 static void
  889 dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
  890 {
  891 
  892         bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
  893             ccb->ccb_dmamap_xfer->dm_mapsize,
  894             (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
  895             BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
  896         bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
  897 }
  898 
  899 /*
  900  * Adjust the size of each I/O before it passes to the SCSI layer.
  901  */
  902 static void
  903 dpt_minphys(struct buf *bp)
  904 {
  905 
  906         if (bp->b_bcount > DPT_MAX_XFER)
  907                 bp->b_bcount = DPT_MAX_XFER;
  908         minphys(bp);
  909 }
  910 
  911 /*
  912  * Start a SCSI command.
  913  */
  914 static void
  915 dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
  916                    void *arg)
  917 {
  918         struct dpt_softc *sc;
  919         struct scsipi_xfer *xs;
  920         int flags;
  921         struct scsipi_periph *periph;
  922         struct dpt_ccb *ccb;
  923         struct eata_cp *cp;
  924 
  925         sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;
  926 
  927         switch (req) {
  928         case ADAPTER_REQ_RUN_XFER:
  929                 xs = arg;
  930                 periph = xs->xs_periph;
  931                 flags = xs->xs_control;
  932 
  933 #ifdef DIAGNOSTIC
  934                 /* Cmds must be no more than 12 bytes for us. */
  935                 if (xs->cmdlen > 12) {
  936                         xs->error = XS_DRIVER_STUFFUP;
  937                         scsipi_done(xs);
  938                         break;
  939                 }
  940 #endif
  941                 /*
  942                  * XXX We can't reset devices just yet.  Apparently some
  943                  * older firmware revisions don't even support it.
  944                  */
  945                 if ((flags & XS_CTL_RESET) != 0) {
  946                         xs->error = XS_DRIVER_STUFFUP;
  947                         scsipi_done(xs);
  948                         break;
  949                 }
  950 
  951                 /*
  952                  * Get a CCB and fill it.
  953                  */
  954                 ccb = dpt_ccb_alloc(sc);
  955                 ccb->ccb_xs = xs;
  956                 ccb->ccb_timeout = xs->timeout;
  957 
  958                 cp = &ccb->ccb_eata_cp;
  959                 memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
  960                 cp->cp_ccbid = ccb->ccb_id;
  961                 cp->cp_senselen = sizeof(ccb->ccb_sense);
  962                 cp->cp_stataddr = htobe32(sc->sc_stppa);
  963                 cp->cp_ctl0 = CP_C0_AUTO_SENSE;
  964                 cp->cp_ctl1 = 0;
  965                 cp->cp_ctl2 = 0;
  966                 cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
  967                 cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
  968                 cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
  969                 cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
  970 
  971                 if ((flags & XS_CTL_DATA_IN) != 0)
  972                         cp->cp_ctl0 |= CP_C0_DATA_IN;
  973                 if ((flags & XS_CTL_DATA_OUT) != 0)
  974                         cp->cp_ctl0 |= CP_C0_DATA_OUT;
  975                 if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
  976                         cp->cp_ctl0 |= CP_C0_INTERPRET;
  977 
   978                 /* Synchronous xfers mustn't write back through the cache. */
  979                 if (xs->bp != NULL)
  980                         if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
  981                                 cp->cp_ctl2 |= CP_C2_NO_CACHE;
  982 
  983                 cp->cp_senseaddr =
  984                     htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
  985                     CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
  986 
  987                 if (xs->datalen != 0) {
  988                         if (dpt_ccb_map(sc, ccb))
  989                                 break;
  990                 } else {
  991                         cp->cp_dataaddr = 0;
  992                         cp->cp_datalen = 0;
  993                 }
  994 
  995                 /* Sync up CCB and status packet. */
  996                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
  997                     CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
  998                     BUS_DMASYNC_PREWRITE);
  999                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
 1000                     sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
 1001 
 1002                 /*
 1003                  * Start the command.
 1004                  */
 1005                 if ((xs->xs_control & XS_CTL_POLL) != 0)
 1006                         ccb->ccb_flg |= CCB_PRIVATE;
 1007 
 1008                 if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
 1009                         aprint_error_dev(&sc->sc_dv, "dpt_cmd failed\n");
 1010                         xs->error = XS_DRIVER_STUFFUP;
 1011                         if (xs->datalen != 0)
 1012                                 dpt_ccb_unmap(sc, ccb);
 1013                         dpt_ccb_free(sc, ccb);
 1014                         break;
 1015                 }
 1016 
 1017                 if ((xs->xs_control & XS_CTL_POLL) == 0)
 1018                         break;
 1019 
 1020                 if (dpt_ccb_poll(sc, ccb)) {
 1021                         dpt_ccb_abort(sc, ccb);
 1022                         /* Wait for abort to complete... */
 1023                         if (dpt_ccb_poll(sc, ccb))
 1024                                 dpt_ccb_abort(sc, ccb);
 1025                 }
 1026 
 1027                 dpt_ccb_done(sc, ccb);
 1028                 break;
 1029 
 1030         case ADAPTER_REQ_GROW_RESOURCES:
 1031                 /*
 1032                  * Not supported, since we allocate the maximum number of
 1033                  * CCBs up front.
 1034                  */
 1035                 break;
 1036 
 1037         case ADAPTER_REQ_SET_XFER_MODE:
 1038                 /*
 1039                  * This will be handled by the HBA itself, and we can't
 1040                  * modify that (ditto for tagged queueing).
 1041                  */
 1042                 break;
 1043         }
 1044 }
 1045 
 1046 /*
 1047  * Get inquiry data from the adapter.
 1048  */
 1049 static void
 1050 dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
 1051 {
 1052         struct dpt_ccb *ccb;
 1053         struct eata_cp *cp;
 1054 
 1055         *ei = (struct eata_inquiry_data *)sc->sc_scr;
 1056 
 1057         /* Get a CCB and mark as private */
 1058         ccb = dpt_ccb_alloc(sc);
 1059         ccb->ccb_flg |= CCB_PRIVATE;
 1060         ccb->ccb_timeout = 200;
 1061 
 1062         /* Put all the arguments into the CCB. */
 1063         cp = &ccb->ccb_eata_cp;
 1064         cp->cp_ccbid = ccb->ccb_id;
 1065         cp->cp_senselen = sizeof(ccb->ccb_sense);
 1066         cp->cp_senseaddr = 0;
 1067         cp->cp_stataddr = htobe32(sc->sc_stppa);
 1068         cp->cp_dataaddr = htobe32(sc->sc_scrpa);
 1069         cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
 1070         cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
 1071         cp->cp_ctl1 = 0;
 1072         cp->cp_ctl2 = 0;
 1073         cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
 1074         cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;
 1075 
 1076         /* Put together the SCSI inquiry command. */
 1077         memset(&cp->cp_cdb_cmd, 0, 12);
 1078         cp->cp_cdb_cmd = INQUIRY;
 1079         cp->cp_cdb_len = sizeof(struct eata_inquiry_data);
 1080 
 1081         /* Sync up CCB, status packet and scratch area. */
 1082         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
 1083             sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
 1084         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
 1085             sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
 1086         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
 1087             sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
 1088 
 1089         /* Start the command and poll on completion. */
 1090         if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
 1091                 panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));
 1092 
 1093         if (dpt_ccb_poll(sc, ccb))
 1094                 panic("%s: inquiry timed out", device_xname(&sc->sc_dv));
 1095 
 1096         if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
 1097             ccb->ccb_scsi_status != SCSI_OK)
 1098                 panic("%s: inquiry failed (hba:%02x scsi:%02x)",
 1099                     device_xname(&sc->sc_dv), ccb->ccb_hba_status,
 1100                     ccb->ccb_scsi_status);
 1101 
 1102         /* Sync up the DMA map and free CCB, returning. */
 1103         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
 1104             sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
 1105         dpt_ccb_free(sc, ccb);
 1106 }
 1107 
 1108 int
 1109 dptopen(dev_t dev, int flag, int mode, struct lwp *l)
 1110 {
 1111 
 1112         if (device_lookup(&dpt_cd, minor(dev)) == NULL)
 1113                 return (ENXIO);
 1114 
 1115         return (0);
 1116 }
 1117 
 1118 int
 1119 dptioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
 1120 {
 1121         struct dpt_softc *sc;
 1122         int rv;
 1123 
 1124         sc = device_lookup_private(&dpt_cd, minor(dev));
 1125 
 1126         switch (cmd & 0xffff) {
 1127         case DPT_SIGNATURE:
 1128                 memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
 1129                 break;
 1130 
 1131         case DPT_CTRLINFO:
 1132                 dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
 1133                 break;
 1134 
 1135         case DPT_SYSINFO:
 1136                 dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
 1137                 break;
 1138 
 1139         case DPT_BLINKLED:
 1140                 /*
 1141                  * XXX Don't know how to get this from EATA boards.  I think
 1142                  * it involves waiting for a "DPT" sequence from HA_ERROR
 1143                  * and then reading one of the HA_ICMD registers.
 1144                  */
 1145                 *(int *)data = 0;
 1146                 break;
 1147 
 1148         case DPT_EATAUSRCMD:
 1149                 rv = kauth_authorize_device_passthru(l->l_cred, dev,
 1150                     KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
 1151                 if (rv)
 1152                         return (rv);
 1153 
 1154                 if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
 1155                         DPRINTF(("%s: ucp %lu vs %lu bytes\n",
 1156                             device_xname(&sc->sc_dv), IOCPARM_LEN(cmd),
 1157                             (unsigned long int)sizeof(struct eata_ucp)));
 1158                         return (EINVAL);
 1159                 }
 1160 
 1161                 if (sc->sc_uactive++)
 1162                         tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);
 1163 
 1164                 rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);
 1165 
 1166                 sc->sc_uactive--;
 1167                 wakeup_one(&sc->sc_uactive);
 1168                 return (rv);
 1169 
 1170         default:
 1171                 DPRINTF(("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd));
 1172                 return (ENOTTY);
 1173         }
 1174 
 1175         return (0);
 1176 }
 1177 
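       /*
        * Fill in controller information for the DPT_CTRLINFO ioctl.
        */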
 1178 void
 1179 dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
 1180 {
 1181 
 1182         memset(info, 0, sizeof(*info));
 1183         info->id = sc->sc_hbaid[0];
 1184         info->vect = sc->sc_isairq;
 1185         info->base = sc->sc_isaport;
 1186         info->qdepth = sc->sc_nccbs;
 1187         info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
 1188         info->heads = 16;
 1189         info->sectors = 63;
 1190         info->do_drive32 = 1;
 1191         info->primary = 1;
 1192         info->cpLength = sizeof(struct eata_cp);
 1193         info->spLength = sizeof(struct eata_sp);
 1194         info->drqNum = sc->sc_isadrq;
 1195 }
 1196 
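       /*
        * Fill in host system information for the DPT_SYSINFO ioctl.  On i386
        * the drive types and memory sizes are read from CMOS; elsewhere only
        * the bus type is reported.
        */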
 1197 void
 1198 dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
 1199 {
 1200 #ifdef i386
 1201         int i, j;
 1202 #endif
 1203 
 1204         memset(info, 0, sizeof(*info));
 1205 
 1206 #ifdef i386
 1207         outb (0x70, 0x12);
 1208         i = inb(0x71);
 1209         j = i >> 4;
  1210         if (j == 0x0f) {
 1211                 outb (0x70, 0x19);
 1212                 j = inb (0x71);
 1213         }
 1214         info->drive0CMOS = j;
 1215 
 1216         j = i & 0x0f;
  1217         if (j == 0x0f) {
 1218                 outb (0x70, 0x1a);
 1219                 j = inb (0x71);
 1220         }
 1221         info->drive1CMOS = j;
 1222         info->processorFamily = dpt_sig.dsProcessorFamily;
 1223 
 1224         /*
 1225          * Get the conventional memory size from CMOS.
 1226          */
 1227         outb(0x70, 0x16);
 1228         j = inb(0x71);
 1229         j <<= 8;
 1230         outb(0x70, 0x15);
 1231         j |= inb(0x71);
 1232         info->conventionalMemSize = j;
 1233 
 1234         /*
 1235          * Get the extended memory size from CMOS.
 1236          */
 1237         outb(0x70, 0x31);
 1238         j = inb(0x71);
 1239         j <<= 8;
 1240         outb(0x70, 0x30);
 1241         j |= inb(0x71);
 1242         info->extendedMemSize = j;
 1243 
 1244         switch (cpu_class) {
 1245         case CPUCLASS_386:
 1246                 info->processorType = PROC_386;
 1247                 break;
 1248         case CPUCLASS_486:
 1249                 info->processorType = PROC_486;
 1250                 break;
 1251         case CPUCLASS_586:
 1252                 info->processorType = PROC_PENTIUM;
 1253                 break;
 1254         case CPUCLASS_686:
 1255         default:
 1256                 info->processorType = PROC_SEXIUM;
 1257                 break;
 1258         }
 1259 
 1260         info->flags = SI_CMOS_Valid | SI_BusTypeValid |
 1261             SI_MemorySizeValid | SI_NO_SmartROM;
 1262 #else
 1263         info->flags = SI_BusTypeValid | SI_NO_SmartROM;
 1264 #endif
 1265 
 1266         info->busType = sc->sc_bustype;
 1267 }
 1268 
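       /*
        * Execute a user-supplied EATA command (from the DPT_EATAUSRCMD
        * ioctl).  The user's command packet is copied into a private CCB,
        * any data buffer is DMA-mapped via a scatter/gather list, and we
        * sleep until dpt_intr() wakes the CCB; status and sense data are
        * then copied back out to user space.
        */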
 1269 int
 1270 dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
 1271 {
 1272         struct dpt_ccb *ccb;
 1273         struct eata_sp sp;
 1274         struct eata_cp *cp;
 1275         struct eata_sg *sg;
 1276         bus_dmamap_t xfer = 0; /* XXX: gcc */
 1277         bus_dma_segment_t *ds;
 1278         int datain = 0, s, rv = 0, i, uslen; /* XXX: gcc */
 1279 
 1280         /*
 1281          * Get a CCB and fill.
 1282          */
 1283         ccb = dpt_ccb_alloc(sc);
 1284         ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
 1285         ccb->ccb_timeout = 0;
 1286         ccb->ccb_savesp = &sp;
 1287 
 1288         cp = &ccb->ccb_eata_cp;
 1289         memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
 1290         uslen = cp->cp_senselen;
 1291         cp->cp_ccbid = ccb->ccb_id;
 1292         cp->cp_senselen = sizeof(ccb->ccb_sense);
 1293         cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
 1294             CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
 1295         cp->cp_stataddr = htobe32(sc->sc_stppa);
 1296 
 1297         /*
 1298          * Map data transfers.
 1299          */
 1300         if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
 1301                 xfer = ccb->ccb_dmamap_xfer;
 1302                 datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);
 1303 
 1304                 if (ucp->ucp_datalen > DPT_MAX_XFER) {
 1305                         DPRINTF(("%s: xfer too big\n", device_xname(&sc->sc_dv)));
 1306                         dpt_ccb_free(sc, ccb);
 1307                         return (EFBIG);
 1308                 }
 1309                 rv = bus_dmamap_load(sc->sc_dmat, xfer,
 1310                     ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
 1311                     BUS_DMA_WAITOK | BUS_DMA_STREAMING |
 1312                     (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
 1313                 if (rv != 0) {
 1314                         DPRINTF(("%s: map failed; %d\n", device_xname(&sc->sc_dv),
 1315                             rv));
 1316                         dpt_ccb_free(sc, ccb);
 1317                         return (rv);
 1318                 }
 1319 
 1320                 bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
 1321                     (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
 1322 
 1323                 sg = ccb->ccb_sg;
 1324                 ds = xfer->dm_segs;
 1325                 for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
 1326                         sg->sg_addr = htobe32(ds->ds_addr);
 1327                         sg->sg_len = htobe32(ds->ds_len);
 1328                 }
 1329                 cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
 1330                     sc->sc_dmamap->dm_segs[0].ds_addr +
 1331                     offsetof(struct dpt_ccb, ccb_sg));
 1332                 cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
 1333                 cp->cp_ctl0 |= CP_C0_SCATTER;
 1334         } else {
 1335                 cp->cp_dataaddr = 0;
 1336                 cp->cp_datalen = 0;
 1337         }
 1338 
 1339         /*
 1340          * Start the command and sleep on completion.
 1341          */
 1342         uvm_lwp_hold(curlwp);
 1343         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
 1344             sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
 1345         s = splbio();
 1346         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
 1347             sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
 1348         if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
 1349                 panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));
 1350         tsleep(ccb, PWAIT, "dptucmd", 0);
 1351         splx(s);
 1352         uvm_lwp_rele(curlwp);
 1353 
 1354         /*
 1355          * Sync up the DMA map and copy out results.
 1356          */
 1357         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
 1358             sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
 1359 
 1360         if (cp->cp_datalen != 0) {
 1361                 bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
 1362                     (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
 1363                 bus_dmamap_unload(sc->sc_dmat, xfer);
 1364         }
 1365 
 1366         if (ucp->ucp_stataddr != NULL) {
 1367                 rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
 1368                 if (rv != 0) {
 1369                         DPRINTF(("%s: sp copyout() failed\n",
 1370                             device_xname(&sc->sc_dv)));
 1371                 }
 1372         }
 1373         if (rv == 0 && ucp->ucp_senseaddr != NULL) {
 1374                 i = min(uslen, sizeof(ccb->ccb_sense));
 1375                 rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
 1376                 if (rv != 0) {
 1377                         DPRINTF(("%s: sense copyout() failed\n",
 1378                             device_xname(&sc->sc_dv)));
 1379                 }
 1380         }
 1381 
 1382         ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
 1383         ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
 1384         dpt_ccb_free(sc, ccb);
 1385         return (rv);
 1386 }
