FreeBSD/Linux Kernel Cross Reference
sys/dev/hptiop/hptiop.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>


#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>


#include <dev/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.9";

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                        u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
        .d_name = driver_name,
        .d_version = D_VERSION,
};

#define hba_from_dev(dev) \
        ((struct hpt_iop_hba *)((dev)->si_drv1))

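/*
 * Register access helpers.  Each of the three supported message-unit
 * layouts has its own macro family: the ITL and MV0 accessors go
 * through BAR0, while the MV2 and MVFREY2 accessors go through BAR2.
 * The offset argument is a field name in the matching register
 * structure declared in hptiop.h, so the offsetof() computation keeps
 * the register layouts in one place.
 */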
#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

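/*
 * Character-device entry points.  Only one opener is allowed at a time
 * (tracked via HPT_IOCTL_FLAG_OPEN), and the ioctl handler exposes two
 * commands: HPT_DO_IOCONTROL, a management pass-through dispatched to
 * the interface-specific do_ioctl op, and HPT_SCAN_BUS, which rescans
 * the CAM bus.  A minimal userland sketch, assuming a hypothetical
 * /dev/hptiop0 node and the ioctl definitions from hptiop.h (the node
 * name is illustrative; it is not established in this file):
 *
 *      fd = open("/dev/hptiop0", O_RDWR);
 *      if (fd >= 0) {
 *              ioctl(fd, HPT_SCAN_BUS);        // request a bus rescan
 *              close(fd);
 *      }
 */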
static int hptiop_open(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba == NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);
        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
                                        int flags, ioctl_thread_t proc)
{
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }
        return ret;
}

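/*
 * MV message-unit queue helpers.  The outbound and inbound queues are
 * circular arrays of 64-bit entries in BAR2 space; each entry is moved
 * as two 32-bit bus-space words.  The adapter advances the head index
 * of the outbound queue and the driver consumes entries by advancing
 * the tail; in the inbound direction the roles are reversed, and a
 * doorbell write tells the firmware that a new entry has been posted.
 */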
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                        (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{

        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
        BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

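/*
 * Adapter readiness checks.  On ITL hardware the firmware signals
 * readiness by making a request slot available on the inbound queue;
 * the driver polls for one in 1ms steps and immediately hands it back
 * through the outbound queue.  The MV and MVFrey variants instead send
 * a NOP message and wait for its synchronous completion.
 */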
static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
{
        u_int32_t req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
                                                        u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

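/*
 * ITL completion handling.  An index with the host bits set refers to a
 * host-allocated request (an SRB in hba->srb[]); otherwise the index is
 * an offset into the IOP's own memory window and the request header is
 * read through bus space.  Newer firmware or interfaces (> 0x01020000)
 * can return the result inline via IOPMU_QUEUE_REQUEST_RESULT_BIT,
 * avoiding a read of the request itself.  SCSI completions translate
 * the IOP result code into a CAM status, copy sense data on CHECK
 * CONDITION, and finish the CCB with xpt_done().
 */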
static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
                                                        u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req = NULL;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /*iop req*/
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch(temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                                index + offsetof(struct hpt_iop_request_scsi_command,
                                dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* reported as success regardless of the IOP result */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (dxfer < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    dxfer;
                        else
                                ccb->csio.sense_resid = 0;
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        temp = bus_space_read_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;
                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

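/*
 * ITL interrupt handling: outbound_intstatus carries two causes, an
 * outbound message (MSG0) and outbound post-queue completions.  The
 * return value reports whether any work was found, which the
 * synchronous polling paths rely on.
 */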
static int hptiop_intr_itl(struct hpt_iop_hba * hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
                                                        u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* reported as success regardless of the IOP result */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
                                u_int32_t _tag)
{
        u_int32_t req_type = _tag & 0xf;

        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req;
        union ccb *ccb;
        u_int8_t *cdb;

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->config_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                srb = hba->srb[(_tag >> 4) & 0xff];
                req = (struct hpt_iop_request_scsi_command *)srb;

                ccb = (union ccb *)srb->ccb;

                callout_stop(&srb->timeout);

                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* reported as success regardless of the IOP result */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                               sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                               MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
                break;
        default:
                device_printf(hba->pcidev, "wrong callback type\n");
                break;
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba * hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

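/*
 * MVFrey interrupt handling.  Doorbell bits deliver firmware messages;
 * completions arrive on an in-memory outbound list whose producer index
 * (*outlist_cptr) is updated by the adapter.  The consumer pointer
 * chases it, and the outer do/while re-reads the producer index
 * afterwards to close the race with entries that land while the list
 * is being drained.  Once the adapter is initialized, the PCIe
 * function interrupt is masked on entry and re-enabled (0x1010) on
 * exit.
 */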
static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
{
        u_int32_t status, _tag, cptr;
        int ret = 0;

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
        }

        status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
                if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
                        u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
                        hptiop_os_message_callback(hba, msg);
                }
                ret = 1;
        }

        status = BUS_SPACE_RD4_MVFREY2(isr_cause);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
                do {
                        cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
                        while (hba->u.mvfrey.outlist_rptr != cptr) {
                                hba->u.mvfrey.outlist_rptr++;
                                if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
                                        hba->u.mvfrey.outlist_rptr = 0;
                                }

                                _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
                                hptiop_request_callback_mvfrey(hba, _tag);
                                ret = 2;
                        }
                } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
        }

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
        }

        return ret;
}

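/*
 * Synchronous requests.  These helpers post a request marked
 * IOP_REQUEST_FLAG_SYNC_REQUEST and then poll the interrupt handler in
 * 1ms steps, up to the caller's timeout, instead of sleeping, so they
 * are usable before interrupt delivery is set up.  Completion is
 * detected through the request's context field (ITL), the config_done
 * flag (MV/MVFrey), or msg_done for plain messages.
 */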
static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
                                        u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;
        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i, index;
        u_int64_t phy_addr;
        struct hpt_iop_request_header *reqhdr =
                (struct hpt_iop_request_header *)req;

        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy;
        reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
                                        | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
                                        | IOP_REQUEST_FLAG_ADDR_BITS
                                        | ((phy_addr >> 16) & 0xffff0000);
        reqhdr->context = ((phy_addr & 0xffffffff) << 32)
                                        | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

        hba->u.mvfrey.inlist_wptr++;
        index = hba->u.mvfrey.inlist_wptr & 0x3fff;

        if (index == hba->u.mvfrey.list_count) {
                index = 0;
                hba->u.mvfrey.inlist_wptr &= ~0x3fff;
                hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
        }

        hba->u.mvfrey.inlist[index].addr = phy_addr;
        hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

        BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
        BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mvfrey(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                        u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done ? 0 : -1;
}

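/*
 * GET/SET CONFIG exchange.  At attach time the driver reads the
 * adapter's configuration (queue sizes and limits) and writes its own
 * settings back.  On ITL the request lives in IOP memory obtained from
 * the inbound queue; on MV it is built in the host-resident ctlcfg
 * buffer; on MVFrey the firmware deposits the configuration in a host
 * buffer directly, so reading it is a plain field-by-field copy.
 */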
static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
                                struct hpt_iop_request_get_config * config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
                                struct hpt_iop_request_get_config * config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
                                struct hpt_iop_request_get_config * config)
{
        struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

        if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
            info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
                KdPrint(("hptiop: header size %x/%x type %x/%x",
                         info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
                         info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
                return -1;
        }

        config->interface_version = info->interface_version;
        config->firmware_version = info->firmware_version;
        config->max_requests = info->max_requests;
        config->request_size = info->request_size;
        config->max_sg_count = info->max_sg_count;
        config->data_transfer_length = info->data_transfer_length;
        config->alignment_mask = info->alignment_mask;
        config->max_devices = info->max_devices;
        config->sdram_size = info->sdram_size;

        KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
                 config->max_requests, config->request_size,
                 config->data_transfer_length, config->max_devices,
                 config->sdram_size));

        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                (u_int32_t *)config,
                sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;

        if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

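/*
 * Copy between user space and the IOP's BAR0 request window one byte
 * at a time: the buffer lives in device memory and must be accessed
 * through bus_space, and the byte loop avoids any width or alignment
 * assumptions about the window.
 */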
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
                                                                        void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                if (copyin((u_int8_t *)user + i, &byte, 1))
                        return -1;
                bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }

        return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
                                                                        void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
                if (copyout(&byte, (u_int8_t *)user + i, 1))
                        return -1;
        }

        return 0;
}

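/*
 * ITL management ioctl.  The caller's magic value is checked, a
 * request slot is taken from the inbound queue, and the input buffer
 * is copied into the request at offset buf.  On success the output
 * buffer follows the input buffer rounded up to a 4-byte boundary,
 * and the slot is returned to the outbound queue on every exit path.
 */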
static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param * pParams)
{
        u_int32_t req32;
        u_int32_t result;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return EFAULT;

        if (pParams->nInBufferSize)
                if (hptiop_bus_space_copyin(hba, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command, buf),
                        (void *)pParams->lpInBuffer, pParams->nInBufferSize))
                        goto invalid;

        if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
                goto invalid;

        result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.result));

        if (result == IOP_RESULT_SUCCESS) {
                if (pParams->nOutBufferSize)
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, buf) +
                                        ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned) {
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
                                (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
                                goto invalid;
                }

                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return 0;
        } else {
invalid:
                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return EFAULT;
        }
}

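/*
 * MV management ioctl posting.  The request is built in the shared
 * ctlcfg buffer and its physical address is posted to the inbound
 * queue with MVIOP_MU_QUEUE_ADDR_HOST_BIT set; the low bits also
 * carry a clamped size hint (header.size >> 8, at most 3).  The
 * caller then sleeps on the request until the completion callback
 * sets config_done, resetting the adapter if the wait times out.
 */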
 1211 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
 1212                                 struct hpt_iop_request_ioctl_command *req,
 1213                                 struct hpt_iop_ioctl_param *pParams)
 1214 {
 1215         u_int64_t req_phy;
 1216         int size = 0;
 1217 
 1218         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
 1219                         (hba->max_request_size -
 1220                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
 1221                 device_printf(hba->pcidev, "request size beyond max value");
 1222                 return -1;
 1223         }
 1224 
 1225         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
 1226         req->inbuf_size = pParams->nInBufferSize;
 1227         req->outbuf_size = pParams->nOutBufferSize;
 1228         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
 1229                                         + pParams->nInBufferSize;
 1230         req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
 1231         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
 1232         req->header.result = IOP_RESULT_PENDING;
 1233         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 1234         size = req->header.size >> 8;
 1235         size = imin(3, size);
 1236         req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
 1237         hptiop_mv_inbound_write(req_phy, hba);
 1238 
 1239         BUS_SPACE_RD4_MV0(outbound_intmask);
 1240 
 1241         while (hba->config_done == 0) {
 1242                 if (hptiop_sleep(hba, req, PPAUSE,
 1243                         "hptctl", HPT_OSM_TIMEOUT)==0)
 1244                         continue;
 1245                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
 1246         }
 1247         return 0;
 1248 }
 1249 
 1250 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
 1251                                 struct hpt_iop_ioctl_param *pParams)
 1252 {
 1253         struct hpt_iop_request_ioctl_command *req;
 1254 
 1255         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
 1256                 (pParams->Magic != HPT_IOCTL_MAGIC32))
 1257                 return EFAULT;
 1258 
 1259         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
 1260         hba->config_done = 0;
 1261         hptiop_lock_adapter(hba);
 1262         if (pParams->nInBufferSize)
 1263                 if (copyin((void *)pParams->lpInBuffer,
 1264                                 req->buf, pParams->nInBufferSize))
 1265                         goto invalid;
 1266         if (hptiop_post_ioctl_command_mv(hba, req, pParams))
 1267                 goto invalid;
 1268 
 1269         if (hba->config_done == 1) {
 1270                 if (pParams->nOutBufferSize)
 1271                         if (copyout(req->buf +
 1272                                 ((pParams->nInBufferSize + 3) & ~3),
 1273                                 (void *)pParams->lpOutBuffer,
 1274                                 pParams->nOutBufferSize))
 1275                                 goto invalid;
 1276 
 1277                 if (pParams->lpBytesReturned)
 1278                         if (copyout(&req->bytes_returned,
 1279                                 (void*)pParams->lpBytesReturned,
 1280                                 sizeof(u_int32_t)))
 1281                                 goto invalid;
 1282                 hptiop_unlock_adapter(hba);
 1283                 return 0;
 1284         } else {
 1285 invalid:
 1286                 hptiop_unlock_adapter(hba);
 1287                 return EFAULT;
 1288         }
 1289 }
 1290 
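/*
 * MVFrey ioctl path: requests are delivered through a circular
 * inbound list.  The low 14 bits of inlist_wptr index the list; on
 * wrap past list_count the index resets to 0 and CL_POINTER_TOGGLE is
 * flipped, which presumably lets the firmware distinguish a full ring
 * from an empty one.  The read-back of inbound_write_ptr flushes the
 * posted write.
 */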
 1291 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
 1292                                 struct hpt_iop_request_ioctl_command *req,
 1293                                 struct hpt_iop_ioctl_param *pParams)
 1294 {
 1295         u_int64_t phy_addr;
 1296         u_int32_t index;
 1297 
 1298         phy_addr = hba->ctlcfgcmd_phy;
 1299 
 1300         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
 1301                         (hba->max_request_size -
 1302                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
 1303                 device_printf(hba->pcidev, "request size beyond max value\n");
 1304                 return -1;
 1305         }
 1306 
 1307         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
 1308         req->inbuf_size = pParams->nInBufferSize;
 1309         req->outbuf_size = pParams->nOutBufferSize;
 1310         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
 1311                                         + pParams->nInBufferSize;
 1312 
 1313         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
 1314         req->header.result = IOP_RESULT_PENDING;
 1315 
 1316         req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
 1317                                                 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
 1318                                                 | IOP_REQUEST_FLAG_ADDR_BITS
 1319                                                 | ((phy_addr >> 16) & 0xffff0000);
 1320         req->header.context = ((phy_addr & 0xffffffff) << 32 )
 1321                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
 1322 
 1323         hba->u.mvfrey.inlist_wptr++;
 1324         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
 1325 
 1326         if (index == hba->u.mvfrey.list_count) {
 1327                 index = 0;
 1328                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
 1329                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
 1330         }
 1331 
 1332         hba->u.mvfrey.inlist[index].addr = phy_addr;
 1333         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
 1334 
 1335         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
 1336         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
 1337 
 1338         while (hba->config_done == 0) {
 1339                 if (hptiop_sleep(hba, req, PPAUSE,
 1340                         "hptctl", HPT_OSM_TIMEOUT)==0)
 1341                         continue;
 1342                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
 1343         }
 1344         return 0;
 1345 }
 1346 
 1347 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
 1348                                 struct hpt_iop_ioctl_param *pParams)
 1349 {
 1350         struct hpt_iop_request_ioctl_command *req;
 1351 
 1352         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
 1353                 (pParams->Magic != HPT_IOCTL_MAGIC32))
 1354                 return EFAULT;
 1355 
 1356         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
 1357         hba->config_done = 0;
 1358         hptiop_lock_adapter(hba);
 1359         if (pParams->nInBufferSize)
 1360                 if (copyin((void *)pParams->lpInBuffer,
 1361                                 req->buf, pParams->nInBufferSize))
 1362                         goto invalid;
 1363         if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
 1364                 goto invalid;
 1365 
 1366         if (hba->config_done == 1) {
 1367                 if (pParams->nOutBufferSize)
 1368                         if (copyout(req->buf +
 1369                                 ((pParams->nInBufferSize + 3) & ~3),
 1370                                 (void *)pParams->lpOutBuffer,
 1371                                 pParams->nOutBufferSize))
 1372                                 goto invalid;
 1373 
 1374                 if (pParams->lpBytesReturned)
 1375                         if (copyout(&req->bytes_returned,
 1376                                 (void*)pParams->lpBytesReturned,
 1377                                 sizeof(u_int32_t)))
 1378                                 goto invalid;
 1379                 hptiop_unlock_adapter(hba);
 1380                 return 0;
 1381         } else {
 1382 invalid:
 1383                 hptiop_unlock_adapter(hba);
 1384                 return EFAULT;
 1385         }
 1386 }
 1387 
 1388 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
 1389 {
 1390         union ccb           *ccb;
 1391 
 1392         if ((ccb = xpt_alloc_ccb()) == NULL)
 1393                 return(ENOMEM);
 1394         if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
 1395                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 1396                 xpt_free_ccb(ccb);
 1397                 return(EIO);
 1398         }
 1399         xpt_rescan(ccb);
 1400         return(0);
 1401 }
 1402 
 1403 static  bus_dmamap_callback_t   hptiop_map_srb;
 1404 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
 1405 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
 1406 static  bus_dmamap_callback_t   hptiop_mvfrey_map_ctlcfg;
 1407 
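/*
 * PCI resource setup.  The rid values are PCI config-space BAR
 * offsets: 0x10 is PCIR_BAR(0) and 0x18 is PCIR_BAR(2).  ITL parts
 * map everything through BAR0; the Marvell families use BAR0 for the
 * register block (MVFrey: the config area) and BAR2 for the message
 * unit.
 */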
 1408 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
 1409 {
 1410         hba->bar0_rid = 0x10;
 1411         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1412                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1413 
 1414         if (hba->bar0_res == NULL) {
 1415                 device_printf(hba->pcidev,
 1416                         "failed to get iop base address.\n");
 1417                 return -1;
 1418         }
 1419         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1420         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1421         hba->u.itl.mu = (struct hpt_iopmu_itl *)
 1422                                 rman_get_virtual(hba->bar0_res);
 1423 
 1424         if (!hba->u.itl.mu) {
 1425                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1426                                         hba->bar0_rid, hba->bar0_res);
 1427                 device_printf(hba->pcidev, "alloc mem res failed\n");
 1428                 return -1;
 1429         }
 1430 
 1431         return 0;
 1432 }
 1433 
 1434 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
 1435 {
 1436         hba->bar0_rid = 0x10;
 1437         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1438                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1439 
 1440         if (hba->bar0_res == NULL) {
 1441                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
 1442                 return -1;
 1443         }
 1444         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1445         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1446         hba->u.mv.regs = (struct hpt_iopmv_regs *)
 1447                                 rman_get_virtual(hba->bar0_res);
 1448 
 1449         if (!hba->u.mv.regs) {
 1450                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1451                                         hba->bar0_rid, hba->bar0_res);
 1452                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
 1453                 return -1;
 1454         }
 1455 
 1456         hba->bar2_rid = 0x18;
 1457         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
 1458                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
 1459 
 1460         if (hba->bar2_res == NULL) {
 1461                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1462                                         hba->bar0_rid, hba->bar0_res);
 1463                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
 1464                 return -1;
 1465         }
 1466 
 1467         hba->bar2t = rman_get_bustag(hba->bar2_res);
 1468         hba->bar2h = rman_get_bushandle(hba->bar2_res);
 1469         hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
 1470 
 1471         if (!hba->u.mv.mu) {
 1472                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1473                                         hba->bar0_rid, hba->bar0_res);
 1474                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1475                                         hba->bar2_rid, hba->bar2_res);
 1476                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
 1477                 return -1;
 1478         }
 1479 
 1480         return 0;
 1481 }
 1482 
 1483 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
 1484 {
 1485         hba->bar0_rid = 0x10;
 1486         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1487                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1488 
 1489         if (hba->bar0_res == NULL) {
 1490                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
 1491                 return -1;
 1492         }
 1493         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1494         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1495         hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
 1496                                 rman_get_virtual(hba->bar0_res);
 1497 
 1498         if (!hba->u.mvfrey.config) {
 1499                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1500                                         hba->bar0_rid, hba->bar0_res);
 1501                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
 1502                 return -1;
 1503         }
 1504 
 1505         hba->bar2_rid = 0x18;
 1506         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
 1507                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
 1508 
 1509         if (hba->bar2_res == NULL) {
 1510                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1511                                         hba->bar0_rid, hba->bar0_res);
 1512                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
 1513                 return -1;
 1514         }
 1515 
 1516         hba->bar2t = rman_get_bustag(hba->bar2_res);
 1517         hba->bar2h = rman_get_bushandle(hba->bar2_res);
 1518         hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)
 1519                                 rman_get_virtual(hba->bar2_res);
 1520 
 1521         if (!hba->u.mvfrey.mu) {
 1522                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1523                                         hba->bar0_rid, hba->bar0_res);
 1524                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1525                                         hba->bar2_rid, hba->bar2_res);
 1526                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
 1527                 return -1;
 1528         }
 1529 
 1530         return 0;
 1531 }
 1532 
 1533 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
 1534 {
 1535         if (hba->bar0_res)
 1536                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1537                         hba->bar0_rid, hba->bar0_res);
 1538 }
 1539 
 1540 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
 1541 {
 1542         if (hba->bar0_res)
 1543                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1544                         hba->bar0_rid, hba->bar0_res);
 1545         if (hba->bar2_res)
 1546                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1547                         hba->bar2_rid, hba->bar2_res);
 1548 }
 1549 
 1550 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
 1551 {
 1552         if (hba->bar0_res)
 1553                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1554                         hba->bar0_rid, hba->bar0_res);
 1555         if (hba->bar2_res)
 1556                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1557                         hba->bar2_rid, hba->bar2_res);
 1558 }
 1559 
 1560 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
 1561 {
 1562         if (bus_dma_tag_create(hba->parent_dmat,
 1563                                 1,
 1564                                 0,
 1565                                 BUS_SPACE_MAXADDR_32BIT,
 1566                                 BUS_SPACE_MAXADDR,
 1567                                 NULL, NULL,
 1568                                 0x800 - 0x8,
 1569                                 1,
 1570                                 BUS_SPACE_MAXSIZE_32BIT,
 1571                                 BUS_DMA_ALLOCNOW,
 1572                                 NULL,
 1573                                 NULL,
 1574                                 &hba->ctlcfg_dmat)) {
 1575                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
 1576                 return -1;
 1577         }
 1578 
 1579         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
 1580                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1581                 &hba->ctlcfg_dmamap) != 0) {
 1582                         device_printf(hba->pcidev,
 1583                                         "bus_dmamem_alloc failed!\n");
 1584                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1585                         return -1;
 1586         }
 1587 
 1588         if (bus_dmamap_load(hba->ctlcfg_dmat,
 1589                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
 1590                         MVIOP_IOCTLCFG_SIZE,
 1591                         hptiop_mv_map_ctlcfg, hba, 0)) {
 1592                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
 1593                 if (hba->ctlcfg_dmat) {
 1594                         bus_dmamem_free(hba->ctlcfg_dmat,
 1595                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1596                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1597                 }
 1598                 return -1;
 1599         }
 1600 
 1601         return 0;
 1602 }
 1603 
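/*
 * MVFrey internal memory layout (a single DMA allocation): a
 * 0x800-byte control/config request area, then list_count inbound
 * list entries, list_count outbound list entries, and a trailing int
 * shadowing the outbound copy pointer.  list_count itself comes from
 * the upper 16 bits of inbound_conf_ctl.
 */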
 1604 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
 1605 {
 1606         u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
 1607 
 1608         list_count >>= 16;
 1609 
 1610         if (list_count == 0) {
 1611                 return -1;
 1612         }
 1613 
 1614         hba->u.mvfrey.list_count = list_count;
 1615         hba->u.mvfrey.internal_mem_size = 0x800
 1616                                                         + list_count * sizeof(struct mvfrey_inlist_entry)
 1617                                                         + list_count * sizeof(struct mvfrey_outlist_entry)
 1618                                                         + sizeof(int);
 1619         if (bus_dma_tag_create(hba->parent_dmat,
 1620                                 1,
 1621                                 0,
 1622                                 BUS_SPACE_MAXADDR_32BIT,
 1623                                 BUS_SPACE_MAXADDR,
 1624                                 NULL, NULL,
 1625                                 hba->u.mvfrey.internal_mem_size,
 1626                                 1,
 1627                                 BUS_SPACE_MAXSIZE_32BIT,
 1628                                 BUS_DMA_ALLOCNOW,
 1629                                 NULL,
 1630                                 NULL,
 1631                                 &hba->ctlcfg_dmat)) {
 1632                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
 1633                 return -1;
 1634         }
 1635 
 1636         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
 1637                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1638                 &hba->ctlcfg_dmamap) != 0) {
 1639                         device_printf(hba->pcidev,
 1640                                         "bus_dmamem_alloc failed!\n");
 1641                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1642                         return -1;
 1643         }
 1644 
 1645         if (bus_dmamap_load(hba->ctlcfg_dmat,
 1646                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
 1647                         hba->u.mvfrey.internal_mem_size,
 1648                         hptiop_mvfrey_map_ctlcfg, hba, 0)) {
 1649                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
 1650                 if (hba->ctlcfg_dmat) {
 1651                         bus_dmamem_free(hba->ctlcfg_dmat,
 1652                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1653                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1654                 }
 1655                 return -1;
 1656         }
 1657 
 1658         return 0;
 1659 }
 1660 
 1661 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
 1662         return 0;
 1663 }
 1664 
 1665 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
 1666 {
 1667         if (hba->ctlcfg_dmat) {
 1668                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 1669                 bus_dmamem_free(hba->ctlcfg_dmat,
 1670                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1671                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1672         }
 1673 
 1674         return 0;
 1675 }
 1676 
 1677 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
 1678 {
 1679         if (hba->ctlcfg_dmat) {
 1680                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 1681                 bus_dmamem_free(hba->ctlcfg_dmat,
 1682                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1683                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1684         }
 1685 
 1686         return 0;
 1687 }
 1688 
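/*
 * After a RESET_COMM message the inbound/outbound list base addresses
 * must be reprogrammed and both pointers reinitialized to the last
 * slot with the toggle bit set, so that the first post wraps to
 * index 0.
 */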
 1689 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
 1690 {
 1691         u_int32_t i = 100;
 1692 
 1693         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
 1694                 return -1;
 1695 
 1696         /* wait 100ms for MCU ready */
 1697         while (i--) {
 1698                 DELAY(1000);
 1699         }
 1700 
 1701         BUS_SPACE_WRT4_MVFREY2(inbound_base,
 1702                                                         hba->u.mvfrey.inlist_phy & 0xffffffff);
 1703         BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
 1704                                                         (hba->u.mvfrey.inlist_phy >> 16) >> 16);
 1705 
 1706         BUS_SPACE_WRT4_MVFREY2(outbound_base,
 1707                                                         hba->u.mvfrey.outlist_phy & 0xffffffff);
 1708         BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
 1709                                                         (hba->u.mvfrey.outlist_phy >> 16) >> 16);
 1710 
 1711         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
 1712                                                         hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
 1713         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
 1714                                                         (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
 1715 
 1716         hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
 1717                                                                 | CL_POINTER_TOGGLE;
 1718         *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
 1719                                                                 | CL_POINTER_TOGGLE;
 1720         hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
 1721         
 1722         return 0;
 1723 }
 1724 
 1725 /*
 1726  * CAM driver interface
 1727  */
 1728 static device_method_t driver_methods[] = {
 1729         /* Device interface */
 1730         DEVMETHOD(device_probe,     hptiop_probe),
 1731         DEVMETHOD(device_attach,    hptiop_attach),
 1732         DEVMETHOD(device_detach,    hptiop_detach),
 1733         DEVMETHOD(device_shutdown,  hptiop_shutdown),
 1734         { 0, 0 }
 1735 };
 1736 
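/*
 * Per-family operation tables.  All chip-specific behaviour is
 * dispatched through hba->ops, chosen in hptiop_probe() from the PCI
 * device ID.  Entries left as 0 (internal_memalloc for ITL,
 * reset_comm for ITL and MV) are never called for that family.
 */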
 1737 static struct hptiop_adapter_ops hptiop_itl_ops = {
 1738         .family            = INTEL_BASED_IOP,
 1739         .iop_wait_ready    = hptiop_wait_ready_itl,
 1740         .internal_memalloc = 0,
 1741         .internal_memfree  = hptiop_internal_memfree_itl,
 1742         .alloc_pci_res     = hptiop_alloc_pci_res_itl,
 1743         .release_pci_res   = hptiop_release_pci_res_itl,
 1744         .enable_intr       = hptiop_enable_intr_itl,
 1745         .disable_intr      = hptiop_disable_intr_itl,
 1746         .get_config        = hptiop_get_config_itl,
 1747         .set_config        = hptiop_set_config_itl,
 1748         .iop_intr          = hptiop_intr_itl,
 1749         .post_msg          = hptiop_post_msg_itl,
 1750         .post_req          = hptiop_post_req_itl,
 1751         .do_ioctl          = hptiop_do_ioctl_itl,
 1752         .reset_comm        = 0,
 1753 };
 1754 
 1755 static struct hptiop_adapter_ops hptiop_mv_ops = {
 1756         .family            = MV_BASED_IOP,
 1757         .iop_wait_ready    = hptiop_wait_ready_mv,
 1758         .internal_memalloc = hptiop_internal_memalloc_mv,
 1759         .internal_memfree  = hptiop_internal_memfree_mv,
 1760         .alloc_pci_res     = hptiop_alloc_pci_res_mv,
 1761         .release_pci_res   = hptiop_release_pci_res_mv,
 1762         .enable_intr       = hptiop_enable_intr_mv,
 1763         .disable_intr      = hptiop_disable_intr_mv,
 1764         .get_config        = hptiop_get_config_mv,
 1765         .set_config        = hptiop_set_config_mv,
 1766         .iop_intr          = hptiop_intr_mv,
 1767         .post_msg          = hptiop_post_msg_mv,
 1768         .post_req          = hptiop_post_req_mv,
 1769         .do_ioctl          = hptiop_do_ioctl_mv,
 1770         .reset_comm        = 0,
 1771 };
 1772 
 1773 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
 1774         .family            = MVFREY_BASED_IOP,
 1775         .iop_wait_ready    = hptiop_wait_ready_mvfrey,
 1776         .internal_memalloc = hptiop_internal_memalloc_mvfrey,
 1777         .internal_memfree  = hptiop_internal_memfree_mvfrey,
 1778         .alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
 1779         .release_pci_res   = hptiop_release_pci_res_mvfrey,
 1780         .enable_intr       = hptiop_enable_intr_mvfrey,
 1781         .disable_intr      = hptiop_disable_intr_mvfrey,
 1782         .get_config        = hptiop_get_config_mvfrey,
 1783         .set_config        = hptiop_set_config_mvfrey,
 1784         .iop_intr          = hptiop_intr_mvfrey,
 1785         .post_msg          = hptiop_post_msg_mvfrey,
 1786         .post_req          = hptiop_post_req_mvfrey,
 1787         .do_ioctl          = hptiop_do_ioctl_mvfrey,
 1788         .reset_comm        = hptiop_reset_comm_mvfrey,
 1789 };
 1790 
 1791 static driver_t hptiop_pci_driver = {
 1792         driver_name,
 1793         driver_methods,
 1794         sizeof(struct hpt_iop_hba)
 1795 };
 1796 
 1797 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, 0, 0);
 1798 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
 1799 
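/*
 * Probe: match HighPoint (vendor 0x1103) device IDs to an IOP family.
 * The switch relies on case fall-through: the SAS IDs set sas = 1 and
 * then fall into the SATA IDs of the same family to select the ops
 * table.
 */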
 1800 static int hptiop_probe(device_t dev)
 1801 {
 1802         struct hpt_iop_hba *hba;
 1803         u_int32_t id;
 1804         static char buf[256];
 1805         int sas = 0;
 1806         struct hptiop_adapter_ops *ops;
 1807 
 1808         if (pci_get_vendor(dev) != 0x1103)
 1809                 return (ENXIO);
 1810 
 1811         id = pci_get_device(dev);
 1812 
 1813         switch (id) {
 1814                 case 0x4520:
 1815                 case 0x4521:
 1816                 case 0x4522:
 1817                         sas = 1;        /* FALLTHROUGH */
 1818                 case 0x3620:
 1819                 case 0x3622:
 1820                 case 0x3640:
 1821                         ops = &hptiop_mvfrey_ops;
 1822                         break;
 1823                 case 0x4210:
 1824                 case 0x4211:
 1825                 case 0x4310:
 1826                 case 0x4311:
 1827                 case 0x4320:
 1828                 case 0x4321:
 1829                 case 0x4322:
 1830                         sas = 1;        /* FALLTHROUGH */
 1831                 case 0x3220:
 1832                 case 0x3320:
 1833                 case 0x3410:
 1834                 case 0x3520:
 1835                 case 0x3510:
 1836                 case 0x3511:
 1837                 case 0x3521:
 1838                 case 0x3522:
 1839                 case 0x3530:
 1840                 case 0x3540:
 1841                 case 0x3560:
 1842                         ops = &hptiop_itl_ops;
 1843                         break;
 1844                 case 0x3020:
 1845                 case 0x3120:
 1846                 case 0x3122:
 1847                         ops = &hptiop_mv_ops;
 1848                         break;
 1849                 default:
 1850                         return (ENXIO);
 1851         }
 1852 
 1853         device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
 1854                 pci_get_bus(dev), pci_get_slot(dev),
 1855                 pci_get_function(dev), pci_get_irq(dev));
 1856 
 1857         sprintf(buf, "RocketRAID %x %s Controller",
 1858                                 id, sas ? "SAS" : "SATA");
 1859         device_set_desc_copy(dev, buf);
 1860 
 1861         hba = (struct hpt_iop_hba *)device_get_softc(dev);
 1862         bzero(hba, sizeof(struct hpt_iop_hba));
 1863         hba->ops = ops;
 1864 
 1865         KdPrint(("hba->ops=%p\n", hba->ops));
 1866         return 0;
 1867 }
 1868 
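/*
 * Attach: map registers, wait for the IOP, create the DMA tags
 * (parent, io for S/G-mapped transfers, srb for the request pool),
 * register a CAM SIM/bus/path, push the host configuration to the
 * firmware, hook up the interrupt, and expose the ioctl device node.
 * Failures unwind through the labels at the end in reverse order.
 */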
 1869 static int hptiop_attach(device_t dev)
 1870 {
 1871         struct make_dev_args args;
 1872         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
 1873         struct hpt_iop_request_get_config  iop_config;
 1874         struct hpt_iop_request_set_config  set_config;
 1875         int rid = 0;
 1876         struct cam_devq *devq;
 1877         struct ccb_setasync ccb;
 1878         u_int32_t unit = device_get_unit(dev);
 1879 
 1880         device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
 1881                         unit, driver_version);
 1882 
 1883         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
 1884                 pci_get_bus(dev), pci_get_slot(dev),
 1885                 pci_get_function(dev), hba->ops));
 1886 
 1887         pci_enable_busmaster(dev);
 1888         hba->pcidev = dev;
 1889         hba->pciunit = unit;
 1890 
 1891         if (hba->ops->alloc_pci_res(hba))
 1892                 return ENXIO;
 1893 
 1894         if (hba->ops->iop_wait_ready(hba, 2000)) {
 1895                 device_printf(dev, "adapter is not ready\n");
 1896                 goto release_pci_res;
 1897         }
 1898 
 1899         mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
 1900 
 1901         if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
 1902                         1,  /* alignment */
 1903                         0, /* boundary */
 1904                         BUS_SPACE_MAXADDR,  /* lowaddr */
 1905                         BUS_SPACE_MAXADDR,  /* highaddr */
 1906                         NULL, NULL,         /* filter, filterarg */
 1907                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1908                         BUS_SPACE_UNRESTRICTED, /* nsegments */
 1909                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1910                         0,      /* flags */
 1911                         NULL,   /* lockfunc */
 1912                         NULL,       /* lockfuncarg */
 1913                         &hba->parent_dmat   /* tag */))
 1914         {
 1915                 device_printf(dev, "alloc parent_dmat failed\n");
 1916                 goto release_pci_res;
 1917         }
 1918 
 1919         if (hba->ops->family == MV_BASED_IOP) {
 1920                 if (hba->ops->internal_memalloc(hba)) {
 1921                         device_printf(dev, "alloc srb_dmat failed\n");
 1922                         goto destroy_parent_tag;
 1923                 }
 1924         }
 1925         
 1926         if (hba->ops->get_config(hba, &iop_config)) {
 1927                 device_printf(dev, "get iop config failed.\n");
 1928                 goto get_config_failed;
 1929         }
 1930 
 1931         hba->firmware_version = iop_config.firmware_version;
 1932         hba->interface_version = iop_config.interface_version;
 1933         hba->max_requests = iop_config.max_requests;
 1934         hba->max_devices = iop_config.max_devices;
 1935         hba->max_request_size = iop_config.request_size;
 1936         hba->max_sg_count = iop_config.max_sg_count;
 1937 
 1938         if (hba->ops->family == MVFREY_BASED_IOP) {
 1939                 if (hba->ops->internal_memalloc(hba)) {
 1940                         device_printf(dev, "alloc srb_dmat failed\n");
 1941                         goto destroy_parent_tag;
 1942                 }
 1943                 if (hba->ops->reset_comm(hba)) {
 1944                         device_printf(dev, "reset comm failed\n");
 1945                         goto get_config_failed;
 1946                 }
 1947         }
 1948 
 1949         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
 1950                         4,  /* alignment */
 1951                         BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
 1952                         BUS_SPACE_MAXADDR,  /* lowaddr */
 1953                         BUS_SPACE_MAXADDR,  /* highaddr */
 1954                         NULL, NULL,         /* filter, filterarg */
 1955                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
 1956                         hba->max_sg_count,  /* nsegments */
 1957                         0x20000,    /* maxsegsize */
 1958                         BUS_DMA_ALLOCNOW,       /* flags */
 1959                         busdma_lock_mutex,  /* lockfunc */
 1960                         &hba->lock,     /* lockfuncarg */
 1961                         &hba->io_dmat   /* tag */))
 1962         {
 1963                 device_printf(dev, "alloc io_dmat failed\n");
 1964                 goto get_config_failed;
 1965         }
 1966 
 1967         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
 1968                         1,  /* alignment */
 1969                         0, /* boundary */
 1970                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1971                         BUS_SPACE_MAXADDR,  /* highaddr */
 1972                         NULL, NULL,         /* filter, filterarg */
 1973                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
 1974                         1,  /* nsegments */
 1975                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1976                         0,      /* flags */
 1977                         NULL,   /* lockfunc */
 1978                         NULL,       /* lockfuncarg */
 1979                         &hba->srb_dmat  /* tag */))
 1980         {
 1981                 device_printf(dev, "alloc srb_dmat failed\n");
 1982                 goto destroy_io_dmat;
 1983         }
 1984 
 1985         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
 1986                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1987                         &hba->srb_dmamap) != 0)
 1988         {
 1989                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
 1990                 goto destroy_srb_dmat;
 1991         }
 1992 
 1993         if (bus_dmamap_load(hba->srb_dmat,
 1994                         hba->srb_dmamap, hba->uncached_ptr,
 1995                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
 1996                         hptiop_map_srb, hba, 0))
 1997         {
 1998                 device_printf(dev, "bus_dmamap_load failed!\n");
 1999                 goto srb_dmamem_free;
 2000         }
 2001 
 2002         if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
 2003                 device_printf(dev, "cam_simq_alloc failed\n");
 2004                 goto srb_dmamap_unload;
 2005         }
 2006 
 2007         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
 2008                         hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
 2009         if (!hba->sim) {
 2010                 device_printf(dev, "cam_sim_alloc failed\n");
 2011                 cam_simq_free(devq);
 2012                 goto srb_dmamap_unload;
 2013         }
 2014         hptiop_lock_adapter(hba);
 2015         if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
 2016         {
 2017                 device_printf(dev, "xpt_bus_register failed\n");
 2018                 goto free_cam_sim;
 2019         }
 2020 
 2021         if (xpt_create_path(&hba->path, /*periph */ NULL,
 2022                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
 2023                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 2024                 device_printf(dev, "xpt_create_path failed\n");
 2025                 goto deregister_xpt_bus;
 2026         }
 2027         hptiop_unlock_adapter(hba);
 2028 
 2029         bzero(&set_config, sizeof(set_config));
 2030         set_config.iop_id = unit;
 2031         set_config.vbus_id = cam_sim_path(hba->sim);
 2032         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
 2033 
 2034         if (hba->ops->set_config(hba, &set_config)) {
 2035                 device_printf(dev, "set iop config failed.\n");
 2036                 goto free_hba_path;
 2037         }
 2038 
 2039         memset(&ccb, 0, sizeof(ccb));
 2040         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
 2041         ccb.ccb_h.func_code = XPT_SASYNC_CB;
 2042         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
 2043         ccb.callback = hptiop_async;
 2044         ccb.callback_arg = hba->sim;
 2045         xpt_action((union ccb *)&ccb);
 2046 
 2047         rid = 0;
 2048         if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
 2049                         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
 2050                 device_printf(dev, "allocate irq failed!\n");
 2051                 goto free_hba_path;
 2052         }
 2053 
 2054         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
 2055                                 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
 2056         {
 2057                 device_printf(dev, "allocate intr function failed!\n");
 2058                 goto free_irq_resource;
 2059         }
 2060 
 2061         if (hptiop_send_sync_msg(hba,
 2062                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
 2063                 device_printf(dev, "fail to start background task\n");
 2064                 goto teardown_irq_resource;
 2065         }
 2066 
 2067         hba->ops->enable_intr(hba);
 2068         hba->initialized = 1;
 2069 
 2070         make_dev_args_init(&args);
 2071         args.mda_devsw = &hptiop_cdevsw;
 2072         args.mda_uid = UID_ROOT;
 2073         args.mda_gid = GID_WHEEL /*GID_OPERATOR*/;
 2074         args.mda_mode = S_IRUSR | S_IWUSR;
 2075         args.mda_si_drv1 = hba;
 2076 
 2077         make_dev_s(&args, &hba->ioctl_dev, "%s%d", driver_name, unit);
 2078 
 2079         return 0;
 2080 
 2081 
 2082 teardown_irq_resource:
 2083         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
 2084 
 2085 free_irq_resource:
 2086         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
 2087 
 2088         hptiop_lock_adapter(hba);
 2089 free_hba_path:
 2090         xpt_free_path(hba->path);
 2091 
 2092 deregister_xpt_bus:
 2093         xpt_bus_deregister(cam_sim_path(hba->sim));
 2094 
 2095 free_cam_sim:
 2096         cam_sim_free(hba->sim, /*free devq*/ TRUE);
 2097         hptiop_unlock_adapter(hba);
 2098 
 2099 srb_dmamap_unload:
 2100         if (hba->uncached_ptr)
 2101                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
 2102 
 2103 srb_dmamem_free:
 2104         if (hba->uncached_ptr)
 2105                 bus_dmamem_free(hba->srb_dmat,
 2106                         hba->uncached_ptr, hba->srb_dmamap);
 2107 
 2108 destroy_srb_dmat:
 2109         if (hba->srb_dmat)
 2110                 bus_dma_tag_destroy(hba->srb_dmat);
 2111 
 2112 destroy_io_dmat:
 2113         if (hba->io_dmat)
 2114                 bus_dma_tag_destroy(hba->io_dmat);
 2115 
 2116 get_config_failed:
 2117         hba->ops->internal_memfree(hba);
 2118 
 2119 destroy_parent_tag:
 2120         if (hba->parent_dmat)
 2121                 bus_dma_tag_destroy(hba->parent_dmat);
 2122 
 2123 release_pci_res:
 2124         if (hba->ops->release_pci_res)
 2125                 hba->ops->release_pci_res(hba);
 2126 
 2127         return ENXIO;
 2128 }
 2129 
 2130 static int hptiop_detach(device_t dev)
 2131 {
 2132         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
 2133         int i;
 2134         int error = EBUSY;
 2135 
 2136         hptiop_lock_adapter(hba);
 2137         for (i = 0; i < hba->max_devices; i++)
 2138                 if (hptiop_os_query_remove_device(hba, i)) {
 2139                         device_printf(dev, "%d file system is busy. id=%d\n",
 2140                                                 hba->pciunit, i);
 2141                         goto out;
 2142                 }
 2143 
 2144         if ((error = hptiop_shutdown(dev)) != 0)
 2145                 goto out;
 2146         if (hptiop_send_sync_msg(hba,
 2147                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
 2148                 goto out;
 2149         hptiop_unlock_adapter(hba);
 2150 
 2151         hptiop_release_resource(hba);
 2152         return (0);
 2153 out:
 2154         hptiop_unlock_adapter(hba);
 2155         return error;
 2156 }
 2157 
 2158 static int hptiop_shutdown(device_t dev)
 2159 {
 2160         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
 2161 
 2162         int error = 0;
 2163 
 2164         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
 2165                 device_printf(dev, "%d device is busy\n", hba->pciunit);
 2166                 return EBUSY;
 2167         }
 2168 
 2169         hba->ops->disable_intr(hba);
 2170 
 2171         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
 2172                 error = EBUSY;
 2173 
 2174         return error;
 2175 }
 2176 
 2177 static void hptiop_pci_intr(void *arg)
 2178 {
 2179         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
 2180         hptiop_lock_adapter(hba);
 2181         hba->ops->iop_intr(hba);
 2182         hptiop_unlock_adapter(hba);
 2183 }
 2184 
 2185 static void hptiop_poll(struct cam_sim *sim)
 2186 {
 2187         struct hpt_iop_hba *hba;
 2188 
 2189         hba = cam_sim_softc(sim);
 2190         hba->ops->iop_intr(hba);
 2191 }
 2192 
 2193 static void hptiop_async(void * callback_arg, u_int32_t code,
 2194                                         struct cam_path * path, void * arg)
 2195 {
 2196 }
 2197 
 2198 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
 2199 {
 2200         BUS_SPACE_WRT4_ITL(outbound_intmask,
 2201                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
 2202 }
 2203 
 2204 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
 2205 {
 2206         u_int32_t int_mask;
 2207 
 2208         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
 2209                         
 2210         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
 2211                         | MVIOP_MU_OUTBOUND_INT_MSG;
 2212         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
 2213 }
 2214 
 2215 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
 2216 {
 2217         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
 2218         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
 2219 
 2220         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
 2221         BUS_SPACE_RD4_MVFREY2(isr_enable);
 2222 
 2223         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
 2224         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
 2225 }
 2226 
 2227 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
 2228 {
 2229         u_int32_t int_mask;
 2230 
 2231         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
 2232 
 2233         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
 2234         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
 2235         BUS_SPACE_RD4_ITL(outbound_intstatus);
 2236 }
 2237 
 2238 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
 2239 {
 2240         u_int32_t int_mask;
 2241         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
 2242         
 2243         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
 2244                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
 2245         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
 2246         BUS_SPACE_RD4_MV0(outbound_intmask);
 2247 }
 2248 
 2249 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
 2250 {
 2251         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
 2252         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
 2253 
 2254         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
 2255         BUS_SPACE_RD4_MVFREY2(isr_enable);
 2256 
 2257         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
 2258         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
 2259 }
 2260 
 2261 static void hptiop_reset_adapter(void *argv)
 2262 {
 2263         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
 2264         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
 2265                 return;
 2266         hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
 2267 }
 2268 
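/*
 * SRBs are kept on a singly-linked free list protected by the adapter
 * lock; allocation and release are O(1) pop/push operations.
 */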
 2269 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
 2270 {
 2271         struct hpt_iop_srb * srb;
 2272 
 2273         if (hba->srb_list) {
 2274                 srb = hba->srb_list;
 2275                 hba->srb_list = srb->next;
 2276                 return srb;
 2277         }
 2278 
 2279         return NULL;
 2280 }
 2281 
 2282 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
 2283 {
 2284         srb->next = hba->srb_list;
 2285         hba->srb_list = srb;
 2286 }
 2287 
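/*
 * CAM action entry point.  For XPT_SCSI_IO the CCB's data buffer is
 * mapped with bus_dmamap_load_ccb(); the actual post to the IOP
 * happens in the hptiop_post_scsi_command callback, so EINPROGRESS
 * (callback deferred) is not treated as an error here.
 */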
 2288 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
 2289 {
 2290         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
 2291         struct hpt_iop_srb * srb;
 2292         int error;
 2293 
 2294         switch (ccb->ccb_h.func_code) {
 2295 
 2296         case XPT_SCSI_IO:
 2297                 if (ccb->ccb_h.target_lun != 0 ||
 2298                         ccb->ccb_h.target_id >= hba->max_devices ||
 2299                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
 2300                 {
 2301                         ccb->ccb_h.status = CAM_TID_INVALID;
 2302                         xpt_done(ccb);
 2303                         return;
 2304                 }
 2305 
 2306                 if ((srb = hptiop_get_srb(hba)) == NULL) {
 2307                         device_printf(hba->pcidev, "srb allocation failed\n");
 2308                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2309                         xpt_done(ccb);
 2310                         return;
 2311                 }
 2312 
 2313                 srb->ccb = ccb;
 2314                 error = bus_dmamap_load_ccb(hba->io_dmat,
 2315                                             srb->dma_map,
 2316                                             ccb,
 2317                                             hptiop_post_scsi_command,
 2318                                             srb,
 2319                                             0);
 2320 
 2321                 if (error && error != EINPROGRESS) {
 2322                         device_printf(hba->pcidev,
 2323                                 "%d bus_dmamap_load error %d\n",
 2324                                 hba->pciunit, error);
 2325                         xpt_freeze_simq(hba->sim, 1);
 2326                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2327                         hptiop_free_srb(hba, srb);
 2328                         xpt_done(ccb);
 2329                         return;
 2330                 }
 2331 
 2332                 return;
 2333 
 2334         case XPT_RESET_BUS:
 2335                 device_printf(hba->pcidev, "reset adapter\n");
 2336                 hba->msg_done = 0;
 2337                 hptiop_reset_adapter(hba);
 2338                 break;
 2339 
 2340         case XPT_GET_TRAN_SETTINGS:
 2341         case XPT_SET_TRAN_SETTINGS:
 2342                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2343                 break;
 2344 
 2345         case XPT_CALC_GEOMETRY:
 2346                 cam_calc_geometry(&ccb->ccg, 1);
 2347                 break;
 2348 
 2349         case XPT_PATH_INQ:
 2350         {
 2351                 struct ccb_pathinq *cpi = &ccb->cpi;
 2352 
 2353                 cpi->version_num = 1;
 2354                 cpi->hba_inquiry = PI_SDTR_ABLE;
 2355                 cpi->target_sprt = 0;
 2356                 cpi->hba_misc = PIM_NOBUSRESET;
 2357                 cpi->hba_eng_cnt = 0;
 2358                 cpi->max_target = hba->max_devices;
 2359                 cpi->max_lun = 0;
 2360                 cpi->unit_number = cam_sim_unit(sim);
 2361                 cpi->bus_id = cam_sim_bus(sim);
 2362                 cpi->initiator_id = hba->max_devices;
 2363                 cpi->base_transfer_speed = 3300;
 2364 
 2365                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 2366                 strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
 2367                 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 2368                 cpi->transport = XPORT_SPI;
 2369                 cpi->transport_version = 2;
 2370                 cpi->protocol = PROTO_SCSI;
 2371                 cpi->protocol_version = SCSI_REV_2;
 2372                 cpi->ccb_h.status = CAM_REQ_CMP;
 2373                 break;
 2374         }
 2375 
 2376         default:
 2377                 ccb->ccb_h.status = CAM_REQ_INVALID;
 2378                 break;
 2379         }
 2380 
 2381         xpt_done(ccb);
 2382         return;
 2383 }
 2384 
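/*
 * ITL request post.  SRBs flagged HPT_SRB_FLAG_HIGH_MEM_ACESS lie
 * outside the IOP's reach, so the request is built on the stack and
 * copied into an IOP-resident slot with bus_space_write_region_1();
 * otherwise it is built in place in the SRB and only the SRB's 32-bit
 * bus address (plus size-hint bits on newer firmware) is posted.
 */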
 2385 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
 2386                                 struct hpt_iop_srb *srb,
 2387                                 bus_dma_segment_t *segs, int nsegs)
 2388 {
 2389         int idx;
 2390         union ccb *ccb = srb->ccb;
 2391         u_int8_t *cdb;
 2392 
 2393         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2394                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2395         else
 2396                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2397 
 2398         KdPrint(("ccb=%p %x-%x-%x\n",
 2399                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
 2400 
 2401         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
 2402                 u_int32_t iop_req32;
 2403                 struct hpt_iop_request_scsi_command req;
 2404 
 2405                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
 2406 
 2407                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
 2408                         device_printf(hba->pcidev, "invalid req offset\n");
 2409                         ccb->ccb_h.status = CAM_BUSY;
 2410                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
 2411                         hptiop_free_srb(hba, srb);
 2412                         xpt_done(ccb);
 2413                         return;
 2414                 }
 2415 
 2416                 if (ccb->csio.dxfer_len && nsegs > 0) {
 2417                         struct hpt_iopsg *psg = req.sg_list;
 2418                         for (idx = 0; idx < nsegs; idx++, psg++) {
 2419                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
 2420                                 psg->size = segs[idx].ds_len;
 2421                                 psg->eot = 0;
 2422                         }
 2423                         psg[-1].eot = 1;
 2424                 }
 2425 
 2426                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
 2427 
 2428                 req.header.size =
 2429                                 offsetof(struct hpt_iop_request_scsi_command, sg_list)
 2430                                 + nsegs*sizeof(struct hpt_iopsg);
 2431                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2432                 req.header.flags = 0;
 2433                 req.header.result = IOP_RESULT_PENDING;
 2434                 req.header.context = (u_int64_t)(unsigned long)srb;
 2435                 req.dataxfer_length = ccb->csio.dxfer_len;
 2436                 req.channel =  0;
 2437                 req.target =  ccb->ccb_h.target_id;
 2438                 req.lun =  ccb->ccb_h.target_lun;
 2439 
 2440                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
 2441                         (u_int8_t *)&req, req.header.size);
 2442 
 2443                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2444                         bus_dmamap_sync(hba->io_dmat,
 2445                                 srb->dma_map, BUS_DMASYNC_PREREAD);
 2446                 }
 2447                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
 2448                         bus_dmamap_sync(hba->io_dmat,
 2449                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
 2450 
 2451                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
 2452         } else {
 2453                 struct hpt_iop_request_scsi_command *req;
 2454 
 2455                 req = (struct hpt_iop_request_scsi_command *)srb;
 2456                 if (ccb->csio.dxfer_len && nsegs > 0) {
 2457                         struct hpt_iopsg *psg = req->sg_list;
 2458                         for (idx = 0; idx < nsegs; idx++, psg++) {
 2459                                 psg->pci_address = 
 2460                                         (u_int64_t)segs[idx].ds_addr;
 2461                                 psg->size = segs[idx].ds_len;
 2462                                 psg->eot = 0;
 2463                         }
 2464                         psg[-1].eot = 1;
 2465                 }
 2466 
 2467                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2468 
 2469                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2470                 req->header.result = IOP_RESULT_PENDING;
 2471                 req->dataxfer_length = ccb->csio.dxfer_len;
 2472                 req->channel =  0;
 2473                 req->target =  ccb->ccb_h.target_id;
 2474                 req->lun =  ccb->ccb_h.target_lun;
 2475                 req->header.size =
 2476                         offsetof(struct hpt_iop_request_scsi_command, sg_list)
 2477                         + nsegs*sizeof(struct hpt_iopsg);
 2478                 req->header.context = (u_int64_t)srb->index |
 2479                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
 2480                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 2481 
 2482                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2483                         bus_dmamap_sync(hba->io_dmat,
 2484                                 srb->dma_map, BUS_DMASYNC_PREREAD);
 2485                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
 2486                         bus_dmamap_sync(hba->io_dmat,
 2487                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
 2488                 }
 2489 
 2490                 if (hba->firmware_version > 0x01020000
 2491                         || hba->interface_version > 0x01020000) {
 2492                         u_int32_t size_bits;
 2493 
 2494                         if (req->header.size < 256)
 2495                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
 2496                         else if (req->header.size < 512)
 2497                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
 2498                         else
 2499                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
 2500                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
 2501 
 2502                         BUS_SPACE_WRT4_ITL(inbound_queue,
 2503                                 (u_int32_t)srb->phy_addr | size_bits);
 2504                 } else
 2505                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
 2506                                 |IOPMU_QUEUE_ADDR_HOST_BIT);
 2507         }
 2508 }
 2509 
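/*
 * MV request post: the request is built in place in the SRB, the S/G
 * list filled from the DMA segments, and the SRB's bus address is
 * written to the inbound queue with the size (256-byte units, at most
 * 3) folded into the low bits, mirroring the ioctl path above.
 */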
 2510 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
 2511                                 struct hpt_iop_srb *srb,
 2512                                 bus_dma_segment_t *segs, int nsegs)
 2513 {
 2514         int idx, size;
 2515         union ccb *ccb = srb->ccb;
 2516         u_int8_t *cdb;
 2517         struct hpt_iop_request_scsi_command *req;
 2518         u_int64_t req_phy;
 2519 
 2520         req = (struct hpt_iop_request_scsi_command *)srb;
 2521         req_phy = srb->phy_addr;
 2522 
 2523         if (ccb->csio.dxfer_len && nsegs > 0) {
 2524                 struct hpt_iopsg *psg = req->sg_list;
 2525                 for (idx = 0; idx < nsegs; idx++, psg++) {
 2526                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
 2527                         psg->size = segs[idx].ds_len;
 2528                         psg->eot = 0;
 2529                 }
 2530                 psg[-1].eot = 1;
 2531         }
 2532         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2533                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2534         else
 2535                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2536 
 2537         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2538         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2539         req->header.result = IOP_RESULT_PENDING;
 2540         req->dataxfer_length = ccb->csio.dxfer_len;
 2541         req->channel = 0;
 2542         req->target = ccb->ccb_h.target_id;
 2543         req->lun = ccb->ccb_h.target_lun;
 2544         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
 2545                                 - sizeof(struct hpt_iopsg)
 2546                                 + nsegs * sizeof(struct hpt_iopsg);
 2547         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2548                 bus_dmamap_sync(hba->io_dmat,
 2549                         srb->dma_map, BUS_DMASYNC_PREREAD);
 2550         } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
 2551                 bus_dmamap_sync(hba->io_dmat,
 2552                         srb->dma_map, BUS_DMASYNC_PREWRITE);
 2553         }
 2554         req->header.context = (u_int64_t)srb->index
 2555                                         << MVIOP_REQUEST_NUMBER_START_BIT
 2556                                         | MVIOP_CMD_TYPE_SCSI;
 2557         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 2558         size = req->header.size >> 8;
 2559         hptiop_mv_inbound_write(req_phy
 2560                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
 2561                         | imin(3, size), hba);
 2562 }
 2563 
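      /*
       * Post a SCSI command to an MVFrey adapter.  Instead of a doorbell
       * queue, requests go through an in-memory inbound descriptor list:
       * the entry at the current write pointer is filled in, the pointer
       * register is written and then read back (presumably to flush the
       * posted write), and a 20-second callout is armed that escalates to
       * a full adapter reset if the command never completes.
       */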
 2564 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
 2565                                 struct hpt_iop_srb *srb,
 2566                                 bus_dma_segment_t *segs, int nsegs)
 2567 {
 2568         int idx, index;
 2569         union ccb *ccb = srb->ccb;
 2570         u_int8_t *cdb;
 2571         struct hpt_iop_request_scsi_command *req;
 2572         u_int64_t req_phy;
 2573 
 2574         req = (struct hpt_iop_request_scsi_command *)srb;
 2575         req_phy = srb->phy_addr;
 2576 
 2577         if (ccb->csio.dxfer_len && nsegs > 0) {
 2578                 struct hpt_iopsg *psg = req->sg_list;
 2579                 for (idx = 0; idx < nsegs; idx++, psg++) {
 2580                         psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
 2581                         psg->size = segs[idx].ds_len;
 2582                         psg->eot = 0;
 2583                 }
 2584                 psg[-1].eot = 1;
 2585         }
 2586         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2587                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2588         else
 2589                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2590 
 2591         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2592         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2593         req->header.result = IOP_RESULT_PENDING;
 2594         req->dataxfer_length = ccb->csio.dxfer_len;
 2595         req->channel = 0;
 2596         req->target = ccb->ccb_h.target_id;
 2597         req->lun = ccb->ccb_h.target_lun;
 2598         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
 2599                                 - sizeof(struct hpt_iopsg)
 2600                                 + nsegs * sizeof(struct hpt_iopsg);
 2601         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2602                 bus_dmamap_sync(hba->io_dmat,
 2603                         srb->dma_map, BUS_DMASYNC_PREREAD);
 2604         } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
 2605                 bus_dmamap_sync(hba->io_dmat,
 2606                         srb->dma_map, BUS_DMASYNC_PREWRITE);
 2607         }
 2608 
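              /*
               * Descriptor encoding, as read from the assignments below:
               * header.flags carries bits 47:32 of the request's physical
               * address in its upper 16 bits, while header.context packs
               * the low 32 address bits into its top half together with the
               * SRB index (shifted left by 4), the host-address bit, and
               * the request type.
               */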
 2609         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
 2610                                                 | IOP_REQUEST_FLAG_ADDR_BITS
 2611                                                 | ((req_phy >> 16) & 0xffff0000);
 2612         req->header.context = ((req_phy & 0xffffffff) << 32)
 2613                                                 | srb->index << 4
 2614                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
 2615 
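              /*
               * The low 14 bits of inlist_wptr index the inbound list; on
               * wrap-around the index returns to slot 0 and
               * CL_POINTER_TOGGLE is flipped, which lets the firmware tell
               * a full ring from an empty one.
               */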
 2616         hba->u.mvfrey.inlist_wptr++;
 2617         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
 2618 
 2619         if (index == hba->u.mvfrey.list_count) {
 2620                 index = 0;
 2621                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
 2622                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
 2623         }
 2624 
 2625         hba->u.mvfrey.inlist[index].addr = req_phy;
 2626         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
 2627 
 2628         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
 2629         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
 2630 
 2631         if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
 2632                 callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
 2633         }
 2634 }
 2635 
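      /*
       * bus_dmamap_load() callback for SCSI CCBs.  If the mapping failed
       * or produced more segments than the adapter supports, complete the
       * CCB as CAM_BUSY and recycle the SRB; otherwise hand the request to
       * the controller-specific post hook (ITL, MV, or MVFrey).
       */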
 2636 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
 2637                                         int nsegs, int error)
 2638 {
 2639         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
 2640         union ccb *ccb = srb->ccb;
 2641         struct hpt_iop_hba *hba = srb->hba;
 2642 
 2643         if (error || nsegs > hba->max_sg_count) {
 2644                 KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
 2645                         ccb->ccb_h.func_code,
 2646                         ccb->ccb_h.target_id,
 2647                         (uintmax_t)ccb->ccb_h.target_lun, nsegs));
 2648                 ccb->ccb_h.status = CAM_BUSY;
 2649                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
 2650                 hptiop_free_srb(hba, srb);
 2651                 xpt_done(ccb);
 2652                 return;
 2653         }
 2654 
 2655         hba->ops->post_req(hba, srb, segs, nsegs);
 2656 }
 2657 
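      /*
       * bus_dmamem callback for the MV control/config buffer: round both
       * the bus address and the kernel pointer up to the next 32-byte
       * boundary before use.
       */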
 2658 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
 2659                                 int nsegs, int error)
 2660 {
 2661         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2662         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) 
 2663                                 & ~(u_int64_t)0x1F;
 2664         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
 2665                                 & ~0x1F);
 2666 }
 2667 
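      /*
       * bus_dmamem callback for the MVFrey control buffer.  After 32-byte
       * alignment the region is carved up sequentially: 0x800 bytes of
       * control/config space, then the inbound descriptor list, the
       * outbound descriptor list, and the outbound-list copy pointer,
       * advancing the kernel virtual address and the bus address in
       * lockstep.
       */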
 2668 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
 2669                                 int nsegs, int error)
 2670 {
 2671         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2672         u_int8_t *p;
 2673         u_int64_t phy;
 2674         u_int32_t list_count = hba->u.mvfrey.list_count;
 2675 
 2676         phy = ((u_int64_t)segs->ds_addr + 0x1F) 
 2677                                 & ~(u_int64_t)0x1F;
 2678         p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
 2679                                 & ~0x1F);
 2680         
 2681         hba->ctlcfgcmd_phy = phy;
 2682         hba->ctlcfg_ptr = p;
 2683 
 2684         p += 0x800;
 2685         phy += 0x800;
 2686 
 2687         hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
 2688         hba->u.mvfrey.inlist_phy = phy;
 2689 
 2690         p += list_count * sizeof(struct mvfrey_inlist_entry);
 2691         phy += list_count * sizeof(struct mvfrey_inlist_entry);
 2692 
 2693         hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
 2694         hba->u.mvfrey.outlist_phy = phy;
 2695 
 2696         p += list_count * sizeof(struct mvfrey_outlist_entry);
 2697         phy += list_count * sizeof(struct mvfrey_outlist_entry);
 2698 
 2699         hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
 2700         hba->u.mvfrey.outlist_cptr_phy = phy;
 2701 }
 2702 
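      /*
       * bus_dmamem callback for the SRB pool.  The pool base is rounded up
       * to a 32-byte boundary; each slot then gets a DMA map, a zeroed
       * SRB, its index, and its bus address.  On ITL adapters (no ctlcfg
       * buffer) the address is stored pre-shifted right by 5 so it fits
       * the 32-bit inbound-queue word, with a flag recording addresses
       * beyond the range covered by IOPMU_MAX_MEM_SUPPORT_MASK_32G.
       */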
 2703 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
 2704                                 int nsegs, int error)
 2705 {
 2706         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2707         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
 2708         struct hpt_iop_srb *srb, *tmp_srb;
 2709         int i;
 2710 
 2711         if (error || nsegs == 0) {
 2712                 device_printf(hba->pcidev, "hptiop_map_srb error\n");
 2713                 return;
 2714         }
 2715 
 2716         /* map srb */
 2717         srb = (struct hpt_iop_srb *)
 2718                 (((unsigned long)hba->uncached_ptr + 0x1F)
 2719                 & ~(unsigned long)0x1F);
 2720 
 2721         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
 2722                 tmp_srb = (struct hpt_iop_srb *)
 2723                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
 2724                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
 2725                         if (bus_dmamap_create(hba->io_dmat,
 2726                                                 0, &tmp_srb->dma_map)) {
 2727                                 device_printf(hba->pcidev, "dmamap create failed\n");
 2728                                 return;
 2729                         }
 2730 
 2731                         bzero(tmp_srb, sizeof(struct hpt_iop_srb));
 2732                         tmp_srb->hba = hba;
 2733                         tmp_srb->index = i;
 2734                         if (hba->ctlcfg_ptr == NULL) { /* ITL IOP */
 2735                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
 2736                                                         (phy_addr >> 5);
 2737                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
 2738                                         tmp_srb->srb_flag =
 2739                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
 2740                         } else {
 2741                                 tmp_srb->phy_addr = phy_addr;
 2742                         }
 2743 
 2744                         callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
 2745                         hptiop_free_srb(hba, tmp_srb);
 2746                         hba->srb[i] = tmp_srb;
 2747                         phy_addr += HPT_SRB_MAX_SIZE;
 2748                 } else {
 2749                         device_printf(hba->pcidev,
 2750                             "invalid SRB alignment\n");
 2751                         return;
 2752                 }
 2753         }
 2754 }
 2755 
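      /*
       * Message callback from the IOP: flag completion for code that polls
       * hba->msg_done.
       */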
 2756 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg)
 2757 {
 2758         hba->msg_done = 1;
 2759 }
 2760 
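      /*
       * Decide whether a target may be detached: build a CAM path to it
       * and return -1 if a "da" peripheral is attached with outstanding
       * references, 0 otherwise.
       */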
 2761 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba,
 2762                                                 int target_id)
 2763 {
 2764         struct cam_periph       *periph = NULL;
 2765         struct cam_path         *path;
 2766         int                     status, retval = 0;
 2767 
 2768         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
 2769 
 2770         if (status == CAM_REQ_CMP) {
 2771                 if ((periph = cam_periph_find(path, "da")) != NULL) {
 2772                         if (periph->refcount >= 1) {
 2773                                 device_printf(hba->pcidev, "%d, "
 2774                                         "target_id=0x%x, "
 2775                                         "refcount=%d\n",
 2776                                     hba->pciunit, target_id, periph->refcount);
 2777                                 retval = -1;
 2778                         }
 2779                 }
 2780                 xpt_free_path(path);
 2781         }
 2782         return retval;
 2783 }
 2784 
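      /*
       * Undo attach in reverse order: ioctl node, async callback and CAM
       * path, interrupt handler, SIM, control/config DMA memory, per-SRB
       * maps and callouts, SRB pool, DMA tags, IRQ and BAR resources, and
       * finally the adapter lock.  Note the SRB loop assumes hba->srb[]
       * was fully populated by hptiop_map_srb.
       */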
 2785 static void hptiop_release_resource(struct hpt_iop_hba *hba)
 2786 {
 2787         int i;
 2788 
 2789         if (hba->ioctl_dev)
 2790                 destroy_dev(hba->ioctl_dev);
 2791 
 2792         if (hba->path) {
 2793                 struct ccb_setasync ccb;
 2794 
 2795                 memset(&ccb, 0, sizeof(ccb));
 2796                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
 2797                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
 2798                 ccb.event_enable = 0;
 2799                 ccb.callback = hptiop_async;
 2800                 ccb.callback_arg = hba->sim;
 2801                 xpt_action((union ccb *)&ccb);
 2802                 xpt_free_path(hba->path);
 2803         }
 2804 
 2805         if (hba->irq_handle)
 2806                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
 2807 
 2808         if (hba->sim) {
 2809                 hptiop_lock_adapter(hba);
 2810                 xpt_bus_deregister(cam_sim_path(hba->sim));
 2811                 cam_sim_free(hba->sim, TRUE);
 2812                 hptiop_unlock_adapter(hba);
 2813         }
 2814 
 2815         if (hba->ctlcfg_dmat) {
 2816                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 2817                 bus_dmamem_free(hba->ctlcfg_dmat,
 2818                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 2819                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 2820         }
 2821 
 2822         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
 2823                 struct hpt_iop_srb *srb = hba->srb[i];
 2824                 if (srb->dma_map)
 2825                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
 2826                 callout_drain(&srb->timeout);
 2827         }
 2828 
 2829         if (hba->srb_dmat) {
 2830                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
 2831                 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
 2832                 bus_dma_tag_destroy(hba->srb_dmat);
 2833         }
 2834 
 2835         if (hba->io_dmat)
 2836                 bus_dma_tag_destroy(hba->io_dmat);
 2837 
 2838         if (hba->parent_dmat)
 2839                 bus_dma_tag_destroy(hba->parent_dmat);
 2840 
 2841         if (hba->irq_res)
 2842                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
 2843                                         0, hba->irq_res);
 2844 
 2845         if (hba->bar0_res)
 2846                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 2847                                         hba->bar0_rid, hba->bar0_res);
 2848         if (hba->bar2_res)
 2849                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 2850                                         hba->bar2_rid, hba->bar2_res);
 2851         mtx_destroy(&hba->lock);
 2852 }
