FreeBSD/Linux Kernel Cross Reference
sys/dev/hptiop/hptiop.c

/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/dev/hptiop/hptiop.c 331722 2018-03-29 02:50:57Z eadler $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.9";

static devclass_t hptiop_devclass;

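/*
 * Forward declarations.  Most hardware-access routines come in three
 * flavors, distinguished by suffix: _itl, _mv and _mvfrey.  These
 * presumably correspond to the three message-unit generations this
 * driver supports (Intel IOP-based, Marvell-based and Marvell "Frey"
 * based RR3xxx/4xxx adapters); the per-family implementation is
 * selected at attach time through hba->ops.
 */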
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                        u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
        .d_name = driver_name,
        .d_version = D_VERSION,
};

#define hba_from_dev(dev) \
        ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

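/*
 * Character-device entry points.  A management utility opens
 * /dev/hptiopN and drives the controller through the HPT_DO_IOCONTROL
 * and HPT_SCAN_BUS ioctls; only one opener is allowed at a time.
 */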
static int hptiop_open(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba == NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);
        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
                                        int flags, ioctl_thread_t proc)
{
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        mtx_lock(&Giant);

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }

        mtx_unlock(&Giant);

        return ret;
}

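/*
 * Marvell (MV) message-unit access.  Requests and completions travel
 * through two rings of 64-bit entries in BAR2: the read side pops a
 * completion tag from outbound_q and advances the tail, the write
 * side pushes a request's bus address into inbound_q and rings the
 * inbound doorbell.  The hptiop_post_msg_* helpers below kick the
 * per-family message registers the same way.
 */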
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                        (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
        BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

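/*
 * Wait for the firmware to become ready.  Each variant polls for up
 * to 'millisec' milliseconds: the ITL flavor waits for a request to
 * appear in the inbound queue, while the MV and MVFrey flavors send
 * a synchronous NOP message and wait for its completion.
 */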
static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        u_int32_t req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba *hba,
                                                        u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

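/*
 * ITL completion path.  'index' is either a host-allocated request
 * (IOPMU_QUEUE_MASK_HOST_BITS set, remaining bits index hba->srb[])
 * or the BAR0 offset of a firmware-resident request.  The IOP result
 * code is translated to a CAM status, DMA maps are synced and torn
 * down, and sense data is copied out on CHECK CONDITION before the
 * CCB is completed.
 */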
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req = NULL;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /* iop req */
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch (temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                                index + offsetof(struct hpt_iop_request_scsi_command,
                                dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (dxfer < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    dxfer;
                        else
                                ccb->csio.sense_resid = 0;
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header *p;

                        p = (struct hpt_iop_request_header *)
                                ((char *)hba->u.itl.mu + req);
                        temp = bus_space_read_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;
                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

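/*
 * ITL interrupt handler: acknowledges outbound message interrupts and
 * drains the outbound completion queue.  Returns nonzero if any work
 * was done, so the caller can claim the interrupt.
 */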
static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

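/*
 * MV completion path.  The low 32 bits of the outbound tag encode the
 * request type (SCSI, ioctl, get/set config); for SCSI requests the
 * bits above MVIOP_REQUEST_NUMBER_START_BIT index hba->srb[].
 */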
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
                                                        u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

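/*
 * MVFrey completion path.  The low four bits of the tag carry the
 * request type; for SCSI commands, bits 4-11 index hba->srb[] and
 * MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT reports success.
 */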
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
                                u_int32_t _tag)
{
        u_int32_t req_type = _tag & 0xf;

        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req;
        union ccb *ccb;
        u_int8_t *cdb;

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->config_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                srb = hba->srb[(_tag >> 4) & 0xff];
                req = (struct hpt_iop_request_scsi_command *)srb;

                ccb = (union ccb *)srb->ccb;

                callout_stop(&srb->timeout);

                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                               sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                               MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
                break;
        default:
                device_printf(hba->pcidev, "wrong callback type\n");
                break;
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

static int hptiop_intr_mvfrey(struct hpt_iop_hba *hba)
{
        u_int32_t status, _tag, cptr;
        int ret = 0;

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
        }

        status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
                if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
                        u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
                        hptiop_os_message_callback(hba, msg);
                }
                ret = 1;
        }

        status = BUS_SPACE_RD4_MVFREY2(isr_cause);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
                do {
                        cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
                        while (hba->u.mvfrey.outlist_rptr != cptr) {
                                hba->u.mvfrey.outlist_rptr++;
                                if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
                                        hba->u.mvfrey.outlist_rptr = 0;
                                }

                                _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
                                hptiop_request_callback_mvfrey(hba, _tag);
                                ret = 2;
                        }
                } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
        }

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
        }

        return ret;
}

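/*
 * Synchronous request/message helpers.  Each posts a request (or
 * message) and then busy-polls the interrupt handler, one millisecond
 * per iteration, until the firmware signals completion or the timeout
 * expires.
 */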
static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
                                        u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;

        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i, index;
        u_int64_t phy_addr;
        struct hpt_iop_request_header *reqhdr =
                        (struct hpt_iop_request_header *)req;

        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy;
        reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
                        | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
                        | IOP_REQUEST_FLAG_ADDR_BITS
                        | ((phy_addr >> 16) & 0xffff0000);
        reqhdr->context = ((phy_addr & 0xffffffff) << 32)
                        | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

        hba->u.mvfrey.inlist_wptr++;
        index = hba->u.mvfrey.inlist_wptr & 0x3fff;

        if (index == hba->u.mvfrey.list_count) {
                index = 0;
                hba->u.mvfrey.inlist_wptr &= ~0x3fff;
                hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
        }

        hba->u.mvfrey.inlist[index].addr = phy_addr;
        hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

        BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
        BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mvfrey(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                        u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done ? 0 : -1;
}

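/*
 * Firmware configuration exchange, used during attach: read back the
 * controller's hpt_iop_request_get_config (queue depth, SG limits,
 * transfer sizes) and push a hpt_iop_request_set_config to the IOP.
 */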
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

        if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
            info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
                KdPrint(("hptiop: header size %x/%x type %x/%x",
                         info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
                         info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
                return -1;
        }

        config->interface_version = info->interface_version;
        config->firmware_version = info->firmware_version;
        config->max_requests = info->max_requests;
        config->request_size = info->request_size;
        config->max_sg_count = info->max_sg_count;
        config->data_transfer_length = info->data_transfer_length;
        config->alignment_mask = info->alignment_mask;
        config->max_devices = info->max_devices;
        config->sdram_size = info->sdram_size;

        KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
                 config->max_requests, config->request_size,
                 config->data_transfer_length, config->max_devices,
                 config->sdram_size));

        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                (u_int32_t *)config,
                sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;

        if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

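/*
 * ITL ioctl pass-through: build an hpt_iop_request_ioctl_command in
 * the firmware-supplied request frame, post it, and sleep until the
 * completion callback clears the context field, resetting the adapter
 * if the wait times out.
 */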
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

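/*
 * Byte-at-a-time copies between user memory and the BAR0-mapped
 * request frame; there is no user-copy primitive for bus space, so
 * each byte goes through copyin/copyout individually.
 */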
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
                                        void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                if (copyin((u_int8_t *)user + i, &byte, 1))
                        return -1;
                bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }

        return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
                                        void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
                if (copyout(&byte, (u_int8_t *)user + i, 1))
                        return -1;
        }

        return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int32_t req32;
        u_int32_t result;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return EFAULT;

        if (pParams->nInBufferSize)
                if (hptiop_bus_space_copyin(hba, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command, buf),
                        (void *)pParams->lpInBuffer, pParams->nInBufferSize))
                        goto invalid;

        if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
                goto invalid;

        result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.result));

        if (result == IOP_RESULT_SUCCESS) {
                if (pParams->nOutBufferSize)
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, buf) +
                                        ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned) {
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
                                (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
                                goto invalid;
                }

                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return 0;
        } else {
invalid:
                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return EFAULT;
        }
}

 1220 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
 1221                                 struct hpt_iop_request_ioctl_command *req,
 1222                                 struct hpt_iop_ioctl_param *pParams)
 1223 {
 1224         u_int64_t req_phy;
 1225         int size = 0;
 1226 
 1227         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
 1228                         (hba->max_request_size -
 1229                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
 1230                 device_printf(hba->pcidev, "request size beyond max value\n");
 1231                 return -1;
 1232         }
 1233 
 1234         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
 1235         req->inbuf_size = pParams->nInBufferSize;
 1236         req->outbuf_size = pParams->nOutBufferSize;
 1237         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
 1238                                         + pParams->nInBufferSize;
 1239         req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
 1240         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
 1241         req->header.result = IOP_RESULT_PENDING;
 1242         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 1243         size = req->header.size >> 8;
 1244         size = imin(3, size);
 1245         req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
 1246         hptiop_mv_inbound_write(req_phy, hba);
 1247 
 1248         BUS_SPACE_RD4_MV0(outbound_intmask);
 1249 
 1250         while (hba->config_done == 0) {
 1251                 if (hptiop_sleep(hba, req, PPAUSE,
 1252                         "hptctl", HPT_OSM_TIMEOUT) == 0)
 1253                         continue;
 1254                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
 1255         }
 1256         return 0;
 1257 }
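/*
 * The MV inbound write packs a size hint into the low bits of the
 * request's bus address: header.size in 256-byte units, clamped to 3
 * by imin(), plus MVIOP_MU_QUEUE_ADDR_HOST_BIT to mark a
 * host-allocated request.  The read of outbound_intmask after the
 * write appears to serve only as a posted-write flush.  The caller
 * then sleeps until the interrupt path sets config_done, resetting the
 * IOP whenever the wait times out.
 */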
 1258 
 1259 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
 1260                                 struct hpt_iop_ioctl_param *pParams)
 1261 {
 1262         struct hpt_iop_request_ioctl_command *req;
 1263 
 1264         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
 1265                 (pParams->Magic != HPT_IOCTL_MAGIC32))
 1266                 return EFAULT;
 1267 
 1268         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
 1269         hba->config_done = 0;
 1270         hptiop_lock_adapter(hba);
 1271         if (pParams->nInBufferSize)
 1272                 if (copyin((void *)pParams->lpInBuffer,
 1273                                 req->buf, pParams->nInBufferSize))
 1274                         goto invalid;
 1275         if (hptiop_post_ioctl_command_mv(hba, req, pParams))
 1276                 goto invalid;
 1277 
 1278         if (hba->config_done == 1) {
 1279                 if (pParams->nOutBufferSize)
 1280                         if (copyout(req->buf +
 1281                                 ((pParams->nInBufferSize + 3) & ~3),
 1282                                 (void *)pParams->lpOutBuffer,
 1283                                 pParams->nOutBufferSize))
 1284                                 goto invalid;
 1285 
 1286                 if (pParams->lpBytesReturned)
 1287                         if (copyout(&req->bytes_returned,
 1288                                 (void*)pParams->lpBytesReturned,
 1289                                 sizeof(u_int32_t)))
 1290                                 goto invalid;
 1291                 hptiop_unlock_adapter(hba);
 1292                 return 0;
 1293         } else {
 1294 invalid:
 1295                 hptiop_unlock_adapter(hba);
 1296                 return EFAULT;
 1297         }
 1298 }
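/*
 * do_ioctl for MV (and MVFrey below) follows one pattern: copy the
 * input into the shared ctlcfg buffer, post the request, wait for the
 * interrupt path to set config_done, then copy the results back out.
 * Note that config_done is cleared just before hptiop_lock_adapter()
 * is taken, so this presumably relies on ioctl callers being
 * serialized by the adapter lock for the rest of the exchange.
 */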
 1299 
 1300 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
 1301                                 struct hpt_iop_request_ioctl_command *req,
 1302                                 struct hpt_iop_ioctl_param *pParams)
 1303 {
 1304         u_int64_t phy_addr;
 1305         u_int32_t index;
 1306 
 1307         phy_addr = hba->ctlcfgcmd_phy;
 1308 
 1309         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
 1310                         (hba->max_request_size -
 1311                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
 1312                 device_printf(hba->pcidev, "request size beyond max value\n");
 1313                 return -1;
 1314         }
 1315 
 1316         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
 1317         req->inbuf_size = pParams->nInBufferSize;
 1318         req->outbuf_size = pParams->nOutBufferSize;
 1319         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
 1320                                         + pParams->nInBufferSize;
 1321 
 1322         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
 1323         req->header.result = IOP_RESULT_PENDING;
 1324 
 1325         req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
 1326                                                 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
 1327                                                 | IOP_REQUEST_FLAG_ADDR_BITS
 1328                                                 | ((phy_addr >> 16) & 0xffff0000);
 1329         req->header.context = ((phy_addr & 0xffffffff) << 32)
 1330                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
 1331 
 1332         hba->u.mvfrey.inlist_wptr++;
 1333         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
 1334 
 1335         if (index == hba->u.mvfrey.list_count) {
 1336                 index = 0;
 1337                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
 1338                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
 1339         }
 1340 
 1341         hba->u.mvfrey.inlist[index].addr = phy_addr;
 1342         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
 1343 
 1344         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
 1345         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
 1346 
 1347         while (hba->config_done == 0) {
 1348                 if (hptiop_sleep(hba, req, PPAUSE,
 1349                         "hptctl", HPT_OSM_TIMEOUT) == 0)
 1350                         continue;
 1351                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
 1352         }
 1353         return 0;
 1354 }
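/*
 * MVFrey posts through an in-memory inbound list rather than a
 * register FIFO.  inlist_wptr keeps a 14-bit index in its low bits
 * (mask 0x3fff) plus a toggle bit (CL_POINTER_TOGGLE) that is flipped
 * each time the index wraps past list_count, which lets the firmware
 * tell a freshly written slot from a stale one.  The read-back of
 * inbound_write_ptr after the write presumably flushes the posted PCI
 * write before the wait loop starts.
 */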
 1355 
 1356 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
 1357                                 struct hpt_iop_ioctl_param *pParams)
 1358 {
 1359         struct hpt_iop_request_ioctl_command *req;
 1360 
 1361         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
 1362                 (pParams->Magic != HPT_IOCTL_MAGIC32))
 1363                 return EFAULT;
 1364 
 1365         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
 1366         hba->config_done = 0;
 1367         hptiop_lock_adapter(hba);
 1368         if (pParams->nInBufferSize)
 1369                 if (copyin((void *)pParams->lpInBuffer,
 1370                                 req->buf, pParams->nInBufferSize))
 1371                         goto invalid;
 1372         if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
 1373                 goto invalid;
 1374 
 1375         if (hba->config_done == 1) {
 1376                 if (pParams->nOutBufferSize)
 1377                         if (copyout(req->buf +
 1378                                 ((pParams->nInBufferSize + 3) & ~3),
 1379                                 (void *)pParams->lpOutBuffer,
 1380                                 pParams->nOutBufferSize))
 1381                                 goto invalid;
 1382 
 1383                 if (pParams->lpBytesReturned)
 1384                         if (copyout(&req->bytes_returned,
 1385                                 (void*)pParams->lpBytesReturned,
 1386                                 sizeof(u_int32_t)))
 1387                                 goto invalid;
 1388                 hptiop_unlock_adapter(hba);
 1389                 return 0;
 1390         } else {
 1391 invalid:
 1392                 hptiop_unlock_adapter(hba);
 1393                 return EFAULT;
 1394         }
 1395 }
 1396 
 1397 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
 1398 {
 1399         union ccb           *ccb;
 1400 
 1401         if ((ccb = xpt_alloc_ccb()) == NULL)
 1402                 return(ENOMEM);
 1403         if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
 1404                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 1405                 xpt_free_ccb(ccb);
 1406                 return(EIO);
 1407         }
 1408         xpt_rescan(ccb);
 1409         return(0);
 1410 }
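/*
 * xpt_rescan() takes ownership of the ccb and disposes of it when the
 * rescan completes, so only the path-creation error case frees the ccb
 * here.
 */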
 1411 
 1412 static  bus_dmamap_callback_t   hptiop_map_srb;
 1413 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
 1414 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
 1415 static  bus_dmamap_callback_t   hptiop_mvfrey_map_ctlcfg;
 1416 
 1417 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
 1418 {
 1419         hba->bar0_rid = 0x10;
 1420         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1421                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1422 
 1423         if (hba->bar0_res == NULL) {
 1424                 device_printf(hba->pcidev,
 1425                         "failed to get iop base address.\n");
 1426                 return -1;
 1427         }
 1428         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1429         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1430         hba->u.itl.mu = (struct hpt_iopmu_itl *)
 1431                                 rman_get_virtual(hba->bar0_res);
 1432 
 1433         if (!hba->u.itl.mu) {
 1434                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1435                                         hba->bar0_rid, hba->bar0_res);
 1436                 device_printf(hba->pcidev, "alloc mem res failed\n");
 1437                 return -1;
 1438         }
 1439 
 1440         return 0;
 1441 }
 1442 
 1443 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
 1444 {
 1445         hba->bar0_rid = 0x10;
 1446         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1447                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1448 
 1449         if (hba->bar0_res == NULL) {
 1450                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
 1451                 return -1;
 1452         }
 1453         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1454         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1455         hba->u.mv.regs = (struct hpt_iopmv_regs *)
 1456                                 rman_get_virtual(hba->bar0_res);
 1457 
 1458         if (!hba->u.mv.regs) {
 1459                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1460                                         hba->bar0_rid, hba->bar0_res);
 1461                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
 1462                 return -1;
 1463         }
 1464 
 1465         hba->bar2_rid = 0x18;
 1466         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
 1467                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
 1468 
 1469         if (hba->bar2_res == NULL) {
 1470                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1471                                         hba->bar0_rid, hba->bar0_res);
 1472                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
 1473                 return -1;
 1474         }
 1475 
 1476         hba->bar2t = rman_get_bustag(hba->bar2_res);
 1477         hba->bar2h = rman_get_bushandle(hba->bar2_res);
 1478         hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
 1479 
 1480         if (!hba->u.mv.mu) {
 1481                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1482                                         hba->bar0_rid, hba->bar0_res);
 1483                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1484                                         hba->bar2_rid, hba->bar2_res);
 1485                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
 1486                 return -1;
 1487         }
 1488 
 1489         return 0;
 1490 }
 1491 
 1492 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
 1493 {
 1494         hba->bar0_rid = 0x10;
 1495         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1496                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1497 
 1498         if (hba->bar0_res == NULL) {
 1499                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
 1500                 return -1;
 1501         }
 1502         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1503         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1504         hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
 1505                                 rman_get_virtual(hba->bar0_res);
 1506 
 1507         if (!hba->u.mvfrey.config) {
 1508                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1509                                         hba->bar0_rid, hba->bar0_res);
 1510                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
 1511                 return -1;
 1512         }
 1513 
 1514         hba->bar2_rid = 0x18;
 1515         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
 1516                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
 1517 
 1518         if (hba->bar2_res == NULL) {
 1519                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1520                                         hba->bar0_rid, hba->bar0_res);
 1521                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
 1522                 return -1;
 1523         }
 1524 
 1525         hba->bar2t = rman_get_bustag(hba->bar2_res);
 1526         hba->bar2h = rman_get_bushandle(hba->bar2_res);
 1527         hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)
 1528                                 rman_get_virtual(hba->bar2_res);
 1529 
 1530         if (!hba->u.mvfrey.mu) {
 1531                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1532                                         hba->bar0_rid, hba->bar0_res);
 1533                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1534                                         hba->bar2_rid, hba->bar2_res);
 1535                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
 1536                 return -1;
 1537         }
 1538 
 1539         return 0;
 1540 }
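/*
 * The resource IDs passed to bus_alloc_resource_any() above are PCI
 * configuration-space offsets: 0x10 is BAR0 and 0x18 is BAR2.  Each
 * alloc_pci_res_* variant maps only the BARs its controller family
 * uses and releases everything already mapped when a later step fails.
 */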
 1541 
 1542 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
 1543 {
 1544         if (hba->bar0_res)
 1545                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1546                         hba->bar0_rid, hba->bar0_res);
 1547 }
 1548 
 1549 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
 1550 {
 1551         if (hba->bar0_res)
 1552                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1553                         hba->bar0_rid, hba->bar0_res);
 1554         if (hba->bar2_res)
 1555                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1556                         hba->bar2_rid, hba->bar2_res);
 1557 }
 1558 
 1559 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
 1560 {
 1561         if (hba->bar0_res)
 1562                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1563                         hba->bar0_rid, hba->bar0_res);
 1564         if (hba->bar2_res)
 1565                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1566                         hba->bar2_rid, hba->bar2_res);
 1567 }
 1568 
 1569 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
 1570 {
 1571         if (bus_dma_tag_create(hba->parent_dmat,
 1572                                 1,
 1573                                 0,
 1574                                 BUS_SPACE_MAXADDR_32BIT,
 1575                                 BUS_SPACE_MAXADDR,
 1576                                 NULL, NULL,
 1577                                 0x800 - 0x8,
 1578                                 1,
 1579                                 BUS_SPACE_MAXSIZE_32BIT,
 1580                                 BUS_DMA_ALLOCNOW,
 1581                                 NULL,
 1582                                 NULL,
 1583                                 &hba->ctlcfg_dmat)) {
 1584                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
 1585                 return -1;
 1586         }
 1587 
 1588         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
 1589                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1590                 &hba->ctlcfg_dmamap) != 0) {
 1591                         device_printf(hba->pcidev,
 1592                                         "bus_dmamem_alloc failed!\n");
 1593                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1594                         return -1;
 1595         }
 1596 
 1597         if (bus_dmamap_load(hba->ctlcfg_dmat,
 1598                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
 1599                         MVIOP_IOCTLCFG_SIZE,
 1600                         hptiop_mv_map_ctlcfg, hba, 0)) {
 1601                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
 1602                 if (hba->ctlcfg_dmat) {
 1603                         bus_dmamem_free(hba->ctlcfg_dmat,
 1604                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1605                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1606                 }
 1607                 return -1;
 1608         }
 1609 
 1610         return 0;
 1611 }
 1612 
 1613 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
 1614 {
 1615         u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
 1616 
 1617         list_count >>= 16;
 1618 
 1619         if (list_count == 0) {
 1620                 return -1;
 1621         }
 1622 
 1623         hba->u.mvfrey.list_count = list_count;
 1624         hba->u.mvfrey.internal_mem_size = 0x800
 1625                                                         + list_count * sizeof(struct mvfrey_inlist_entry)
 1626                                                         + list_count * sizeof(struct mvfrey_outlist_entry)
 1627                                                         + sizeof(int);
 1628         if (bus_dma_tag_create(hba->parent_dmat,
 1629                                 1,
 1630                                 0,
 1631                                 BUS_SPACE_MAXADDR_32BIT,
 1632                                 BUS_SPACE_MAXADDR,
 1633                                 NULL, NULL,
 1634                                 hba->u.mvfrey.internal_mem_size,
 1635                                 1,
 1636                                 BUS_SPACE_MAXSIZE_32BIT,
 1637                                 BUS_DMA_ALLOCNOW,
 1638                                 NULL,
 1639                                 NULL,
 1640                                 &hba->ctlcfg_dmat)) {
 1641                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
 1642                 return -1;
 1643         }
 1644 
 1645         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
 1646                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1647                 &hba->ctlcfg_dmamap) != 0) {
 1648                         device_printf(hba->pcidev,
 1649                                         "bus_dmamem_alloc failed!\n");
 1650                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1651                         return -1;
 1652         }
 1653 
 1654         if (bus_dmamap_load(hba->ctlcfg_dmat,
 1655                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
 1656                         hba->u.mvfrey.internal_mem_size,
 1657                         hptiop_mvfrey_map_ctlcfg, hba, 0)) {
 1658                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
 1659                 if (hba->ctlcfg_dmat) {
 1660                         bus_dmamem_free(hba->ctlcfg_dmat,
 1661                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1662                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1663                 }
 1664                 return -1;
 1665         }
 1666 
 1667         return 0;
 1668 }
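/*
 * The single DMA allocation for MVFrey is laid out as a 0x800-byte
 * area for the ioctl/config request, followed by list_count inbound
 * list entries, list_count outbound list entries, and a trailing int
 * used as the outbound copy pointer; list_count itself comes from the
 * upper 16 bits of inbound_conf_ctl.  The region is presumably carved
 * up in the hptiop_mvfrey_map_ctlcfg() load callback once the bus
 * address is known.
 */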
 1669 
 1670 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
 1671         return 0;
 1672 }
 1673 
 1674 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
 1675 {
 1676         if (hba->ctlcfg_dmat) {
 1677                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 1678                 bus_dmamem_free(hba->ctlcfg_dmat,
 1679                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1680                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1681         }
 1682 
 1683         return 0;
 1684 }
 1685 
 1686 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
 1687 {
 1688         if (hba->ctlcfg_dmat) {
 1689                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 1690                 bus_dmamem_free(hba->ctlcfg_dmat,
 1691                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1692                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1693         }
 1694 
 1695         return 0;
 1696 }
 1697 
 1698 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
 1699 {
 1700         u_int32_t i = 100;
 1701 
 1702         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
 1703                 return -1;
 1704 
 1705         /* wait 100ms for MCU ready */
 1706         while (i--) {
 1707                 DELAY(1000);
 1708         }
 1709 
 1710         BUS_SPACE_WRT4_MVFREY2(inbound_base,
 1711                                                         hba->u.mvfrey.inlist_phy & 0xffffffff);
 1712         BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
 1713                                                         (hba->u.mvfrey.inlist_phy >> 16) >> 16);
 1714 
 1715         BUS_SPACE_WRT4_MVFREY2(outbound_base,
 1716                                                         hba->u.mvfrey.outlist_phy & 0xffffffff);
 1717         BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
 1718                                                         (hba->u.mvfrey.outlist_phy >> 16) >> 16);
 1719 
 1720         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
 1721                                                         hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
 1722         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
 1723                                                         (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
 1724 
 1725         hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
 1726                                                                 | CL_POINTER_TOGGLE;
 1727         *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
 1728                                                                 | CL_POINTER_TOGGLE;
 1729         hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
 1730         
 1731         return 0;
 1732 }
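/*
 * The high halves of the 64-bit list addresses are written as
 * (phys >> 16) >> 16 rather than (phys >> 32), a common idiom that
 * sidesteps shift-count warnings (and undefined behavior) if the
 * expression is ever evaluated at 32-bit width.  The 100 iterations of
 * DELAY(1000) busy-wait roughly 100ms for the MCU, as the comment
 * above says, and the write/read pointers are then primed to
 * list_count - 1 with the toggle bit set so that the first post wraps
 * to slot 0.
 */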
 1733 
 1734 /*
 1735  * CAM driver interface
 1736  */
 1737 static device_method_t driver_methods[] = {
 1738         /* Device interface */
 1739         DEVMETHOD(device_probe,     hptiop_probe),
 1740         DEVMETHOD(device_attach,    hptiop_attach),
 1741         DEVMETHOD(device_detach,    hptiop_detach),
 1742         DEVMETHOD(device_shutdown,  hptiop_shutdown),
 1743         { 0, 0 }
 1744 };
 1745 
 1746 static struct hptiop_adapter_ops hptiop_itl_ops = {
 1747         .family            = INTEL_BASED_IOP,
 1748         .iop_wait_ready    = hptiop_wait_ready_itl,
 1749         .internal_memalloc = 0,
 1750         .internal_memfree  = hptiop_internal_memfree_itl,
 1751         .alloc_pci_res     = hptiop_alloc_pci_res_itl,
 1752         .release_pci_res   = hptiop_release_pci_res_itl,
 1753         .enable_intr       = hptiop_enable_intr_itl,
 1754         .disable_intr      = hptiop_disable_intr_itl,
 1755         .get_config        = hptiop_get_config_itl,
 1756         .set_config        = hptiop_set_config_itl,
 1757         .iop_intr          = hptiop_intr_itl,
 1758         .post_msg          = hptiop_post_msg_itl,
 1759         .post_req          = hptiop_post_req_itl,
 1760         .do_ioctl          = hptiop_do_ioctl_itl,
 1761         .reset_comm        = 0,
 1762 };
 1763 
 1764 static struct hptiop_adapter_ops hptiop_mv_ops = {
 1765         .family            = MV_BASED_IOP,
 1766         .iop_wait_ready    = hptiop_wait_ready_mv,
 1767         .internal_memalloc = hptiop_internal_memalloc_mv,
 1768         .internal_memfree  = hptiop_internal_memfree_mv,
 1769         .alloc_pci_res     = hptiop_alloc_pci_res_mv,
 1770         .release_pci_res   = hptiop_release_pci_res_mv,
 1771         .enable_intr       = hptiop_enable_intr_mv,
 1772         .disable_intr      = hptiop_disable_intr_mv,
 1773         .get_config        = hptiop_get_config_mv,
 1774         .set_config        = hptiop_set_config_mv,
 1775         .iop_intr          = hptiop_intr_mv,
 1776         .post_msg          = hptiop_post_msg_mv,
 1777         .post_req          = hptiop_post_req_mv,
 1778         .do_ioctl          = hptiop_do_ioctl_mv,
 1779         .reset_comm        = 0,
 1780 };
 1781 
 1782 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
 1783         .family            = MVFREY_BASED_IOP,
 1784         .iop_wait_ready    = hptiop_wait_ready_mvfrey,
 1785         .internal_memalloc = hptiop_internal_memalloc_mvfrey,
 1786         .internal_memfree  = hptiop_internal_memfree_mvfrey,
 1787         .alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
 1788         .release_pci_res   = hptiop_release_pci_res_mvfrey,
 1789         .enable_intr       = hptiop_enable_intr_mvfrey,
 1790         .disable_intr      = hptiop_disable_intr_mvfrey,
 1791         .get_config        = hptiop_get_config_mvfrey,
 1792         .set_config        = hptiop_set_config_mvfrey,
 1793         .iop_intr          = hptiop_intr_mvfrey,
 1794         .post_msg          = hptiop_post_msg_mvfrey,
 1795         .post_req          = hptiop_post_req_mvfrey,
 1796         .do_ioctl          = hptiop_do_ioctl_mvfrey,
 1797         .reset_comm        = hptiop_reset_comm_mvfrey,
 1798 };
 1799 
 1800 static driver_t hptiop_pci_driver = {
 1801         driver_name,
 1802         driver_methods,
 1803         sizeof(struct hpt_iop_hba)
 1804 };
 1805 
 1806 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
 1807 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
 1808 
 1809 static int hptiop_probe(device_t dev)
 1810 {
 1811         struct hpt_iop_hba *hba;
 1812         u_int32_t id;
 1813         static char buf[256];
 1814         int sas = 0;
 1815         struct hptiop_adapter_ops *ops;
 1816 
 1817         if (pci_get_vendor(dev) != 0x1103)
 1818                 return (ENXIO);
 1819 
 1820         id = pci_get_device(dev);
 1821 
 1822         switch (id) {
 1823                 case 0x4520:
 1824                 case 0x4521:
 1825                 case 0x4522:
 1826                         sas = 1;        /* FALLTHROUGH */
 1827                 case 0x3620:
 1828                 case 0x3622:
 1829                 case 0x3640:
 1830                         ops = &hptiop_mvfrey_ops;
 1831                         break;
 1832                 case 0x4210:
 1833                 case 0x4211:
 1834                 case 0x4310:
 1835                 case 0x4311:
 1836                 case 0x4320:
 1837                 case 0x4321:
 1838                 case 0x4322:
 1839                         sas = 1;        /* FALLTHROUGH */
 1840                 case 0x3220:
 1841                 case 0x3320:
 1842                 case 0x3410:
 1843                 case 0x3520:
 1844                 case 0x3510:
 1845                 case 0x3511:
 1846                 case 0x3521:
 1847                 case 0x3522:
 1848                 case 0x3530:
 1849                 case 0x3540:
 1850                 case 0x3560:
 1851                         ops = &hptiop_itl_ops;
 1852                         break;
 1853                 case 0x3020:
 1854                 case 0x3120:
 1855                 case 0x3122:
 1856                         ops = &hptiop_mv_ops;
 1857                         break;
 1858                 default:
 1859                         return (ENXIO);
 1860         }
 1861 
 1862         device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
 1863                 pci_get_bus(dev), pci_get_slot(dev),
 1864                 pci_get_function(dev), pci_get_irq(dev));
 1865 
 1866         snprintf(buf, sizeof(buf), "RocketRAID %x %s Controller",
 1867                                 id, sas ? "SAS" : "SATA");
 1868         device_set_desc_copy(dev, buf);
 1869 
 1870         hba = (struct hpt_iop_hba *)device_get_softc(dev);
 1871         bzero(hba, sizeof(struct hpt_iop_hba));
 1872         hba->ops = ops;
 1873 
 1874         KdPrint(("hba->ops=%p\n", hba->ops));
 1875         return 0;
 1876 }
 1877 
 1878 static int hptiop_attach(device_t dev)
 1879 {
 1880         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
 1881         struct hpt_iop_request_get_config  iop_config;
 1882         struct hpt_iop_request_set_config  set_config;
 1883         int rid = 0;
 1884         struct cam_devq *devq;
 1885         struct ccb_setasync ccb;
 1886         u_int32_t unit = device_get_unit(dev);
 1887 
 1888         device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
 1889                         unit, driver_version);
 1890 
 1891         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
 1892                 pci_get_bus(dev), pci_get_slot(dev),
 1893                 pci_get_function(dev), hba->ops));
 1894 
 1895         pci_enable_busmaster(dev);
 1896         hba->pcidev = dev;
 1897         hba->pciunit = unit;
 1898 
 1899         if (hba->ops->alloc_pci_res(hba))
 1900                 return ENXIO;
 1901 
 1902         if (hba->ops->iop_wait_ready(hba, 2000)) {
 1903                 device_printf(dev, "adapter is not ready\n");
 1904                 goto release_pci_res;
 1905         }
 1906 
 1907         mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
 1908 
 1909         if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
 1910                         1,  /* alignment */
 1911                         0, /* boundary */
 1912                         BUS_SPACE_MAXADDR,  /* lowaddr */
 1913                         BUS_SPACE_MAXADDR,  /* highaddr */
 1914                         NULL, NULL,         /* filter, filterarg */
 1915                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1916                         BUS_SPACE_UNRESTRICTED, /* nsegments */
 1917                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1918                         0,      /* flags */
 1919                         NULL,   /* lockfunc */
 1920                         NULL,       /* lockfuncarg */
 1921                         &hba->parent_dmat   /* tag */))
 1922         {
 1923                 device_printf(dev, "alloc parent_dmat failed\n");
 1924                 goto release_pci_res;
 1925         }
 1926 
 1927         if (hba->ops->family == MV_BASED_IOP) {
 1928                 if (hba->ops->internal_memalloc(hba)) {
 1929                         device_printf(dev, "alloc srb_dmat failed\n");
 1930                         goto destroy_parent_tag;
 1931                 }
 1932         }
 1933         
 1934         if (hba->ops->get_config(hba, &iop_config)) {
 1935                 device_printf(dev, "get iop config failed.\n");
 1936                 goto get_config_failed;
 1937         }
 1938 
 1939         hba->firmware_version = iop_config.firmware_version;
 1940         hba->interface_version = iop_config.interface_version;
 1941         hba->max_requests = iop_config.max_requests;
 1942         hba->max_devices = iop_config.max_devices;
 1943         hba->max_request_size = iop_config.request_size;
 1944         hba->max_sg_count = iop_config.max_sg_count;
 1945 
 1946         if (hba->ops->family == MVFREY_BASED_IOP) {
 1947                 if (hba->ops->internal_memalloc(hba)) {
 1948                         device_printf(dev, "alloc srb_dmat failed\n");
 1949                         goto destroy_parent_tag;
 1950                 }
 1951                 if (hba->ops->reset_comm(hba)) {
 1952                         device_printf(dev, "reset comm failed\n");
 1953                         goto get_config_failed;
 1954                 }
 1955         }
 1956 
 1957         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
 1958                         4,  /* alignment */
 1959                         BUS_SPACE_MAXADDR_32BIT + 1, /* boundary */
 1960                         BUS_SPACE_MAXADDR,  /* lowaddr */
 1961                         BUS_SPACE_MAXADDR,  /* highaddr */
 1962                         NULL, NULL,         /* filter, filterarg */
 1963                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
 1964                         hba->max_sg_count,  /* nsegments */
 1965                         0x20000,    /* maxsegsize */
 1966                         BUS_DMA_ALLOCNOW,       /* flags */
 1967                         busdma_lock_mutex,  /* lockfunc */
 1968                         &hba->lock,     /* lockfuncarg */
 1969                         &hba->io_dmat   /* tag */))
 1970         {
 1971                 device_printf(dev, "alloc io_dmat failed\n");
 1972                 goto get_config_failed;
 1973         }
 1974 
 1975         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
 1976                         1,  /* alignment */
 1977                         0, /* boundary */
 1978                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1979                         BUS_SPACE_MAXADDR,  /* highaddr */
 1980                         NULL, NULL,         /* filter, filterarg */
 1981                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
 1982                         1,  /* nsegments */
 1983                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1984                         0,      /* flags */
 1985                         NULL,   /* lockfunc */
 1986                         NULL,       /* lockfuncarg */
 1987                         &hba->srb_dmat  /* tag */))
 1988         {
 1989                 device_printf(dev, "alloc srb_dmat failed\n");
 1990                 goto destroy_io_dmat;
 1991         }
 1992 
 1993         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
 1994                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1995                         &hba->srb_dmamap) != 0)
 1996         {
 1997                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
 1998                 goto destroy_srb_dmat;
 1999         }
 2000 
 2001         if (bus_dmamap_load(hba->srb_dmat,
 2002                         hba->srb_dmamap, hba->uncached_ptr,
 2003                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
 2004                         hptiop_map_srb, hba, 0))
 2005         {
 2006                 device_printf(dev, "bus_dmamap_load failed!\n");
 2007                 goto srb_dmamem_free;
 2008         }
 2009 
 2010         if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
 2011                 device_printf(dev, "cam_simq_alloc failed\n");
 2012                 goto srb_dmamap_unload;
 2013         }
 2014 
 2015         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
 2016                         hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
 2017         if (!hba->sim) {
 2018                 device_printf(dev, "cam_sim_alloc failed\n");
 2019                 cam_simq_free(devq);
 2020                 goto srb_dmamap_unload;
 2021         }
 2022         hptiop_lock_adapter(hba);
 2023         if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
 2024         {
 2025                 device_printf(dev, "xpt_bus_register failed\n");
 2026                 goto free_cam_sim;
 2027         }
 2028 
 2029         if (xpt_create_path(&hba->path, /*periph */ NULL,
 2030                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
 2031                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 2032                 device_printf(dev, "xpt_create_path failed\n");
 2033                 goto deregister_xpt_bus;
 2034         }
 2035         hptiop_unlock_adapter(hba);
 2036 
 2037         bzero(&set_config, sizeof(set_config));
 2038         set_config.iop_id = unit;
 2039         set_config.vbus_id = cam_sim_path(hba->sim);
 2040         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
 2041 
 2042         if (hba->ops->set_config(hba, &set_config)) {
 2043                 device_printf(dev, "set iop config failed.\n");
 2044                 goto free_hba_path;
 2045         }
 2046 
 2047         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
 2048         ccb.ccb_h.func_code = XPT_SASYNC_CB;
 2049         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
 2050         ccb.callback = hptiop_async;
 2051         ccb.callback_arg = hba->sim;
 2052         xpt_action((union ccb *)&ccb);
 2053 
 2054         rid = 0;
 2055         if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
 2056                         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
 2057                 device_printf(dev, "allocate irq failed!\n");
 2058                 goto free_hba_path;
 2059         }
 2060 
 2061         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
 2062                                 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
 2063         {
 2064                 device_printf(dev, "failed to set up interrupt handler!\n");
 2065                 goto free_irq_resource;
 2066         }
 2067 
 2068         if (hptiop_send_sync_msg(hba,
 2069                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
 2070                 device_printf(dev, "failed to start background task\n");
 2071                 goto teardown_irq_resource;
 2072         }
 2073 
 2074         hba->ops->enable_intr(hba);
 2075         hba->initialized = 1;
 2076 
 2077         hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
 2078                                 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
 2079                                 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
 2080 
 2081 
 2082         return 0;
 2083 
 2084 
 2085 teardown_irq_resource:
 2086         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
 2087 
 2088 free_irq_resource:
 2089         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
 2090 
 2091         hptiop_lock_adapter(hba);
 2092 free_hba_path:
 2093         xpt_free_path(hba->path);
 2094 
 2095 deregister_xpt_bus:
 2096         xpt_bus_deregister(cam_sim_path(hba->sim));
 2097 
 2098 free_cam_sim:
 2099         cam_sim_free(hba->sim, /*free devq*/ TRUE);
 2100         hptiop_unlock_adapter(hba);
 2101 
 2102 srb_dmamap_unload:
 2103         if (hba->uncached_ptr)
 2104                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
 2105 
 2106 srb_dmamem_free:
 2107         if (hba->uncached_ptr)
 2108                 bus_dmamem_free(hba->srb_dmat,
 2109                         hba->uncached_ptr, hba->srb_dmamap);
 2110 
 2111 destroy_srb_dmat:
 2112         if (hba->srb_dmat)
 2113                 bus_dma_tag_destroy(hba->srb_dmat);
 2114 
 2115 destroy_io_dmat:
 2116         if (hba->io_dmat)
 2117                 bus_dma_tag_destroy(hba->io_dmat);
 2118 
 2119 get_config_failed:
 2120         hba->ops->internal_memfree(hba);
 2121 
 2122 destroy_parent_tag:
 2123         if (hba->parent_dmat)
 2124                 bus_dma_tag_destroy(hba->parent_dmat);
 2125 
 2126 release_pci_res:
 2127         if (hba->ops->release_pci_res)
 2128                 hba->ops->release_pci_res(hba);
 2129 
 2130         return ENXIO;
 2131 }
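/*
 * hptiop_attach() unwinds through the labels above in strict reverse
 * order of acquisition, so a goto to any label releases everything
 * allocated up to the corresponding failure point before returning
 * ENXIO.  The adapter lock is re-taken just before free_hba_path so
 * that the CAM teardown (xpt_free_path, xpt_bus_deregister,
 * cam_sim_free) runs under the SIM lock registered via
 * cam_sim_alloc(..., &hba->lock, ...).
 */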
 2132 
 2133 static int hptiop_detach(device_t dev)
 2134 {
 2135         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
 2136         int i;
 2137         int error = EBUSY;
 2138 
 2139         hptiop_lock_adapter(hba);
 2140         for (i = 0; i < hba->max_devices; i++)
 2141                 if (hptiop_os_query_remove_device(hba, i)) {
 2142                         device_printf(dev, "%d file system is busy, id=%d\n",
 2143                                                 hba->pciunit, i);
 2144                         goto out;
 2145                 }
 2146 
 2147         if ((error = hptiop_shutdown(dev)) != 0)
 2148                 goto out;
 2149         if (hptiop_send_sync_msg(hba,
 2150                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
 2151                 goto out;
 2152         hptiop_unlock_adapter(hba);
 2153 
 2154         hptiop_release_resource(hba);
 2155         return (0);
 2156 out:
 2157         hptiop_unlock_adapter(hba);
 2158         return error;
 2159 }
 2160 
 2161 static int hptiop_shutdown(device_t dev)
 2162 {
 2163         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
 2164 
 2165         int error = 0;
 2166 
 2167         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
 2168                 device_printf(dev, "%d device is busy\n", hba->pciunit);
 2169                 return EBUSY;
 2170         }
 2171 
 2172         hba->ops->disable_intr(hba);
 2173 
 2174         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
 2175                 error = EBUSY;
 2176 
 2177         return error;
 2178 }
 2179 
 2180 static void hptiop_pci_intr(void *arg)
 2181 {
 2182         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
 2183         hptiop_lock_adapter(hba);
 2184         hba->ops->iop_intr(hba);
 2185         hptiop_unlock_adapter(hba);
 2186 }
 2187 
 2188 static void hptiop_poll(struct cam_sim *sim)
 2189 {
 2190         struct hpt_iop_hba *hba;
 2191 
 2192         hba = cam_sim_softc(sim);
 2193         hba->ops->iop_intr(hba);
 2194 }
 2195 
 2196 static void hptiop_async(void * callback_arg, u_int32_t code,
 2197                                         struct cam_path * path, void * arg)
 2198 {
 2199 }
 2200 
 2201 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
 2202 {
 2203         BUS_SPACE_WRT4_ITL(outbound_intmask,
 2204                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
 2205 }
 2206 
 2207 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
 2208 {
 2209         u_int32_t int_mask;
 2210 
 2211         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
 2212                         
 2213         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
 2214                         | MVIOP_MU_OUTBOUND_INT_MSG;
 2215         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
 2216 }
 2217 
 2218 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
 2219 {
 2220         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
 2221         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
 2222 
 2223         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
 2224         BUS_SPACE_RD4_MVFREY2(isr_enable);
 2225 
 2226         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
 2227         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
 2228 }
 2229 
 2230 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
 2231 {
 2232         u_int32_t int_mask;
 2233 
 2234         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
 2235 
 2236         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
 2237         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
 2238         BUS_SPACE_RD4_ITL(outbound_intstatus);
 2239 }
 2240 
 2241 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
 2242 {
 2243         u_int32_t int_mask;
 2244         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
 2245         
 2246         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
 2247                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
 2248         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
 2249         BUS_SPACE_RD4_MV0(outbound_intmask);
 2250 }
 2251 
 2252 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
 2253 {
 2254         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
 2255         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
 2256 
 2257         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
 2258         BUS_SPACE_RD4_MVFREY2(isr_enable);
 2259 
 2260         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
 2261         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
 2262 }
 2263 
 2264 static void hptiop_reset_adapter(void *argv)
 2265 {
 2266         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
 2267         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
 2268                 return;
 2269         hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
 2270 }
 2271 
 2272 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
 2273 {
 2274         struct hpt_iop_srb * srb;
 2275 
 2276         if (hba->srb_list) {
 2277                 srb = hba->srb_list;
 2278                 hba->srb_list = srb->next;
 2279                 return srb;
 2280         }
 2281 
 2282         return NULL;
 2283 }
 2284 
 2285 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
 2286 {
 2287         srb->next = hba->srb_list;
 2288         hba->srb_list = srb;
 2289 }
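/*
 * SRBs sit on a singly linked LIFO free list threaded through
 * srb->next.  There is no locking in the two helpers themselves;
 * callers appear to rely on the adapter mutex (hptiop_action runs as
 * the SIM action routine under hba->lock, and the interrupt handler
 * takes the same lock), which is what makes the pop/push above safe.
 */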
 2290 
 2291 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
 2292 {
 2293         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
 2294         struct hpt_iop_srb * srb;
 2295         int error;
 2296 
 2297         switch (ccb->ccb_h.func_code) {
 2298 
 2299         case XPT_SCSI_IO:
 2300                 if (ccb->ccb_h.target_lun != 0 ||
 2301                         ccb->ccb_h.target_id >= hba->max_devices ||
 2302                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
 2303                 {
 2304                         ccb->ccb_h.status = CAM_TID_INVALID;
 2305                         xpt_done(ccb);
 2306                         return;
 2307                 }
 2308 
 2309                 if ((srb = hptiop_get_srb(hba)) == NULL) {
 2310                         device_printf(hba->pcidev, "srb allocation failed\n");
 2311                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2312                         xpt_done(ccb);
 2313                         return;
 2314                 }
 2315 
 2316                 srb->ccb = ccb;
 2317                 error = bus_dmamap_load_ccb(hba->io_dmat,
 2318                                             srb->dma_map,
 2319                                             ccb,
 2320                                             hptiop_post_scsi_command,
 2321                                             srb,
 2322                                             0);
 2323 
 2324                 if (error && error != EINPROGRESS) {
 2325                         device_printf(hba->pcidev,
 2326                                 "%d bus_dmamap_load error %d\n",
 2327                                 hba->pciunit, error);
 2328                         xpt_freeze_simq(hba->sim, 1);
 2329                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2330                         hptiop_free_srb(hba, srb);
 2331                         xpt_done(ccb);
 2332                         return;
 2333                 }
 2334 
 2335                 return;
 2336 
 2337         case XPT_RESET_BUS:
 2338                 device_printf(hba->pcidev, "reset adapter\n");
 2339                 hba->msg_done = 0;
 2340                 hptiop_reset_adapter(hba);
 2341                 break;
 2342 
 2343         case XPT_GET_TRAN_SETTINGS:
 2344         case XPT_SET_TRAN_SETTINGS:
 2345                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2346                 break;
 2347 
 2348         case XPT_CALC_GEOMETRY:
 2349                 cam_calc_geometry(&ccb->ccg, 1);
 2350                 break;
 2351 
 2352         case XPT_PATH_INQ:
 2353         {
 2354                 struct ccb_pathinq *cpi = &ccb->cpi;
 2355 
 2356                 cpi->version_num = 1;
 2357                 cpi->hba_inquiry = PI_SDTR_ABLE;
 2358                 cpi->target_sprt = 0;
 2359                 cpi->hba_misc = PIM_NOBUSRESET;
 2360                 cpi->hba_eng_cnt = 0;
 2361                 cpi->max_target = hba->max_devices;
 2362                 cpi->max_lun = 0;
 2363                 cpi->unit_number = cam_sim_unit(sim);
 2364                 cpi->bus_id = cam_sim_bus(sim);
 2365                 cpi->initiator_id = hba->max_devices;
 2366                 cpi->base_transfer_speed = 3300;
 2367 
 2368                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 2369                 strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
 2370                 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 2371                 cpi->transport = XPORT_SPI;
 2372                 cpi->transport_version = 2;
 2373                 cpi->protocol = PROTO_SCSI;
 2374                 cpi->protocol_version = SCSI_REV_2;
 2375                 cpi->ccb_h.status = CAM_REQ_CMP;
 2376                 break;
 2377         }
 2378 
 2379         default:
 2380                 ccb->ccb_h.status = CAM_REQ_INVALID;
 2381                 break;
 2382         }
 2383 
 2384         xpt_done(ccb);
 2385         return;
 2386 }
 2387 
 2388 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
 2389                                 struct hpt_iop_srb *srb,
 2390                                 bus_dma_segment_t *segs, int nsegs)
 2391 {
 2392         int idx;
 2393         union ccb *ccb = srb->ccb;
 2394         u_int8_t *cdb;
 2395 
 2396         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2397                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2398         else
 2399                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2400 
 2401         KdPrint(("ccb=%p %x-%x-%x\n",
 2402                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
 2403 
 2404         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
 2405                 u_int32_t iop_req32;
 2406                 struct hpt_iop_request_scsi_command req;
 2407 
 2408                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
 2409 
 2410                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
 2411                         device_printf(hba->pcidev, "invalid req offset\n");
 2412                         ccb->ccb_h.status = CAM_BUSY;
 2413                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
 2414                         hptiop_free_srb(hba, srb);
 2415                         xpt_done(ccb);
 2416                         return;
 2417                 }
 2418 
 2419                 if (ccb->csio.dxfer_len && nsegs > 0) {
 2420                         struct hpt_iopsg *psg = req.sg_list;
 2421                         for (idx = 0; idx < nsegs; idx++, psg++) {
 2422                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
 2423                                 psg->size = segs[idx].ds_len;
 2424                                 psg->eot = 0;
 2425                         }
 2426                         psg[-1].eot = 1;
 2427                 }
 2428 
 2429                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
 2430 
 2431                 req.header.size =
 2432                                 offsetof(struct hpt_iop_request_scsi_command, sg_list)
 2433                                 + nsegs*sizeof(struct hpt_iopsg);
 2434                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2435                 req.header.flags = 0;
 2436                 req.header.result = IOP_RESULT_PENDING;
 2437                 req.header.context = (u_int64_t)(unsigned long)srb;
 2438                 req.dataxfer_length = ccb->csio.dxfer_len;
 2439                 req.channel = 0;
 2440                 req.target = ccb->ccb_h.target_id;
 2441                 req.lun = ccb->ccb_h.target_lun;
 2442 
 2443                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
 2444                         (u_int8_t *)&req, req.header.size);
 2445 
 2446                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2447                         bus_dmamap_sync(hba->io_dmat,
 2448                                 srb->dma_map, BUS_DMASYNC_PREREAD);
 2449                 }
 2450                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
 2451                         bus_dmamap_sync(hba->io_dmat,
 2452                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
 2453 
 2454                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
 2455         } else {
 2456                 struct hpt_iop_request_scsi_command *req;
 2457 
 2458                 req = (struct hpt_iop_request_scsi_command *)srb;
 2459                 if (ccb->csio.dxfer_len && nsegs > 0) {
 2460                         struct hpt_iopsg *psg = req->sg_list;
 2461                         for (idx = 0; idx < nsegs; idx++, psg++) {
 2462                                 psg->pci_address = 
 2463                                         (u_int64_t)segs[idx].ds_addr;
 2464                                 psg->size = segs[idx].ds_len;
 2465                                 psg->eot = 0;
 2466                         }
 2467                         psg[-1].eot = 1;
 2468                 }
 2469 
 2470                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2471 
 2472                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2473                 req->header.result = IOP_RESULT_PENDING;
 2474                 req->dataxfer_length = ccb->csio.dxfer_len;
 2475                 req->channel = 0;
 2476                 req->target = ccb->ccb_h.target_id;
 2477                 req->lun = ccb->ccb_h.target_lun;
 2478                 req->header.size =
 2479                         offsetof(struct hpt_iop_request_scsi_command, sg_list)
 2480                         + nsegs*sizeof(struct hpt_iopsg);
 2481                 req->header.context = (u_int64_t)srb->index |
 2482                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
 2483                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 2484 
 2485                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2486                         bus_dmamap_sync(hba->io_dmat,
 2487                                 srb->dma_map, BUS_DMASYNC_PREREAD);
 2488                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
 2489                         bus_dmamap_sync(hba->io_dmat,
 2490                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
 2491                 }
 2492 
 2493                 if (hba->firmware_version > 0x01020000
 2494                         || hba->interface_version > 0x01020000) {
 2495                         u_int32_t size_bits;
 2496 
 2497                         if (req->header.size < 256)
 2498                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
 2499                         else if (req->header.size < 512)
 2500                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
 2501                         else
 2502                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
 2503                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
 2504 
 2505                         BUS_SPACE_WRT4_ITL(inbound_queue,
 2506                                 (u_int32_t)srb->phy_addr | size_bits);
 2507                 } else
 2508                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
 2509                                 |IOPMU_QUEUE_ADDR_HOST_BIT);
 2510         }
 2511 }
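/*
 * For newer firmware (firmware or interface version above 0x01020000)
 * the request's size class is encoded in the low bits of the physical
 * address written to the inbound queue: REQUEST_SIZE_BIT alone for
 * requests under 256 bytes, ADDR_HOST_BIT alone for under 512, and
 * both bits for anything larger; older firmware always gets
 * ADDR_HOST_BIT.  The encoding relies on the request address being
 * aligned far enough that its low bits are otherwise zero.
 */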
 2512 
 2513 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
 2514                                 struct hpt_iop_srb *srb,
 2515                                 bus_dma_segment_t *segs, int nsegs)
 2516 {
 2517         int idx, size;
 2518         union ccb *ccb = srb->ccb;
 2519         u_int8_t *cdb;
 2520         struct hpt_iop_request_scsi_command *req;
 2521         u_int64_t req_phy;
 2522 
 2523         req = (struct hpt_iop_request_scsi_command *)srb;
 2524         req_phy = srb->phy_addr;
 2525 
 2526         if (ccb->csio.dxfer_len && nsegs > 0) {
 2527                 struct hpt_iopsg *psg = req->sg_list;
 2528                 for (idx = 0; idx < nsegs; idx++, psg++) {
 2529                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
 2530                         psg->size = segs[idx].ds_len;
 2531                         psg->eot = 0;
 2532                 }
 2533                 psg[-1].eot = 1;
 2534         }
 2535         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2536                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2537         else
 2538                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2539 
 2540         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2541         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2542         req->header.result = IOP_RESULT_PENDING;
 2543         req->dataxfer_length = ccb->csio.dxfer_len;
 2544         req->channel = 0;
 2545         req->target = ccb->ccb_h.target_id;
 2546         req->lun = ccb->ccb_h.target_lun;
 2547         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
 2548                                 - sizeof(struct hpt_iopsg)
 2549                                 + nsegs * sizeof(struct hpt_iopsg);
 2550         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2551                 bus_dmamap_sync(hba->io_dmat,
 2552                         srb->dma_map, BUS_DMASYNC_PREREAD);
 2553         }
 2554         else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
 2555                 bus_dmamap_sync(hba->io_dmat,
 2556                         srb->dma_map, BUS_DMASYNC_PREWRITE);
 2557         req->header.context = (u_int64_t)srb->index
 2558                                         << MVIOP_REQUEST_NUMBER_START_BIT
 2559                                         | MVIOP_CMD_TYPE_SCSI;
 2560         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 2561         size = req->header.size >> 8;
 2562         hptiop_mv_inbound_write(req_phy
 2563                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
 2564                         | imin(3, size), hba);
 2565 }
 2566 
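      /*
       * Post a SCSI command to an MVFREY-style IOP.  Unlike the MV path,
       * the request's physical address is carried in the header itself,
       * split across the flags and context fields; SG entries have bit 0
       * of the PCI address set, and the request is queued through a
       * circular inbound list whose write pointer carries a toggle bit.
       */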
 2567 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
 2568                                 struct hpt_iop_srb *srb,
 2569                                 bus_dma_segment_t *segs, int nsegs)
 2570 {
 2571         int idx, index;
 2572         union ccb *ccb = srb->ccb;
 2573         u_int8_t *cdb;
 2574         struct hpt_iop_request_scsi_command *req;
 2575         u_int64_t req_phy;
 2576 
 2577         req = (struct hpt_iop_request_scsi_command *)srb;
 2578         req_phy = srb->phy_addr;
 2579 
 2580         if (ccb->csio.dxfer_len && nsegs > 0) {
 2581                 struct hpt_iopsg *psg = req->sg_list;
 2582                 for (idx = 0; idx < nsegs; idx++, psg++) {
 2583                         psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
 2584                         psg->size = segs[idx].ds_len;
 2585                         psg->eot = 0;
 2586                 }
 2587                 psg[-1].eot = 1;
 2588         }
 2589         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2590                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2591         else
 2592                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2593 
 2594         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2595         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2596         req->header.result = IOP_RESULT_PENDING;
 2597         req->dataxfer_length = ccb->csio.dxfer_len;
 2598         req->channel = 0;
 2599         req->target = ccb->ccb_h.target_id;
 2600         req->lun = ccb->ccb_h.target_lun;
 2601         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
 2602                                 - sizeof(struct hpt_iopsg)
 2603                                 + nsegs * sizeof(struct hpt_iopsg);
 2604         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2605                 bus_dmamap_sync(hba->io_dmat,
 2606                         srb->dma_map, BUS_DMASYNC_PREREAD);
 2607         }
 2608         else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
 2609                 bus_dmamap_sync(hba->io_dmat,
 2610                         srb->dma_map, BUS_DMASYNC_PREWRITE);
 2611 
 2612         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
 2613                                                 | IOP_REQUEST_FLAG_ADDR_BITS
 2614                                                 | ((req_phy >> 16) & 0xffff0000);
 2615         req->header.context = ((req_phy & 0xffffffff) << 32)
 2616                                                 | srb->index << 4
 2617                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
 2618 
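              /*
               * Advance the inbound-list write pointer; on wrap, clear the
               * index bits and flip CL_POINTER_TOGGLE, presumably so the
               * controller can tell a full list from an empty one.
               */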
 2619         hba->u.mvfrey.inlist_wptr++;
 2620         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
 2621 
 2622         if (index == hba->u.mvfrey.list_count) {
 2623                 index = 0;
 2624                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
 2625                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
 2626         }
 2627 
 2628         hba->u.mvfrey.inlist[index].addr = req_phy;
 2629         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
 2630 
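              /*
               * Publish the new write pointer; the read-back presumably
               * flushes the posted write before the 20-second reset
               * timeout is armed below.
               */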
 2631         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
 2632         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
 2633 
 2634         if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
 2635                 callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
 2636         }
 2637 }
 2638 
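      /*
       * bus_dmamap_load() callback for SCSI commands.  If the load failed
       * or produced more segments than the adapter supports, complete the
       * CCB as CAM_BUSY so CAM retries it later; otherwise hand the SRB to
       * the interface-specific post routine.
       */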
 2639 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
 2640                                         int nsegs, int error)
 2641 {
 2642         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
 2643         union ccb *ccb = srb->ccb;
 2644         struct hpt_iop_hba *hba = srb->hba;
 2645 
 2646         if (error || nsegs > hba->max_sg_count) {
 2647                 KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
 2648                         ccb->ccb_h.func_code,
 2649                         ccb->ccb_h.target_id,
 2650                         (uintmax_t)ccb->ccb_h.target_lun, nsegs));
 2651                 ccb->ccb_h.status = CAM_BUSY;
 2652                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
 2653                 hptiop_free_srb(hba, srb);
 2654                 xpt_done(ccb);
 2655                 return;
 2656         }
 2657 
 2658         hba->ops->post_req(hba, srb, segs, nsegs);
 2659 }
 2660 
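      /*
       * Callback for loading the MV control/config DMA buffer: round both
       * the bus address and the kernel pointer up to the 32-byte alignment
       * the hardware expects.
       */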
 2661 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
 2662                                 int nsegs, int error)
 2663 {
 2664         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2665         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) 
 2666                                 & ~(u_int64_t)0x1F;
 2667         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
 2668                                 & ~0x1F);
 2669 }
 2670 
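      /*
       * Callback for loading the MVFREY control/config DMA buffer.  The
       * 32-byte-aligned region is carved up sequentially: 0x800 bytes for
       * the control request itself, then the inbound list, the outbound
       * list, and finally the outbound copy pointer, advancing the virtual
       * and physical cursors in step.
       */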
 2671 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
 2672                                 int nsegs, int error)
 2673 {
 2674         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2675         u_int8_t *p;
 2676         u_int64_t phy;
 2677         u_int32_t list_count = hba->u.mvfrey.list_count;
 2678 
 2679         phy = ((u_int64_t)segs->ds_addr + 0x1F) 
 2680                                 & ~(u_int64_t)0x1F;
 2681         p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
 2682                                 & ~0x1F);
 2683         
 2684         hba->ctlcfgcmd_phy = phy;
 2685         hba->ctlcfg_ptr = p;
 2686 
 2687         p += 0x800;
 2688         phy += 0x800;
 2689 
 2690         hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
 2691         hba->u.mvfrey.inlist_phy = phy;
 2692 
 2693         p += list_count * sizeof(struct mvfrey_inlist_entry);
 2694         phy += list_count * sizeof(struct mvfrey_inlist_entry);
 2695 
 2696         hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
 2697         hba->u.mvfrey.outlist_phy = phy;
 2698 
 2699         p += list_count * sizeof(struct mvfrey_outlist_entry);
 2700         phy += list_count * sizeof(struct mvfrey_outlist_entry);
 2701 
 2702         hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
 2703         hba->u.mvfrey.outlist_cptr_phy = phy;
 2704 }
 2705 
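      /*
       * Callback for loading the uncached SRB pool.  Each 32-byte-aligned
       * slot gets a DMA map, its index, and its bus address; on ITL
       * adapters (no ctlcfg buffer) the address is pre-shifted by 5 bits
       * for the inbound queue, flagging high-memory buffers with
       * HPT_SRB_FLAG_HIGH_MEM_ACESS.
       */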
 2706 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
 2707                                 int nsegs, int error)
 2708 {
 2709         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
 2710         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
 2711         struct hpt_iop_srb *srb, *tmp_srb;
 2712         int i;
 2713 
 2714         if (error || nsegs == 0) {
 2715                 device_printf(hba->pcidev, "hptiop_map_srb error\n");
 2716                 return;
 2717         }
 2718 
 2719         /* map srb */
 2720         srb = (struct hpt_iop_srb *)
 2721                 (((unsigned long)hba->uncached_ptr + 0x1F)
 2722                 & ~(unsigned long)0x1F);
 2723 
 2724         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
 2725                 tmp_srb = (struct hpt_iop_srb *)
 2726                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
 2727                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
 2728                         bzero(tmp_srb, sizeof(struct hpt_iop_srb)); /* zero before creating the map */
 2729                         if (bus_dmamap_create(hba->io_dmat,
 2730                                                 0, &tmp_srb->dma_map)) {
 2731                                 device_printf(hba->pcidev, "dmamap create failed\n");
 2732                                 return;
 2733                         }
 2734 
 2735                         tmp_srb->hba = hba;
 2736                         tmp_srb->index = i;
 2737                         if (hba->ctlcfg_ptr == NULL) { /* ITL IOP */
 2738                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
 2739                                                         (phy_addr >> 5);
 2740                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
 2741                                         tmp_srb->srb_flag =
 2742                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
 2743                         } else {
 2744                                 tmp_srb->phy_addr = phy_addr;
 2745                         }
 2746 
 2747                         callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
 2748                         hptiop_free_srb(hba, tmp_srb);
 2749                         hba->srb[i] = tmp_srb;
 2750                         phy_addr += HPT_SRB_MAX_SIZE;
 2751                 }
 2752                 else {
 2753                         device_printf(hba->pcidev, "invalid alignment\n");
 2754                         return;
 2755                 }
 2756         }
 2757 }
 2758 
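      /*
       * Message acknowledgement from the IOP; synchronous senders poll
       * hba->msg_done.
       */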
 2759 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
 2760 {
 2761         hba->msg_done = 1;
 2762 }
 2763 
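      /*
       * Ask CAM whether a target may be removed: if a "da" peripheral
       * still holds references on the target's path, veto the removal by
       * returning -1.
       */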
 2764 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
 2765                                                 int target_id)
 2766 {
 2767         struct cam_periph       *periph = NULL;
 2768         struct cam_path         *path;
 2769         int                     status, retval = 0;
 2770 
 2771         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
 2772 
 2773         if (status == CAM_REQ_CMP) {
 2774                 if ((periph = cam_periph_find(path, "da")) != NULL) {
 2775                         if (periph->refcount >= 1) {
 2776                                 device_printf(hba->pcidev, "%d, "
 2777                                         "target_id=0x%x, "
 2778                                         "refcount=%d\n",
 2779                                     hba->pciunit, target_id, periph->refcount);
 2780                                 retval = -1;
 2781                         }
 2782                 }
 2783                 xpt_free_path(path);
 2784         }
 2785         return retval;
 2786 }
 2787 
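      /*
       * Undo hptiop_attach() in roughly reverse order: ioctl device node,
       * async callback and CAM path, interrupt handler, SIM, control/config
       * and SRB DMA allocations, DMA tags, IRQ and BAR resources, and the
       * adapter lock.
       */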
 2788 static void hptiop_release_resource(struct hpt_iop_hba *hba)
 2789 {
 2790         int i;
 2791 
 2792         if (hba->ioctl_dev)
 2793                 destroy_dev(hba->ioctl_dev);
 2794 
 2795         if (hba->path) {
 2796                 struct ccb_setasync ccb;
 2797 
 2798                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
 2799                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
 2800                 ccb.event_enable = 0;
 2801                 ccb.callback = hptiop_async;
 2802                 ccb.callback_arg = hba->sim;
 2803                 xpt_action((union ccb *)&ccb);
 2804                 xpt_free_path(hba->path);
 2805         }
 2806 
 2807         if (hba->irq_handle)
 2808                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
 2809 
 2810         if (hba->sim) {
 2811                 hptiop_lock_adapter(hba);
 2812                 xpt_bus_deregister(cam_sim_path(hba->sim));
 2813                 cam_sim_free(hba->sim, TRUE);
 2814                 hptiop_unlock_adapter(hba);
 2815         }
 2816 
 2817         if (hba->ctlcfg_dmat) {
 2818                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 2819                 bus_dmamem_free(hba->ctlcfg_dmat,
 2820                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 2821                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 2822         }
 2823 
 2824         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
 2825                 struct hpt_iop_srb *srb = hba->srb[i]; /* may be NULL if hptiop_map_srb() failed */
 2826                 if (srb != NULL && srb->dma_map)
 2827                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
 2828                 if (srb != NULL) callout_drain(&srb->timeout);
 2829         }
 2830 
 2831         if (hba->srb_dmat) {
 2832                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
 2833                 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
 2834                 bus_dma_tag_destroy(hba->srb_dmat);
 2835         }
 2836 
 2837         if (hba->io_dmat)
 2838                 bus_dma_tag_destroy(hba->io_dmat);
 2839 
 2840         if (hba->parent_dmat)
 2841                 bus_dma_tag_destroy(hba->parent_dmat);
 2842 
 2843         if (hba->irq_res)
 2844                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
 2845                                         0, hba->irq_res);
 2846 
 2847         if (hba->bar0_res)
 2848                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 2849                                         hba->bar0_rid, hba->bar0_res);
 2850         if (hba->bar2_res)
 2851                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 2852                                         hba->bar2_rid, hba->bar2_res);
 2853         mtx_destroy(&hba->lock);
 2854 }
