FreeBSD/Linux Kernel Cross Reference
sys/dev/raid/hptiop/hptiop.c


    1 /*
    2  * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
    3  * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.15 2012/10/25 17:29:11 delphij Exp $
   27  */
   28 
   29 #include <sys/param.h>
   30 #include <sys/types.h>
   31 #include <sys/cons.h>
   32 #include <sys/time.h>
   33 #include <sys/systm.h>
   34 
   35 #include <sys/stat.h>
   36 #include <sys/malloc.h>
   37 #include <sys/conf.h>
   38 #include <sys/libkern.h>
   39 #include <sys/kernel.h>
   40 
   41 #include <sys/kthread.h>
   42 #include <sys/lock.h>
   43 #include <sys/module.h>
   44 
   45 #include <sys/eventhandler.h>
   46 #include <sys/bus.h>
   47 #include <sys/taskqueue.h>
   48 #include <sys/device.h>
   49 #include <sys/mplock2.h>
   50 
   51 #include <machine/stdarg.h>
   52 #include <sys/rman.h>
   53 
   54 #include <vm/vm.h>
   55 #include <vm/pmap.h>
   56 
   57 #include <bus/pci/pcireg.h>
   58 #include <bus/pci/pcivar.h>
   59 
   60 #include <bus/cam/cam.h>
   61 #include <bus/cam/cam_ccb.h>
   62 #include <bus/cam/cam_sim.h>
   63 #include <bus/cam/cam_xpt_periph.h>
   64 #include <bus/cam/cam_xpt_sim.h>
   65 #include <bus/cam/cam_debug.h>
   66 #include <bus/cam/cam_periph.h>
   67 #include <bus/cam/scsi/scsi_all.h>
   68 #include <bus/cam/scsi/scsi_message.h>
   69 
   70 #include <dev/raid/hptiop/hptiop.h>
   71 
   72 static const char driver_name[] = "hptiop";
   73 static const char driver_version[] = "v1.8";
   74 
   75 static devclass_t hptiop_devclass;
   76 
   77 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
   78                                 u_int32_t msg, u_int32_t millisec);
   79 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
   80                                                         u_int32_t req);
   81 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
   82 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
   83                                                         u_int32_t req);
   84 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
   85 static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
   86                                 struct hpt_iop_ioctl_param *pParams);
   87 static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
   88                                 struct hpt_iop_ioctl_param *pParams);
   89 static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
   90                                 struct hpt_iop_ioctl_param *pParams);
   91 static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
   92 static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
   93 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
   94 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
   95 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
   96 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
   97                                 struct hpt_iop_request_get_config *config);
   98 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
   99                                 struct hpt_iop_request_get_config *config);
  100 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
  101                                 struct hpt_iop_request_get_config *config);
  102 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
  103                                 struct hpt_iop_request_set_config *config);
  104 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
  105                                 struct hpt_iop_request_set_config *config);
  106 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
  107                                 struct hpt_iop_request_set_config *config);
  108 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
  109 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
  110 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
  111 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
  112 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
  113 static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
  114                         u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
  115 static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
  116                                 struct hpt_iop_request_ioctl_command *req,
  117                                 struct hpt_iop_ioctl_param *pParams);
  118 static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
  119                                 struct hpt_iop_request_ioctl_command *req,
  120                                 struct hpt_iop_ioctl_param *pParams);
  121 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
  122                                 struct hpt_iop_srb *srb,
  123                                 bus_dma_segment_t *segs, int nsegs);
  124 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
  125                                 struct hpt_iop_srb *srb,
  126                                 bus_dma_segment_t *segs, int nsegs);
  127 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
  128                                 struct hpt_iop_srb *srb,
  129                                 bus_dma_segment_t *segs, int nsegs);
  130 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
  131 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
  132 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
  133 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
  134 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
  135 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
  136 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
  137 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
  138 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
  139 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
  140 static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
  141 static int  hptiop_probe(device_t dev);
  142 static int  hptiop_attach(device_t dev);
  143 static int  hptiop_detach(device_t dev);
  144 static int  hptiop_shutdown(device_t dev);
  145 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
  146 static void hptiop_poll(struct cam_sim *sim);
  147 static void hptiop_async(void *callback_arg, u_int32_t code,
  148                                         struct cam_path *path, void *arg);
  149 static void hptiop_pci_intr(void *arg);
  150 static void hptiop_release_resource(struct hpt_iop_hba *hba);
  151 static void hptiop_reset_adapter(void *argv);
  152 static d_open_t hptiop_open;
  153 static d_close_t hptiop_close;
  154 static d_ioctl_t hptiop_ioctl;
  155 
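       /*
        * Character-device entry points.  Each adapter exposes a control
        * node whose ioctl handler (hptiop_ioctl, below) services
        * HPT_DO_IOCONTROL and HPT_SCAN_BUS requests, presumably from
        * HighPoint's userland management tools.
        */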
  156 static struct dev_ops hptiop_ops = {
  157         { driver_name, 0, 0 },
  158         .d_open = hptiop_open,
  159         .d_close = hptiop_close,
  160         .d_ioctl = hptiop_ioctl,
  161 };
  162 
  163 #define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
  164 
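       /*
        * Register-access helpers.  Each of the three supported interface
        * types (ITL, MV, and MV "Frey") gets its own family of 32-bit
        * read/write macros over BAR0/BAR2, with register offsets taken
        * from the matching layout structure (hpt_iopmu_itl,
        * hpt_iopmv_regs/hpt_iopmu_mv, hpt_iopmu_mvfrey).
        */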
  165 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
  166                 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
  167 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
  168                 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
  169 
  170 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
  171                 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
  172 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
  173                 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
  174 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
  175                 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
  176 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
  177                 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
  178 
  179 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
  180                 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
  181 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
  182                 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
  183 
  184 static int hptiop_open(struct dev_open_args *ap)
  185 {
  186         cdev_t dev = ap->a_head.a_dev;
  187         struct hpt_iop_hba *hba = hba_from_dev(dev);
  188 
  189         if (hba==NULL)
  190                 return ENXIO;
  191         if (hba->flag & HPT_IOCTL_FLAG_OPEN)
  192                 return EBUSY;
  193         hba->flag |= HPT_IOCTL_FLAG_OPEN;
  194         return 0;
  195 }
  196 
  197 static int hptiop_close(struct dev_close_args *ap)
  198 {
  199         cdev_t dev = ap->a_head.a_dev;
  200         struct hpt_iop_hba *hba = hba_from_dev(dev);
  201         hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
  202         return 0;
  203 }
  204 
  205 static int hptiop_ioctl(struct dev_ioctl_args *ap)
  206 {
  207         cdev_t dev = ap->a_head.a_dev;
  208         u_long cmd = ap->a_cmd;
  209         caddr_t data = ap->a_data;
  210         int ret = EFAULT;
  211         struct hpt_iop_hba *hba = hba_from_dev(dev);
  212 
  213         get_mplock();
  214 
  215         switch (cmd) {
  216         case HPT_DO_IOCONTROL:
  217                 ret = hba->ops->do_ioctl(hba,
  218                                 (struct hpt_iop_ioctl_param *)data);
  219                 break;
  220         case HPT_SCAN_BUS:
  221                 ret = hptiop_rescan_bus(hba);
  222                 break;
  223         }
  224 
  225         rel_mplock();
  226 
  227         return ret;
  228 }
  229 
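       /*
        * Pop one 64-bit entry from the MV outbound ring.  The head index
        * is advanced by the IOP and the tail by the host; an empty ring
        * (tail == head) yields 0.  The tail wraps at MVIOP_QUEUE_LEN and
        * is written back so the firmware can reuse the slot.
        */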
  230 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
  231 {
  232         u_int64_t p;
  233         u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
  234         u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
  235 
  236         if (outbound_tail != outbound_head) {
  237                 bus_space_read_region_4(hba->bar2t, hba->bar2h,
  238                         offsetof(struct hpt_iopmu_mv,
  239                                 outbound_q[outbound_tail]),
  240                         (u_int32_t *)&p, 2);
  241 
  242                 outbound_tail++;
  243 
  244                 if (outbound_tail == MVIOP_QUEUE_LEN)
  245                         outbound_tail = 0;
  246 
  247                 BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
  248                 return p;
  249         } else
  250                 return 0;
  251 }
  252 
  253 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
  254 {
  255         u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
  256         u_int32_t head = inbound_head + 1;
  257 
  258         if (head == MVIOP_QUEUE_LEN)
  259                 head = 0;
  260 
  261         bus_space_write_region_4(hba->bar2t, hba->bar2h,
  262                         offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
  263                         (u_int32_t *)&p, 2);
  264         BUS_SPACE_WRT4_MV2(inbound_head, head);
  265         BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
  266 }
  267 
  268 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
  269 {
  270         BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
  271         BUS_SPACE_RD4_ITL(outbound_intstatus);
  272 }
  273 
  274 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
  275 {
  276 
  277         BUS_SPACE_WRT4_MV2(inbound_msg, msg);
  278         BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
  279 
  280         BUS_SPACE_RD4_MV0(outbound_intmask);
  281 }
  282 
  283 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
  284 {
  285         BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
  286         BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
  287 }
  288 
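       /*
        * Poll the ITL inbound queue for up to 'millisec' ms until the
        * firmware posts a request, then hand it straight back via the
        * outbound queue.  Apparently serves as a liveness check that the
        * IOP has come up.
        */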
  289 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
  290 {
  291         u_int32_t req=0;
  292         int i;
  293 
  294         for (i = 0; i < millisec; i++) {
  295                 req = BUS_SPACE_RD4_ITL(inbound_queue);
  296                 if (req != IOPMU_QUEUE_EMPTY)
  297                         break;
  298                 DELAY(1000);
  299         }
  300 
  301         if (req!=IOPMU_QUEUE_EMPTY) {
  302                 BUS_SPACE_WRT4_ITL(outbound_queue, req);
  303                 BUS_SPACE_RD4_ITL(outbound_intstatus);
  304                 return 0;
  305         }
  306 
  307         return -1;
  308 }
  309 
  310 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
  311 {
  312         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
  313                 return -1;
  314 
  315         return 0;
  316 }
  317 
  318 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
  319                                                         u_int32_t millisec)
  320 {
  321         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
  322                 return -1;
  323 
  324         return 0;
  325 }
  326 
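       /*
        * Completion handler for ITL requests.  'index' either carries a
        * host SRB slot (IOPMU_QUEUE_MASK_HOST_BITS set) or a byte offset
        * into the IOP's BAR0 memory window, in which case the request
        * header fields are read back through bus_space accessors.
        */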
  327 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
  328                                                         u_int32_t index)
  329 {
  330         struct hpt_iop_srb *srb;
  331         struct hpt_iop_request_scsi_command *req=NULL;
  332         union ccb *ccb;
  333         u_int8_t *cdb;
  334         u_int32_t result, temp, dxfer;
  335         u_int64_t temp64;
  336 
  337         if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
  338                 if (hba->firmware_version > 0x01020000 ||
  339                         hba->interface_version > 0x01020000) {
  340                         srb = hba->srb[index & ~(u_int32_t)
  341                                 (IOPMU_QUEUE_ADDR_HOST_BIT
  342                                 | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
  343                         req = (struct hpt_iop_request_scsi_command *)srb;
  344                         if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
  345                                 result = IOP_RESULT_SUCCESS;
  346                         else
  347                                 result = req->header.result;
  348                 } else {
  349                         srb = hba->srb[index &
  350                                 ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
  351                         req = (struct hpt_iop_request_scsi_command *)srb;
  352                         result = req->header.result;
  353                 }
  354                 dxfer = req->dataxfer_length;
  355                 goto srb_complete;
  356         }
  357 
  358         /*iop req*/
  359         temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
  360                 offsetof(struct hpt_iop_request_header, type));
  361         result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
  362                 offsetof(struct hpt_iop_request_header, result));
  363         switch(temp) {
  364         case IOP_REQUEST_TYPE_IOCTL_COMMAND:
  365         {
  366                 temp64 = 0;
  367                 bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
  368                         offsetof(struct hpt_iop_request_header, context),
  369                         (u_int32_t *)&temp64, 2);
  370                 wakeup((void *)((unsigned long)hba->u.itl.mu + index));
  371                 break;
  372         }
  373 
  374         case IOP_REQUEST_TYPE_SCSI_COMMAND:
  375                 bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
  376                         offsetof(struct hpt_iop_request_header, context),
  377                         (u_int32_t *)&temp64, 2);
  378                 srb = (struct hpt_iop_srb *)(unsigned long)temp64;
  379                 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
  380                                 index + offsetof(struct hpt_iop_request_scsi_command,
  381                                 dataxfer_length));
  382 srb_complete:
  383                 ccb = (union ccb *)srb->ccb;
  384                 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
  385                         cdb = ccb->csio.cdb_io.cdb_ptr;
  386                 else
  387                         cdb = ccb->csio.cdb_io.cdb_bytes;
  388 
  389                 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
  390                         ccb->ccb_h.status = CAM_REQ_CMP;
  391                         goto scsi_done;
  392                 }
  393 
  394                 switch (result) {
  395                 case IOP_RESULT_SUCCESS:
  396                         switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
  397                         case CAM_DIR_IN:
  398                                 bus_dmamap_sync(hba->io_dmat,
  399                                         srb->dma_map, BUS_DMASYNC_POSTREAD);
  400                                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
  401                                 break;
  402                         case CAM_DIR_OUT:
  403                                 bus_dmamap_sync(hba->io_dmat,
  404                                         srb->dma_map, BUS_DMASYNC_POSTWRITE);
  405                                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
  406                                 break;
  407                         }
  408 
  409                         ccb->ccb_h.status = CAM_REQ_CMP;
  410                         break;
  411 
  412                 case IOP_RESULT_BAD_TARGET:
  413                         ccb->ccb_h.status = CAM_DEV_NOT_THERE;
  414                         break;
  415                 case IOP_RESULT_BUSY:
  416                         ccb->ccb_h.status = CAM_BUSY;
  417                         break;
  418                 case IOP_RESULT_INVALID_REQUEST:
  419                         ccb->ccb_h.status = CAM_REQ_INVALID;
  420                         break;
  421                 case IOP_RESULT_FAIL:
  422                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  423                         break;
  424                 case IOP_RESULT_RESET:
  425                         ccb->ccb_h.status = CAM_BUSY;
  426                         break;
  427                 case IOP_RESULT_CHECK_CONDITION:
  428                         memset(&ccb->csio.sense_data, 0,
  429                             sizeof(ccb->csio.sense_data));
  430                         if (dxfer < ccb->csio.sense_len)
  431                                 ccb->csio.sense_resid = ccb->csio.sense_len -
  432                                     dxfer;
  433                         else
  434                                 ccb->csio.sense_resid = 0;
  435                         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
  436                                 bus_space_read_region_1(hba->bar0t, hba->bar0h,
  437                                         index + offsetof(struct hpt_iop_request_scsi_command,
  438                                         sg_list), (u_int8_t *)&ccb->csio.sense_data,
  439                                         MIN(dxfer, sizeof(ccb->csio.sense_data)));
  440                         } else {
  441                                 memcpy(&ccb->csio.sense_data, &req->sg_list,
  442                                         MIN(dxfer, sizeof(ccb->csio.sense_data)));
  443                         }
  444                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  445                         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
  446                         ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
  447                         break;
  448                 default:
  449                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  450                         break;
  451                 }
  452 scsi_done:
  453                 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
  454                         BUS_SPACE_WRT4_ITL(outbound_queue, index);
  455 
  456                 ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
  457 
  458                 hptiop_free_srb(hba, srb);
  459                 xpt_done(ccb);
  460                 break;
  461         }
  462 }
  463 
  464 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
  465 {
  466         u_int32_t req, temp;
  467 
  468         while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
  469                 if (req & IOPMU_QUEUE_MASK_HOST_BITS)
  470                         hptiop_request_callback_itl(hba, req);
  471                 else {
  472                         temp = bus_space_read_4(hba->bar0t,
  473                                         hba->bar0h,req +
  474                                         offsetof(struct hpt_iop_request_header,
  475                                                 flags));
  476                         if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
  477                                 u_int64_t temp64;
  478                                 bus_space_read_region_4(hba->bar0t,
  479                                         hba->bar0h,req +
  480                                         offsetof(struct hpt_iop_request_header,
  481                                                 context),
  482                                         (u_int32_t *)&temp64, 2);
  483                                 if (temp64) {
  484                                         hptiop_request_callback_itl(hba, req);
  485                                 } else {
  486                                         temp64 = 1;
  487                                         bus_space_write_region_4(hba->bar0t,
  488                                                 hba->bar0h,req +
  489                                                 offsetof(struct hpt_iop_request_header,
  490                                                         context),
  491                                                 (u_int32_t *)&temp64, 2);
  492                                 }
  493                         } else
  494                                 hptiop_request_callback_itl(hba, req);
  495                 }
  496         }
  497 }
  498 
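       /*
        * ITL interrupt handler: acknowledge and dispatch firmware
        * messages (MSG0) and drain the outbound completion queue.
        * Returns nonzero if anything was claimed.
        */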
  499 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
  500 {
  501         u_int32_t status;
  502         int ret = 0;
  503 
  504         status = BUS_SPACE_RD4_ITL(outbound_intstatus);
  505 
  506         if (status & IOPMU_OUTBOUND_INT_MSG0) {
  507                 u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
  508                 KdPrint(("hptiop: received outbound msg %x\n", msg));
  509                 BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
  510                 hptiop_os_message_callback(hba, msg);
  511                 ret = 1;
  512         }
  513 
  514         if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
  515                 hptiop_drain_outbound_queue_itl(hba);
  516                 ret = 1;
  517         }
  518 
  519         return ret;
  520 }
  521 
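       /*
        * Completion handler for MV requests.  The context bits in the tag
        * select SCSI, IOCTL, or get/set-config completions; SCSI
        * completions map the IOP result onto CAM status codes.
        */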
  522 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
  523                                                         u_int64_t _tag)
  524 {
  525         u_int32_t context = (u_int32_t)_tag;
  526 
  527         if (context & MVIOP_CMD_TYPE_SCSI) {
  528                 struct hpt_iop_srb *srb;
  529                 struct hpt_iop_request_scsi_command *req;
  530                 union ccb *ccb;
  531                 u_int8_t *cdb;
  532 
  533                 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
  534                 req = (struct hpt_iop_request_scsi_command *)srb;
  535                 ccb = (union ccb *)srb->ccb;
  536                 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
  537                         cdb = ccb->csio.cdb_io.cdb_ptr;
  538                 else
  539                         cdb = ccb->csio.cdb_io.cdb_bytes;
  540 
  541                 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
  542                         ccb->ccb_h.status = CAM_REQ_CMP;
  543                         goto scsi_done;
  544                 }
  545                 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
  546                         req->header.result = IOP_RESULT_SUCCESS;
  547 
  548                 switch (req->header.result) {
  549                 case IOP_RESULT_SUCCESS:
  550                         switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
  551                         case CAM_DIR_IN:
  552                                 bus_dmamap_sync(hba->io_dmat,
  553                                         srb->dma_map, BUS_DMASYNC_POSTREAD);
  554                                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
  555                                 break;
  556                         case CAM_DIR_OUT:
  557                                 bus_dmamap_sync(hba->io_dmat,
  558                                         srb->dma_map, BUS_DMASYNC_POSTWRITE);
  559                                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
  560                                 break;
  561                         }
  562                         ccb->ccb_h.status = CAM_REQ_CMP;
  563                         break;
  564                 case IOP_RESULT_BAD_TARGET:
  565                         ccb->ccb_h.status = CAM_DEV_NOT_THERE;
  566                         break;
  567                 case IOP_RESULT_BUSY:
  568                         ccb->ccb_h.status = CAM_BUSY;
  569                         break;
  570                 case IOP_RESULT_INVALID_REQUEST:
  571                         ccb->ccb_h.status = CAM_REQ_INVALID;
  572                         break;
  573                 case IOP_RESULT_FAIL:
  574                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  575                         break;
  576                 case IOP_RESULT_RESET:
  577                         ccb->ccb_h.status = CAM_BUSY;
  578                         break;
  579                 case IOP_RESULT_CHECK_CONDITION:
  580                         memset(&ccb->csio.sense_data, 0,
  581                             sizeof(ccb->csio.sense_data));
  582                         if (req->dataxfer_length < ccb->csio.sense_len)
  583                                 ccb->csio.sense_resid = ccb->csio.sense_len -
  584                                     req->dataxfer_length;
  585                         else
  586                                 ccb->csio.sense_resid = 0;
  587                         memcpy(&ccb->csio.sense_data, &req->sg_list,
  588                                 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
  589                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  590                         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
  591                         ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
  592                         break;
  593                 default:
  594                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  595                         break;
  596                 }
  597 scsi_done:
  598                 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
  599 
  600                 hptiop_free_srb(hba, srb);
  601                 xpt_done(ccb);
  602         } else if (context & MVIOP_CMD_TYPE_IOCTL) {
  603                 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
  604                 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
  605                         hba->config_done = 1;
  606                 else
  607                         hba->config_done = -1;
  608                 wakeup(req);
  609         } else if (context &
  610                         (MVIOP_CMD_TYPE_SET_CONFIG |
  611                                 MVIOP_CMD_TYPE_GET_CONFIG))
  612                 hba->config_done = 1;
  613         else {
  614                 device_printf(hba->pcidev, "wrong callback type\n");
  615         }
  616 }
  617 
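       /*
        * Completion handler for MV Frey requests: the low four tag bits
        * give the request type, bits 4..11 the SRB slot.
        */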
  618 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
  619                                 u_int32_t _tag)
  620 {
  621         u_int32_t req_type = _tag & 0xf;
  622 
  623         struct hpt_iop_srb *srb;
  624         struct hpt_iop_request_scsi_command *req;
  625         union ccb *ccb;
  626         u_int8_t *cdb;
  627 
  628         switch (req_type) {
  629         case IOP_REQUEST_TYPE_GET_CONFIG:
  630         case IOP_REQUEST_TYPE_SET_CONFIG:
  631                 hba->config_done = 1;
  632                 break;
  633 
  634         case IOP_REQUEST_TYPE_SCSI_COMMAND:
  635                 srb = hba->srb[(_tag >> 4) & 0xff];
  636                 req = (struct hpt_iop_request_scsi_command *)srb;
  637 
  638                 ccb = (union ccb *)srb->ccb;
  639 
  640                 callout_stop(&ccb->ccb_h.timeout_ch);
  641 
  642                 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
  643                         cdb = ccb->csio.cdb_io.cdb_ptr;
  644                 else
  645                         cdb = ccb->csio.cdb_io.cdb_bytes;
  646 
  647                 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
  648                         ccb->ccb_h.status = CAM_REQ_CMP;
  649                         goto scsi_done;
  650                 }
  651 
  652                 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
  653                         req->header.result = IOP_RESULT_SUCCESS;
  654 
  655                 switch (req->header.result) {
  656                 case IOP_RESULT_SUCCESS:
  657                         switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
  658                         case CAM_DIR_IN:
  659                                 bus_dmamap_sync(hba->io_dmat,
  660                                                 srb->dma_map, BUS_DMASYNC_POSTREAD);
  661                                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
  662                                 break;
  663                         case CAM_DIR_OUT:
  664                                 bus_dmamap_sync(hba->io_dmat,
  665                                                 srb->dma_map, BUS_DMASYNC_POSTWRITE);
  666                                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
  667                                 break;
  668                         }
  669                         ccb->ccb_h.status = CAM_REQ_CMP;
  670                         break;
  671                 case IOP_RESULT_BAD_TARGET:
  672                         ccb->ccb_h.status = CAM_DEV_NOT_THERE;
  673                         break;
  674                 case IOP_RESULT_BUSY:
  675                         ccb->ccb_h.status = CAM_BUSY;
  676                         break;
  677                 case IOP_RESULT_INVALID_REQUEST:
  678                         ccb->ccb_h.status = CAM_REQ_INVALID;
  679                         break;
  680                 case IOP_RESULT_FAIL:
  681                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  682                         break;
  683                 case IOP_RESULT_RESET:
  684                         ccb->ccb_h.status = CAM_BUSY;
  685                         break;
  686                 case IOP_RESULT_CHECK_CONDITION:
  687                         memset(&ccb->csio.sense_data, 0,
  688                                sizeof(ccb->csio.sense_data));
  689                         if (req->dataxfer_length < ccb->csio.sense_len)
  690                                 ccb->csio.sense_resid = ccb->csio.sense_len -
  691                                 req->dataxfer_length;
  692                         else
  693                                 ccb->csio.sense_resid = 0;
  694                         memcpy(&ccb->csio.sense_data, &req->sg_list,
  695                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
  696                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  697                         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
  698                         ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
  699                         break;
  700                 default:
  701                         ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
  702                         break;
  703                 }
  704 scsi_done:
  705                 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
  706 
  707                 hptiop_free_srb(hba, srb);
  708                 xpt_done(ccb);
  709                 break;
  710         case IOP_REQUEST_TYPE_IOCTL_COMMAND:
  711                 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
  712                         hba->config_done = 1;
  713                 else
  714                         hba->config_done = -1;
  715                 wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
  716                 break;
  717         default:
  718                 device_printf(hba->pcidev, "wrong callback type\n");
  719                 break;
  720         }
  721 }
  722 
  723 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
  724 {
  725         u_int64_t req;
  726 
  727         while ((req = hptiop_mv_outbound_read(hba))) {
  728                 if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
  729                         if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
  730                                 hptiop_request_callback_mv(hba, req);
  731                         }
  732                 }
  733         }
  734 }
  735 
  736 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
  737 {
  738         u_int32_t status;
  739         int ret = 0;
  740 
  741         status = BUS_SPACE_RD4_MV0(outbound_doorbell);
  742 
  743         if (status)
  744                 BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
  745 
  746         if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
  747                 u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
  748                 KdPrint(("hptiop: received outbound msg %x\n", msg));
  749                 hptiop_os_message_callback(hba, msg);
  750                 ret = 1;
  751         }
  752 
  753         if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
  754                 hptiop_drain_outbound_queue_mv(hba);
  755                 ret = 1;
  756         }
  757 
  758         return ret;
  759 }
  760 
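       /*
        * MV "Frey" interrupt handler.  Doorbell bits signal firmware
        * messages; isr_cause signals new entries on the outbound list,
        * which is consumed until the read pointer catches up with the
        * IOP-maintained completion pointer (*outlist_cptr).  Once the
        * adapter is initialized, interrupts are masked for the duration
        * of the handler via pcie_f0_int_enable.
        */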
  761 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
  762 {
  763         u_int32_t status, _tag, cptr;
  764         int ret = 0;
  765 
  766         if (hba->initialized) {
  767                 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
  768         }
  769 
  770         status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
  771         if (status) {
  772                 BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
  773                 if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
  774                         u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
  775                         hptiop_os_message_callback(hba, msg);
  776                 }
  777                 ret = 1;
  778         }
  779 
  780         status = BUS_SPACE_RD4_MVFREY2(isr_cause);
  781         if (status) {
  782                 BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
  783                 do {
  784                         cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
  785                         while (hba->u.mvfrey.outlist_rptr != cptr) {
  786                                 hba->u.mvfrey.outlist_rptr++;
  787                                 if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
  788                                         hba->u.mvfrey.outlist_rptr = 0;
  789                                 }
  790 
  791                                 _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
  792                                 hptiop_request_callback_mvfrey(hba, _tag);
  793                                 ret = 2;
  794                         }
  795                 } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
  796         }
  797 
  798         if (hba->initialized) {
  799                 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
  800         }
  801 
  802         return ret;
  803 }
  804 
  805 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
  806                                         u_int32_t req32, u_int32_t millisec)
  807 {
  808         u_int32_t i;
  809         u_int64_t temp64;
  810 
  811         BUS_SPACE_WRT4_ITL(inbound_queue, req32);
  812         BUS_SPACE_RD4_ITL(outbound_intstatus);
  813 
  814         for (i = 0; i < millisec; i++) {
  815                 hptiop_intr_itl(hba);
  816                 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
  817                         offsetof(struct hpt_iop_request_header, context),
  818                         (u_int32_t *)&temp64, 2);
  819                 if (temp64)
  820                         return 0;
  821                 DELAY(1000);
  822         }
  823 
  824         return -1;
  825 }
  826 
  827 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
  828                                         void *req, u_int32_t millisec)
  829 {
  830         u_int32_t i;
  831         u_int64_t phy_addr;
  832         hba->config_done = 0;
  833 
  834         phy_addr = hba->ctlcfgcmd_phy |
  835                         (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
  836         ((struct hpt_iop_request_get_config *)req)->header.flags |=
  837                 IOP_REQUEST_FLAG_SYNC_REQUEST |
  838                 IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
  839         hptiop_mv_inbound_write(phy_addr, hba);
  840         BUS_SPACE_RD4_MV0(outbound_intmask);
  841 
  842         for (i = 0; i < millisec; i++) {
  843                 hptiop_intr_mv(hba);
  844                 if (hba->config_done)
  845                         return 0;
  846                 DELAY(1000);
  847         }
  848         return -1;
  849 }
  850 
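       /*
        * Post a synchronous request on the MV Frey inbound list and poll
        * for completion.  The write pointer wraps at list_count and flips
        * CL_POINTER_TOGGLE on wrap, presumably so the firmware can
        * distinguish stale entries from fresh ones.
        */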
  851 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
  852                                         void *req, u_int32_t millisec)
  853 {
  854         u_int32_t i, index;
  855         u_int64_t phy_addr;
  856         struct hpt_iop_request_header *reqhdr = (struct hpt_iop_request_header *)req;
  857 
  858         hba->config_done = 0;
  859 
  860         phy_addr = hba->ctlcfgcmd_phy;
  861         reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
  862                                         | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
  863                                         | IOP_REQUEST_FLAG_ADDR_BITS
  864                                         | ((phy_addr >> 16) & 0xffff0000);
  865         reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
  866                                         | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
  867 
  868         hba->u.mvfrey.inlist_wptr++;
  869         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
  870 
  871         if (index == hba->u.mvfrey.list_count) {
  872                 index = 0;
  873                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
  874                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
  875         }
  876 
  877         hba->u.mvfrey.inlist[index].addr = phy_addr;
  878         hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
  879 
  880         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
  881         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
  882 
  883         for (i = 0; i < millisec; i++) {
  884                 hptiop_intr_mvfrey(hba);
  885                 if (hba->config_done)
  886                         return 0;
  887                 DELAY(1000);
  888         }
  889         return -1;
  890 }
  891 
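       /*
        * Send a message-register command and busy-wait, polling the
        * interface's interrupt handler once per millisecond, until
        * msg_done is set or 'millisec' ms elapse.
        */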
  892 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
  893                                         u_int32_t msg, u_int32_t millisec)
  894 {
  895         u_int32_t i;
  896 
  897         hba->msg_done = 0;
  898         hba->ops->post_msg(hba, msg);
  899 
  900         for (i=0; i<millisec; i++) {
  901                 hba->ops->iop_intr(hba);
  902                 if (hba->msg_done)
  903                         break;
  904                 DELAY(1000);
  905         }
  906 
  907         return hba->msg_done? 0 : -1;
  908 }
  909 
  910 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
  911                                 struct hpt_iop_request_get_config * config)
  912 {
  913         u_int32_t req32;
  914 
  915         config->header.size = sizeof(struct hpt_iop_request_get_config);
  916         config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
  917         config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
  918         config->header.result = IOP_RESULT_PENDING;
  919         config->header.context = 0;
  920 
  921         req32 = BUS_SPACE_RD4_ITL(inbound_queue);
  922         if (req32 == IOPMU_QUEUE_EMPTY)
  923                 return -1;
  924 
  925         bus_space_write_region_4(hba->bar0t, hba->bar0h,
  926                         req32, (u_int32_t *)config,
  927                         sizeof(struct hpt_iop_request_header) >> 2);
  928 
  929         if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
  930                 KdPrint(("hptiop: get config send cmd failed"));
  931                 return -1;
  932         }
  933 
  934         bus_space_read_region_4(hba->bar0t, hba->bar0h,
  935                         req32, (u_int32_t *)config,
  936                         sizeof(struct hpt_iop_request_get_config) >> 2);
  937 
  938         BUS_SPACE_WRT4_ITL(outbound_queue, req32);
  939 
  940         return 0;
  941 }
  942 
  943 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
  944                                 struct hpt_iop_request_get_config * config)
  945 {
  946         struct hpt_iop_request_get_config *req;
  947 
  948         if (!(req = hba->ctlcfg_ptr))
  949                 return -1;
  950 
  951         req->header.flags = 0;
  952         req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
  953         req->header.size = sizeof(struct hpt_iop_request_get_config);
  954         req->header.result = IOP_RESULT_PENDING;
  955         req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
  956 
  957         if (hptiop_send_sync_request_mv(hba, req, 20000)) {
  958                 KdPrint(("hptiop: get config send cmd failed"));
  959                 return -1;
  960         }
  961 
  962         *config = *req;
  963         return 0;
  964 }
  965 
  966 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
  967                                 struct hpt_iop_request_get_config * config)
  968 {
  969         struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
  970 
  971         if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
  972             info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
  973                 KdPrint(("hptiop: header size %x/%x type %x/%x",
  974                          info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
  975                          info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
  976                 return -1;
  977         }
  978 
  979         config->interface_version = info->interface_version;
  980         config->firmware_version = info->firmware_version;
  981         config->max_requests = info->max_requests;
  982         config->request_size = info->request_size;
  983         config->max_sg_count = info->max_sg_count;
  984         config->data_transfer_length = info->data_transfer_length;
  985         config->alignment_mask = info->alignment_mask;
  986         config->max_devices = info->max_devices;
  987         config->sdram_size = info->sdram_size;
  988 
  989         KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
  990                  config->max_requests, config->request_size,
  991                  config->data_transfer_length, config->max_devices,
  992                  config->sdram_size));
  993 
  994         return 0;
  995 }
  996 
  997 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
  998                                 struct hpt_iop_request_set_config *config)
  999 {
 1000         u_int32_t req32;
 1001 
 1002         req32 = BUS_SPACE_RD4_ITL(inbound_queue);
 1003 
 1004         if (req32 == IOPMU_QUEUE_EMPTY)
 1005                 return -1;
 1006 
 1007         config->header.size = sizeof(struct hpt_iop_request_set_config);
 1008         config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
 1009         config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
 1010         config->header.result = IOP_RESULT_PENDING;
 1011         config->header.context = 0;
 1012 
 1013         bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
 1014                 (u_int32_t *)config,
 1015                 sizeof(struct hpt_iop_request_set_config) >> 2);
 1016 
 1017         if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
 1018                 KdPrint(("hptiop: set config send cmd failed"));
 1019                 return -1;
 1020         }
 1021 
 1022         BUS_SPACE_WRT4_ITL(outbound_queue, req32);
 1023 
 1024         return 0;
 1025 }
 1026 
 1027 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
 1028                                 struct hpt_iop_request_set_config *config)
 1029 {
 1030         struct hpt_iop_request_set_config *req;
 1031 
 1032         if (!(req = hba->ctlcfg_ptr))
 1033                 return -1;
 1034 
 1035         memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
 1036                 (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
 1037                 sizeof(struct hpt_iop_request_set_config) -
 1038                         sizeof(struct hpt_iop_request_header));
 1039 
 1040         req->header.flags = 0;
 1041         req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
 1042         req->header.size = sizeof(struct hpt_iop_request_set_config);
 1043         req->header.result = IOP_RESULT_PENDING;
 1044         req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
 1045 
 1046         if (hptiop_send_sync_request_mv(hba, req, 20000)) {
 1047                 KdPrint(("hptiop: set config send cmd failed"));
 1048                 return -1;
 1049         }
 1050 
 1051         return 0;
 1052 }
 1053 
 1054 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
 1055                                 struct hpt_iop_request_set_config *config)
 1056 {
 1057         struct hpt_iop_request_set_config *req;
 1058 
 1059         if (!(req = hba->ctlcfg_ptr))
 1060                 return -1;
 1061 
 1062         memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
 1063                 (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
 1064                 sizeof(struct hpt_iop_request_set_config) -
 1065                         sizeof(struct hpt_iop_request_header));
 1066 
 1067         req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
 1068         req->header.size = sizeof(struct hpt_iop_request_set_config);
 1069         req->header.result = IOP_RESULT_PENDING;
 1070 
 1071         if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
 1072                 KdPrint(("hptiop: set config send cmd failed"));
 1073                 return -1;
 1074         }
 1075 
 1076         return 0;
 1077 }
 1078 
 1079 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
 1080                                 u_int32_t req32,
 1081                                 struct hpt_iop_ioctl_param *pParams)
 1082 {
 1083         u_int64_t temp64;
 1084         struct hpt_iop_request_ioctl_command req;
 1085 
 1086         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
 1087                         (hba->max_request_size -
 1088                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
 1089                 device_printf(hba->pcidev, "request size beyond max value");
 1090                 return -1;
 1091         }
 1092 
 1093         req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
 1094                 + pParams->nInBufferSize;
 1095         req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
 1096         req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
 1097         req.header.result = IOP_RESULT_PENDING;
 1098         req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
 1099         req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
 1100         req.inbuf_size = pParams->nInBufferSize;
 1101         req.outbuf_size = pParams->nOutBufferSize;
 1102         req.bytes_returned = 0;
 1103 
 1104         bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
 1105                 offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
 1106 
 1107         hptiop_lock_adapter(hba);
 1108 
 1109         BUS_SPACE_WRT4_ITL(inbound_queue, req32);
 1110         BUS_SPACE_RD4_ITL(outbound_intstatus);
 1111 
 1112         bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
 1113                 offsetof(struct hpt_iop_request_ioctl_command, header.context),
 1114                 (u_int32_t *)&temp64, 2);
 1115         while (temp64) {
 1116                 if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
 1117                                 0, "hptctl", HPT_OSM_TIMEOUT)==0)
 1118                         break;
 1119                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
 1120                 bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
 1121                         offsetof(struct hpt_iop_request_ioctl_command,
 1122                                 header.context),
 1123                         (u_int32_t *)&temp64, 2);
 1124         }
 1125 
 1126         hptiop_unlock_adapter(hba);
 1127         return 0;
 1128 }
 1129 
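       /*
        * copyin/copyout between user buffers and the BAR0 window, one
        * byte at a time, presumably to sidestep alignment and access-width
        * constraints on the MMIO region.
        */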
 1130 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
 1131     void *user, int size)
 1132 {
 1133         unsigned char byte;
 1134         int i;
 1135 
 1136         for (i=0; i<size; i++) {
 1137                 if (copyin((u_int8_t *)user + i, &byte, 1))
 1138                         return -1;
 1139                 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
 1140         }
 1141 
 1142         return 0;
 1143 }
 1144 
 1145 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
 1146     void *user, int size)
 1147 {
 1148         unsigned char byte;
 1149         int i;
 1150 
 1151         for (i=0; i<size; i++) {
 1152                 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
 1153                 if (copyout(&byte, (u_int8_t *)user + i, 1))
 1154                         return -1;
 1155         }
 1156 
 1157         return 0;
 1158 }
 1159 
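       /*
        * ITL ioctl path: stage the input buffer into the firmware request
        * slot, post it synchronously, then copy the output buffer and the
        * returned byte count back to userland on success.  The slot is
        * returned to the outbound queue on both the success and failure
        * paths.
        */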
 1160 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
 1161                                 struct hpt_iop_ioctl_param * pParams)
 1162 {
 1163         u_int32_t req32;
 1164         u_int32_t result;
 1165 
 1166         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
 1167                 (pParams->Magic != HPT_IOCTL_MAGIC32))
 1168                 return EFAULT;
 1169 
 1170         req32 = BUS_SPACE_RD4_ITL(inbound_queue);
 1171         if (req32 == IOPMU_QUEUE_EMPTY)
 1172                 return EFAULT;
 1173 
 1174         if (pParams->nInBufferSize)
 1175                 if (hptiop_bus_space_copyin(hba, req32 +
 1176                         offsetof(struct hpt_iop_request_ioctl_command, buf),
 1177                         (void *)pParams->lpInBuffer, pParams->nInBufferSize))
 1178                         goto invalid;
 1179 
 1180         if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
 1181                 goto invalid;
 1182 
 1183         result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
 1184                         offsetof(struct hpt_iop_request_ioctl_command,
 1185                                 header.result));
 1186 
 1187         if (result == IOP_RESULT_SUCCESS) {
 1188                 if (pParams->nOutBufferSize)
 1189                         if (hptiop_bus_space_copyout(hba, req32 +
 1190                                 offsetof(struct hpt_iop_request_ioctl_command, buf) +
 1191                                         ((pParams->nInBufferSize + 3) & ~3),
 1192                                 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
 1193                                 goto invalid;
 1194 
 1195                 if (pParams->lpBytesReturned) {
 1196                         if (hptiop_bus_space_copyout(hba, req32 +
 1197                                 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
 1198                                 (void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
 1199                                 goto invalid;
 1200                 }
 1201 
 1202                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
 1203 
 1204                 return 0;
 1205         } else{
 1206 invalid:
 1207                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
 1208 
 1209                 return EFAULT;
 1210         }
 1211 }
 1212 
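       /*
        * MV ioctl post: build the ioctl request in the shared control
        * buffer and hand its physical address to the inbound queue.  The
        * low bits of the posted address carry MVIOP_MU_QUEUE_ADDR_HOST_BIT
        * plus a size field derived from header.size (capped at 3), which
        * appears to act as a length hint for the firmware.
        */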
 1213 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
 1214                                 struct hpt_iop_request_ioctl_command *req,
 1215                                 struct hpt_iop_ioctl_param *pParams)
 1216 {
 1217         u_int64_t req_phy;
 1218         int size = 0;
 1219 
 1220         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
 1221                         (hba->max_request_size -
 1222                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
 1223                 device_printf(hba->pcidev, "request size beyond max value");
 1224                 return -1;
 1225         }
 1226 
 1227         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
 1228         req->inbuf_size = pParams->nInBufferSize;
 1229         req->outbuf_size = pParams->nOutBufferSize;
 1230         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
 1231                                         + pParams->nInBufferSize;
 1232         req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
 1233         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
 1234         req->header.result = IOP_RESULT_PENDING;
 1235         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 1236         size = req->header.size >> 8;
 1237         size = size > 3 ? 3 : size;
 1238         req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
 1239         hptiop_mv_inbound_write(req_phy, hba);
 1240 
 1241         BUS_SPACE_RD4_MV0(outbound_intmask);
 1242 
 1243         while (hba->config_done == 0) {
 1244                 if (hptiop_sleep(hba, req, 0,
  1245                         "hptctl", HPT_OSM_TIMEOUT) == 0)
 1246                         continue;
 1247                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
 1248         }
 1249         return 0;
 1250 }
 1251 
 1252 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
 1253                                 struct hpt_iop_ioctl_param *pParams)
 1254 {
 1255         struct hpt_iop_request_ioctl_command *req;
 1256 
 1257         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
 1258                 (pParams->Magic != HPT_IOCTL_MAGIC32))
 1259                 return EFAULT;
 1260 
 1261         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
 1262         hba->config_done = 0;
 1263         hptiop_lock_adapter(hba);
 1264         if (pParams->nInBufferSize)
 1265                 if (copyin((void *)pParams->lpInBuffer,
 1266                                 req->buf, pParams->nInBufferSize))
 1267                         goto invalid;
 1268         if (hptiop_post_ioctl_command_mv(hba, req, pParams))
 1269                 goto invalid;
 1270 
 1271         if (hba->config_done == 1) {
 1272                 if (pParams->nOutBufferSize)
 1273                         if (copyout(req->buf +
 1274                                 ((pParams->nInBufferSize + 3) & ~3),
 1275                                 (void *)pParams->lpOutBuffer,
 1276                                 pParams->nOutBufferSize))
 1277                                 goto invalid;
 1278 
 1279                 if (pParams->lpBytesReturned)
 1280                         if (copyout(&req->bytes_returned,
 1281                                 (void*)pParams->lpBytesReturned,
 1282                                 sizeof(u_int32_t)))
 1283                                 goto invalid;
 1284                 hptiop_unlock_adapter(hba);
 1285                 return 0;
  1286         } else {
 1287 invalid:
 1288                 hptiop_unlock_adapter(hba);
 1289                 return EFAULT;
 1290         }
 1291 }
 1292 
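      /*
       * MVFrey variant: the request's physical address and size are
       * encoded into header.flags/header.context, an inbound-list slot
       * is filled, and the write-pointer register is advanced.  The
       * write pointer wraps at list_count and flips CL_POINTER_TOGGLE
       * on each wrap.
       */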
 1293 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
 1294                                 struct hpt_iop_request_ioctl_command *req,
 1295                                 struct hpt_iop_ioctl_param *pParams)
 1296 {
 1297         u_int64_t phy_addr;
 1298         u_int32_t index;
 1299 
 1300         phy_addr = hba->ctlcfgcmd_phy;
 1301 
 1302         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
 1303                         (hba->max_request_size -
 1304                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
  1305                 device_printf(hba->pcidev, "request size beyond max value\n");
 1306                 return -1;
 1307         }
 1308 
 1309         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
 1310         req->inbuf_size = pParams->nInBufferSize;
 1311         req->outbuf_size = pParams->nOutBufferSize;
 1312         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
 1313                                         + pParams->nInBufferSize;
 1314 
 1315         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
 1316         req->header.result = IOP_RESULT_PENDING;
 1317 
 1318         req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
 1319                                                 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
 1320                                                 | IOP_REQUEST_FLAG_ADDR_BITS
 1321                                                 | ((phy_addr >> 16) & 0xffff0000);
  1322         req->header.context = ((phy_addr & 0xffffffff) << 32)
 1323                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
 1324 
 1325         hba->u.mvfrey.inlist_wptr++;
 1326         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
 1327 
 1328         if (index == hba->u.mvfrey.list_count) {
 1329                 index = 0;
 1330                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
 1331                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
 1332         }
 1333 
 1334         hba->u.mvfrey.inlist[index].addr = phy_addr;
 1335         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
 1336 
 1337         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
 1338         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
 1339 
 1340         while (hba->config_done == 0) {
 1341                 if (hptiop_sleep(hba, req, 0, "hptctl", HPT_OSM_TIMEOUT) == 0)
 1342                         continue;
 1343                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
 1344         }
 1345         return 0;
 1346 }
 1347 
 1348 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
 1349                                 struct hpt_iop_ioctl_param *pParams)
 1350 {
 1351         struct hpt_iop_request_ioctl_command *req;
 1352 
 1353         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
 1354                 (pParams->Magic != HPT_IOCTL_MAGIC32))
 1355                 return EFAULT;
 1356 
 1357         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
 1358         hba->config_done = 0;
 1359         hptiop_lock_adapter(hba);
 1360         if (pParams->nInBufferSize)
 1361                 if (copyin((void *)pParams->lpInBuffer,
 1362                                 req->buf, pParams->nInBufferSize))
 1363                         goto invalid;
 1364         if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
 1365                 goto invalid;
 1366 
 1367         if (hba->config_done == 1) {
 1368                 if (pParams->nOutBufferSize)
 1369                         if (copyout(req->buf +
 1370                                 ((pParams->nInBufferSize + 3) & ~3),
 1371                                 (void *)pParams->lpOutBuffer,
 1372                                 pParams->nOutBufferSize))
 1373                                 goto invalid;
 1374 
 1375                 if (pParams->lpBytesReturned)
 1376                         if (copyout(&req->bytes_returned,
 1377                                 (void*)pParams->lpBytesReturned,
 1378                                 sizeof(u_int32_t)))
 1379                                 goto invalid;
 1380                 hptiop_unlock_adapter(hba);
 1381                 return 0;
  1382         } else {
 1383 invalid:
 1384                 hptiop_unlock_adapter(hba);
 1385                 return EFAULT;
 1386         }
 1387 }
 1388 
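      /*
       * Request a full bus rescan: queue a wildcard-path XPT_SCAN_BUS
       * CCB; completion is handled by hptiop_bus_scan_cb below.
       */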
 1389 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
 1390 {
 1391         union ccb           *ccb;
 1392 
 1393         if ((ccb = xpt_alloc_ccb()) == NULL)
 1394                 return(ENOMEM);
 1395         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
 1396                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 1397                 xpt_free_ccb(ccb);
 1398                 return(EIO);
 1399         }
 1400 
 1401         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
 1402         ccb->ccb_h.func_code = XPT_SCAN_BUS;
 1403         ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb;
 1404         ccb->crcn.flags = CAM_FLAG_NONE;
 1405         xpt_action(ccb);
 1406         return(0);
 1407 }
 1408 
 1409 static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
 1410 {
 1411         xpt_free_path(ccb->ccb_h.path);
 1412         kfree(ccb, M_TEMP);
 1413 }
 1414 
 1415 static  bus_dmamap_callback_t   hptiop_map_srb;
 1416 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
 1417 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
 1418 static  bus_dmamap_callback_t   hptiop_mvfrey_map_ctlcfg;
 1419 
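      /*
       * PCI resource setup.  All three families map BAR0 (rid 0x10);
       * the MV and MVFrey parts additionally map BAR2 (rid 0x18) for
       * their message-unit registers.
       */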
 1420 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
 1421 {
 1422         hba->bar0_rid = 0x10;
 1423         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1424                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1425 
 1426         if (hba->bar0_res == NULL) {
 1427                 device_printf(hba->pcidev,
  1428                         "failed to get iop base address.\n");
 1429                 return -1;
 1430         }
 1431         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1432         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1433         hba->u.itl.mu = (struct hpt_iopmu_itl *)
 1434                                 rman_get_virtual(hba->bar0_res);
 1435 
 1436         if (!hba->u.itl.mu) {
 1437                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1438                                         hba->bar0_rid, hba->bar0_res);
 1439                 device_printf(hba->pcidev, "alloc mem res failed\n");
 1440                 return -1;
 1441         }
 1442 
 1443         return 0;
 1444 }
 1445 
 1446 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
 1447 {
 1448         hba->bar0_rid = 0x10;
 1449         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1450                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1451 
 1452         if (hba->bar0_res == NULL) {
 1453                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
 1454                 return -1;
 1455         }
 1456         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1457         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1458         hba->u.mv.regs = (struct hpt_iopmv_regs *)
 1459                                 rman_get_virtual(hba->bar0_res);
 1460 
 1461         if (!hba->u.mv.regs) {
 1462                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1463                                         hba->bar0_rid, hba->bar0_res);
 1464                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
 1465                 return -1;
 1466         }
 1467 
 1468         hba->bar2_rid = 0x18;
 1469         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
 1470                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
 1471 
 1472         if (hba->bar2_res == NULL) {
 1473                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1474                                         hba->bar0_rid, hba->bar0_res);
 1475                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
 1476                 return -1;
 1477         }
 1478 
 1479         hba->bar2t = rman_get_bustag(hba->bar2_res);
 1480         hba->bar2h = rman_get_bushandle(hba->bar2_res);
 1481         hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
 1482 
 1483         if (!hba->u.mv.mu) {
 1484                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1485                                         hba->bar0_rid, hba->bar0_res);
 1486                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1487                                         hba->bar2_rid, hba->bar2_res);
 1488                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
 1489                 return -1;
 1490         }
 1491 
 1492         return 0;
 1493 }
 1494 
 1495 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
 1496 {
 1497         hba->bar0_rid = 0x10;
 1498         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
 1499                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
 1500 
 1501         if (hba->bar0_res == NULL) {
 1502                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
 1503                 return -1;
 1504         }
 1505         hba->bar0t = rman_get_bustag(hba->bar0_res);
 1506         hba->bar0h = rman_get_bushandle(hba->bar0_res);
 1507         hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
 1508                                 rman_get_virtual(hba->bar0_res);
 1509 
 1510         if (!hba->u.mvfrey.config) {
 1511                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1512                                         hba->bar0_rid, hba->bar0_res);
 1513                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
 1514                 return -1;
 1515         }
 1516 
 1517         hba->bar2_rid = 0x18;
 1518         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
 1519                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
 1520 
 1521         if (hba->bar2_res == NULL) {
 1522                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1523                                         hba->bar0_rid, hba->bar0_res);
 1524                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
 1525                 return -1;
 1526         }
 1527 
 1528         hba->bar2t = rman_get_bustag(hba->bar2_res);
 1529         hba->bar2h = rman_get_bushandle(hba->bar2_res);
  1530         hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)
  1531                                 rman_get_virtual(hba->bar2_res);
 1532 
 1533         if (!hba->u.mvfrey.mu) {
 1534                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1535                                         hba->bar0_rid, hba->bar0_res);
 1536                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1537                                         hba->bar2_rid, hba->bar2_res);
 1538                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
 1539                 return -1;
 1540         }
 1541 
 1542         return 0;
 1543 }
 1544 
 1545 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
 1546 {
 1547         if (hba->bar0_res)
 1548                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1549                         hba->bar0_rid, hba->bar0_res);
 1550 }
 1551 
 1552 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
 1553 {
 1554         if (hba->bar0_res)
 1555                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1556                         hba->bar0_rid, hba->bar0_res);
 1557         if (hba->bar2_res)
 1558                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1559                         hba->bar2_rid, hba->bar2_res);
 1560 }
 1561 
 1562 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
 1563 {
 1564         if (hba->bar0_res)
 1565                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1566                         hba->bar0_rid, hba->bar0_res);
 1567         if (hba->bar2_res)
 1568                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 1569                         hba->bar2_rid, hba->bar2_res);
 1570 }
 1571 
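      /*
       * The MV family needs a small bus-coherent control buffer below
       * the 4GB line for synchronous requests.  It is created, allocated
       * and loaded here; the hptiop_mv_map_ctlcfg callback records the
       * physical address once the mapping completes.
       */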
 1572 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
 1573 {
 1574         if (bus_dma_tag_create(hba->parent_dmat,
 1575                                 1,
 1576                                 0,
 1577                                 BUS_SPACE_MAXADDR_32BIT,
 1578                                 BUS_SPACE_MAXADDR,
 1579                                 NULL, NULL,
 1580                                 0x800 - 0x8,
 1581                                 1,
 1582                                 BUS_SPACE_MAXSIZE_32BIT,
 1583                                 BUS_DMA_ALLOCNOW,
 1584                                 &hba->ctlcfg_dmat)) {
 1585                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
 1586                 return -1;
 1587         }
 1588 
 1589         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
 1590                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1591                 &hba->ctlcfg_dmamap) != 0) {
 1592                         device_printf(hba->pcidev,
 1593                                         "bus_dmamem_alloc failed!\n");
 1594                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1595                         return -1;
 1596         }
 1597 
 1598         if (bus_dmamap_load(hba->ctlcfg_dmat,
 1599                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
 1600                         MVIOP_IOCTLCFG_SIZE,
 1601                         hptiop_mv_map_ctlcfg, hba, 0)) {
 1602                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
 1603                 if (hba->ctlcfg_dmat) {
 1604                         bus_dmamem_free(hba->ctlcfg_dmat,
 1605                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1606                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1607                 }
 1608                 return -1;
 1609         }
 1610 
 1611         return 0;
 1612 }
 1613 
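      /*
       * MVFrey sizing: the firmware reports the inbound-list depth in
       * the high 16 bits of inbound_conf_ctl.  The coherent buffer holds
       * a 0x800-byte request area plus the inbound list, the outbound
       * list, and the outbound-pointer shadow word.
       */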
 1614 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
 1615 {
 1616         u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
 1617 
 1618         list_count >>= 16;
 1619 
 1620         if (list_count == 0) {
 1621                 return -1;
 1622         }
 1623 
 1624         hba->u.mvfrey.list_count = list_count;
 1625         hba->u.mvfrey.internal_mem_size = 0x800
 1626                                                         + list_count * sizeof(struct mvfrey_inlist_entry)
 1627                                                         + list_count * sizeof(struct mvfrey_outlist_entry)
 1628                                                         + sizeof(int);
 1629         if (bus_dma_tag_create(hba->parent_dmat,
 1630                                 1,
 1631                                 0,
 1632                                 BUS_SPACE_MAXADDR_32BIT,
 1633                                 BUS_SPACE_MAXADDR,
 1634                                 NULL, NULL,
 1635                                 hba->u.mvfrey.internal_mem_size,
 1636                                 1,
 1637                                 BUS_SPACE_MAXSIZE_32BIT,
 1638                                 BUS_DMA_ALLOCNOW,
 1639                                 &hba->ctlcfg_dmat)) {
 1640                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
 1641                 return -1;
 1642         }
 1643 
 1644         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
 1645                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1646                 &hba->ctlcfg_dmamap) != 0) {
 1647                         device_printf(hba->pcidev,
 1648                                         "bus_dmamem_alloc failed!\n");
 1649                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1650                         return -1;
 1651         }
 1652 
 1653         if (bus_dmamap_load(hba->ctlcfg_dmat,
 1654                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
 1655                         hba->u.mvfrey.internal_mem_size,
 1656                         hptiop_mvfrey_map_ctlcfg, hba, 0)) {
 1657                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
 1658                 if (hba->ctlcfg_dmat) {
 1659                         bus_dmamem_free(hba->ctlcfg_dmat,
 1660                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1661                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1662                 }
 1663                 return -1;
 1664         }
 1665 
 1666         return 0;
 1667 }
 1668 
 1669 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
 1670         return 0;
 1671 }
 1672 
 1673 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
 1674 {
 1675         if (hba->ctlcfg_dmat) {
 1676                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 1677                 bus_dmamem_free(hba->ctlcfg_dmat,
 1678                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1679                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1680         }
 1681 
 1682         return 0;
 1683 }
 1684 
 1685 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
 1686 {
 1687         if (hba->ctlcfg_dmat) {
 1688                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 1689                 bus_dmamem_free(hba->ctlcfg_dmat,
 1690                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 1691                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 1692         }
 1693 
 1694         return 0;
 1695 }
 1696 
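      /*
       * Re-arm the MVFrey communication lists after a RESET_COMM
       * message: program the inbound/outbound list base registers and
       * the outbound shadow base, then reset both ring pointers to
       * list_count - 1 with the toggle bit set.  The paired 16-bit
       * shifts extract the upper 32 bits of each physical address.
       */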
 1697 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
 1698 {
 1699         u_int32_t i = 100;
 1700 
 1701         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
 1702                 return -1;
 1703 
 1704         /* wait 100ms for MCU ready */
  1705         while (i--) {
 1706                 DELAY(1000);
 1707         }
 1708 
 1709         BUS_SPACE_WRT4_MVFREY2(inbound_base,
 1710                                                         hba->u.mvfrey.inlist_phy & 0xffffffff);
 1711         BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
 1712                                                         (hba->u.mvfrey.inlist_phy >> 16) >> 16);
 1713 
 1714         BUS_SPACE_WRT4_MVFREY2(outbound_base,
 1715                                                         hba->u.mvfrey.outlist_phy & 0xffffffff);
 1716         BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
 1717                                                         (hba->u.mvfrey.outlist_phy >> 16) >> 16);
 1718 
 1719         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
 1720                                                         hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
 1721         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
 1722                                                         (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
 1723 
 1724         hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
 1725                                                                 | CL_POINTER_TOGGLE;
 1726         *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
 1727                                                                 | CL_POINTER_TOGGLE;
 1728         hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
 1729 
 1730         return 0;
 1731 }
 1732 
 1733 /*
 1734  * CAM driver interface
 1735  */
 1736 static device_method_t driver_methods[] = {
 1737         /* Device interface */
 1738         DEVMETHOD(device_probe,     hptiop_probe),
 1739         DEVMETHOD(device_attach,    hptiop_attach),
 1740         DEVMETHOD(device_detach,    hptiop_detach),
 1741         DEVMETHOD(device_shutdown,  hptiop_shutdown),
 1742         DEVMETHOD_END
 1743 };
 1744 
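      /*
       * Per-family operation vectors.  Attach, detach and ioctl code
       * call through these so the three controller generations (Intel
       * IOP, Marvell, and Marvell Frey based) share one driver body.
       */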
 1745 static struct hptiop_adapter_ops hptiop_itl_ops = {
 1746         .family            = INTEL_BASED_IOP,
 1747         .iop_wait_ready    = hptiop_wait_ready_itl,
 1748         .internal_memalloc = 0,
 1749         .internal_memfree  = hptiop_internal_memfree_itl,
 1750         .alloc_pci_res     = hptiop_alloc_pci_res_itl,
 1751         .release_pci_res   = hptiop_release_pci_res_itl,
 1752         .enable_intr       = hptiop_enable_intr_itl,
 1753         .disable_intr      = hptiop_disable_intr_itl,
 1754         .get_config        = hptiop_get_config_itl,
 1755         .set_config        = hptiop_set_config_itl,
 1756         .iop_intr          = hptiop_intr_itl,
 1757         .post_msg          = hptiop_post_msg_itl,
 1758         .post_req          = hptiop_post_req_itl,
 1759         .do_ioctl          = hptiop_do_ioctl_itl,
 1760         .reset_comm        = 0,
 1761 };
 1762 
 1763 static struct hptiop_adapter_ops hptiop_mv_ops = {
 1764         .family            = MV_BASED_IOP,
 1765         .iop_wait_ready    = hptiop_wait_ready_mv,
 1766         .internal_memalloc = hptiop_internal_memalloc_mv,
 1767         .internal_memfree  = hptiop_internal_memfree_mv,
 1768         .alloc_pci_res     = hptiop_alloc_pci_res_mv,
 1769         .release_pci_res   = hptiop_release_pci_res_mv,
 1770         .enable_intr       = hptiop_enable_intr_mv,
 1771         .disable_intr      = hptiop_disable_intr_mv,
 1772         .get_config        = hptiop_get_config_mv,
 1773         .set_config        = hptiop_set_config_mv,
 1774         .iop_intr          = hptiop_intr_mv,
 1775         .post_msg          = hptiop_post_msg_mv,
 1776         .post_req          = hptiop_post_req_mv,
 1777         .do_ioctl          = hptiop_do_ioctl_mv,
 1778         .reset_comm        = 0,
 1779 };
 1780 
 1781 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
 1782         .family            = MVFREY_BASED_IOP,
 1783         .iop_wait_ready    = hptiop_wait_ready_mvfrey,
 1784         .internal_memalloc = hptiop_internal_memalloc_mvfrey,
 1785         .internal_memfree  = hptiop_internal_memfree_mvfrey,
 1786         .alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
 1787         .release_pci_res   = hptiop_release_pci_res_mvfrey,
 1788         .enable_intr       = hptiop_enable_intr_mvfrey,
 1789         .disable_intr      = hptiop_disable_intr_mvfrey,
 1790         .get_config        = hptiop_get_config_mvfrey,
 1791         .set_config        = hptiop_set_config_mvfrey,
 1792         .iop_intr          = hptiop_intr_mvfrey,
 1793         .post_msg          = hptiop_post_msg_mvfrey,
 1794         .post_req          = hptiop_post_req_mvfrey,
 1795         .do_ioctl          = hptiop_do_ioctl_mvfrey,
 1796         .reset_comm        = hptiop_reset_comm_mvfrey,
 1797 };
 1798 
 1799 static driver_t hptiop_pci_driver = {
 1800         driver_name,
 1801         driver_methods,
 1802         sizeof(struct hpt_iop_hba)
 1803 };
 1804 
 1805 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, NULL, NULL);
 1806 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
 1807 MODULE_VERSION(hptiop, 1);
 1808 
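      /*
       * Probe: match HighPoint (PCI vendor 0x1103) devices and select
       * the adapter ops by device ID; the sas flag only affects the
       * controller description string.
       */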
 1809 static int hptiop_probe(device_t dev)
 1810 {
 1811         struct hpt_iop_hba *hba;
 1812         u_int32_t id;
 1813         static char buf[256];
 1814         int sas = 0;
 1815         struct hptiop_adapter_ops *ops;
 1816 
 1817         if (pci_get_vendor(dev) != 0x1103)
 1818                 return (ENXIO);
 1819 
 1820         id = pci_get_device(dev);
 1821 
 1822         switch (id) {
 1823                 case 0x4520:
 1824                 case 0x4522:
 1825                         sas = 1;
 1826                         ops = &hptiop_mvfrey_ops;
 1827                         break;
 1828                 case 0x4210:
 1829                 case 0x4211:
 1830                 case 0x4310:
 1831                 case 0x4311:
 1832                 case 0x4320:
 1833                 case 0x4321:
 1834                 case 0x4322:
 1835                         sas = 1;
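                              /* FALLTHROUGH: SAS IDs use the ITL ops below */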
 1836                 case 0x3220:
 1837                 case 0x3320:
 1838                 case 0x3410:
 1839                 case 0x3520:
 1840                 case 0x3510:
 1841                 case 0x3511:
 1842                 case 0x3521:
 1843                 case 0x3522:
 1844                 case 0x3530:
 1845                 case 0x3540:
 1846                 case 0x3560:
 1847                         ops = &hptiop_itl_ops;
 1848                         break;
 1849                 case 0x3020:
 1850                 case 0x3120:
 1851                 case 0x3122:
 1852                         ops = &hptiop_mv_ops;
 1853                         break;
 1854                 default:
 1855                         return (ENXIO);
 1856         }
 1857 
 1858         device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
 1859                 pci_get_bus(dev), pci_get_slot(dev),
 1860                 pci_get_function(dev), pci_get_irq(dev));
 1861 
 1862         ksprintf(buf, "RocketRAID %x %s Controller",
 1863                                 id, sas ? "SAS" : "SATA");
 1864         device_set_desc_copy(dev, buf);
 1865 
 1866         hba = (struct hpt_iop_hba *)device_get_softc(dev);
 1867         bzero(hba, sizeof(struct hpt_iop_hba));
 1868         hba->ops = ops;
 1869 
 1870         KdPrint(("hba->ops=%p\n", hba->ops));
 1871         return 0;
 1872 }
 1873 
 1874 static int hptiop_attach(device_t dev)
 1875 {
 1876         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
 1877         struct hpt_iop_request_get_config  iop_config;
 1878         struct hpt_iop_request_set_config  set_config;
 1879         int rid = 0;
 1880         struct cam_devq *devq;
 1881         struct ccb_setasync ccb;
 1882         u_int32_t unit = device_get_unit(dev);
 1883 
 1884         device_printf(dev, "RocketRAID 3xxx/4xxx controller driver %s\n",
 1885             driver_version);
 1886 
 1887         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
 1888                 pci_get_bus(dev), pci_get_slot(dev),
 1889                 pci_get_function(dev), hba->ops));
 1890 
 1891         pci_enable_busmaster(dev);
 1892         hba->pcidev = dev;
 1893 
 1894         if (hba->ops->alloc_pci_res(hba))
 1895                 return ENXIO;
 1896 
 1897         if (hba->ops->iop_wait_ready(hba, 2000)) {
 1898                 device_printf(dev, "adapter is not ready\n");
 1899                 goto release_pci_res;
 1900         }
 1901 
 1902         lockinit(&hba->lock, "hptioplock", 0, LK_CANRECURSE);
 1903 
 1904         if (bus_dma_tag_create(NULL,/* parent */
 1905                         1,  /* alignment */
 1906                         0, /* boundary */
 1907                         BUS_SPACE_MAXADDR,  /* lowaddr */
 1908                         BUS_SPACE_MAXADDR,  /* highaddr */
 1909                         NULL, NULL,         /* filter, filterarg */
 1910                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1911                         BUS_SPACE_UNRESTRICTED, /* nsegments */
 1912                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1913                         0,      /* flags */
 1914                         &hba->parent_dmat   /* tag */))
 1915         {
 1916                 device_printf(dev, "alloc parent_dmat failed\n");
 1917                 goto release_pci_res;
 1918         }
 1919 
 1920         if (hba->ops->family == MV_BASED_IOP) {
 1921                 if (hba->ops->internal_memalloc(hba)) {
  1922                         device_printf(dev, "internal memory allocation failed\n");
 1923                         goto destroy_parent_tag;
 1924                 }
 1925         }
 1926 
 1927         if (hba->ops->get_config(hba, &iop_config)) {
 1928                 device_printf(dev, "get iop config failed.\n");
 1929                 goto get_config_failed;
 1930         }
 1931 
 1932         hba->firmware_version = iop_config.firmware_version;
 1933         hba->interface_version = iop_config.interface_version;
 1934         hba->max_requests = iop_config.max_requests;
 1935         hba->max_devices = iop_config.max_devices;
 1936         hba->max_request_size = iop_config.request_size;
 1937         hba->max_sg_count = iop_config.max_sg_count;
 1938 
 1939         if (hba->ops->family == MVFREY_BASED_IOP) {
 1940                 if (hba->ops->internal_memalloc(hba)) {
  1941                         device_printf(dev, "internal memory allocation failed\n");
 1942                         goto destroy_parent_tag;
 1943                 }
 1944                 if (hba->ops->reset_comm(hba)) {
 1945                         device_printf(dev, "reset comm failed\n");
 1946                         goto get_config_failed;
 1947                 }
 1948         }
 1949 
 1950         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
 1951                         4,  /* alignment */
 1952                         BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
 1953                         BUS_SPACE_MAXADDR,  /* lowaddr */
 1954                         BUS_SPACE_MAXADDR,  /* highaddr */
 1955                         NULL, NULL,         /* filter, filterarg */
 1956                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
 1957                         hba->max_sg_count,  /* nsegments */
 1958                         0x20000,    /* maxsegsize */
 1959                         BUS_DMA_ALLOCNOW,       /* flags */
 1960                         &hba->io_dmat   /* tag */))
 1961         {
 1962                 device_printf(dev, "alloc io_dmat failed\n");
 1963                 goto get_config_failed;
 1964         }
 1965 
 1966         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
 1967                         1,  /* alignment */
 1968                         0, /* boundary */
 1969                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1970                         BUS_SPACE_MAXADDR,  /* highaddr */
 1971                         NULL, NULL,         /* filter, filterarg */
 1972                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
 1973                         1,  /* nsegments */
 1974                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1975                         0,      /* flags */
 1976                         &hba->srb_dmat  /* tag */))
 1977         {
 1978                 device_printf(dev, "alloc srb_dmat failed\n");
 1979                 goto destroy_io_dmat;
 1980         }
 1981 
 1982         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
 1983                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
 1984                         &hba->srb_dmamap) != 0)
 1985         {
 1986                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
 1987                 goto destroy_srb_dmat;
 1988         }
 1989 
 1990         if (bus_dmamap_load(hba->srb_dmat,
 1991                         hba->srb_dmamap, hba->uncached_ptr,
 1992                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
 1993                         hptiop_map_srb, hba, 0))
 1994         {
 1995                 device_printf(dev, "bus_dmamap_load failed!\n");
 1996                 goto srb_dmamem_free;
 1997         }
 1998 
  1999         if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
 2000                 device_printf(dev, "cam_simq_alloc failed\n");
 2001                 goto srb_dmamap_unload;
 2002         }
 2003 
 2004         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
 2005                         hba, unit, &sim_mplock, hba->max_requests - 1, 1, devq);
 2006         cam_simq_release(devq);
 2007         if (!hba->sim) {
 2008                 device_printf(dev, "cam_sim_alloc failed\n");
 2009                 goto srb_dmamap_unload;
 2010         }
 2011         if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
 2012         {
 2013                 device_printf(dev, "xpt_bus_register failed\n");
 2014                 goto free_cam_sim;
 2015         }
 2016 
 2017         if (xpt_create_path(&hba->path, /*periph */ NULL,
 2018                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
 2019                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 2020                 device_printf(dev, "xpt_create_path failed\n");
 2021                 goto deregister_xpt_bus;
 2022         }
 2023 
 2024         bzero(&set_config, sizeof(set_config));
 2025         set_config.iop_id = unit;
 2026         set_config.vbus_id = cam_sim_path(hba->sim);
 2027         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
 2028 
 2029         if (hba->ops->set_config(hba, &set_config)) {
 2030                 device_printf(dev, "set iop config failed.\n");
 2031                 goto free_hba_path;
 2032         }
 2033 
 2034         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
 2035         ccb.ccb_h.func_code = XPT_SASYNC_CB;
 2036         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
 2037         ccb.callback = hptiop_async;
 2038         ccb.callback_arg = hba->sim;
 2039         xpt_action((union ccb *)&ccb);
 2040 
 2041         rid = 0;
 2042         if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
 2043                         &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
 2044                 device_printf(dev, "allocate irq failed!\n");
 2045                 goto free_hba_path;
 2046         }
 2047 
 2048         if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
 2049                                 hptiop_pci_intr, hba, &hba->irq_handle, NULL))
 2050         {
 2051                 device_printf(dev, "allocate intr function failed!\n");
 2052                 goto free_irq_resource;
 2053         }
 2054 
 2055         if (hptiop_send_sync_msg(hba,
 2056                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
 2057                 device_printf(dev, "fail to start background task\n");
  2058                 goto teardown_irq_resource;
 2059         }
 2060 
 2061         hba->ops->enable_intr(hba);
 2062         hba->initialized = 1;
 2063 
 2064         hba->ioctl_dev = make_dev(&hptiop_ops, unit,
 2065                                 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
 2066                                 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
 2067 
 2068         hba->ioctl_dev->si_drv1 = hba;
 2069 
 2070         hptiop_rescan_bus(hba);
 2071 
 2072         return 0;
 2073 
 2074 
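      /*
       * Error unwinding: the labels below run in reverse order of
       * setup, and a failure at any step falls through all of the
       * remaining teardown.
       */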
  2075 teardown_irq_resource:
 2076         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
 2077 
 2078 free_irq_resource:
 2079         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
 2080 
 2081 free_hba_path:
 2082         xpt_free_path(hba->path);
 2083 
 2084 deregister_xpt_bus:
 2085         xpt_bus_deregister(cam_sim_path(hba->sim));
 2086 
 2087 free_cam_sim:
 2088         cam_sim_free(hba->sim);
 2089 
 2090 srb_dmamap_unload:
 2091         if (hba->uncached_ptr)
 2092                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
 2093 
 2094 srb_dmamem_free:
 2095         if (hba->uncached_ptr)
 2096                 bus_dmamem_free(hba->srb_dmat,
 2097                         hba->uncached_ptr, hba->srb_dmamap);
 2098 
 2099 destroy_srb_dmat:
 2100         if (hba->srb_dmat)
 2101                 bus_dma_tag_destroy(hba->srb_dmat);
 2102 
 2103 destroy_io_dmat:
 2104         if (hba->io_dmat)
 2105                 bus_dma_tag_destroy(hba->io_dmat);
 2106 
 2107 get_config_failed:
 2108         hba->ops->internal_memfree(hba);
 2109 
 2110 destroy_parent_tag:
 2111         if (hba->parent_dmat)
 2112                 bus_dma_tag_destroy(hba->parent_dmat);
 2113 
 2114 release_pci_res:
 2115         if (hba->ops->release_pci_res)
 2116                 hba->ops->release_pci_res(hba);
 2117 
 2118         return ENXIO;
 2119 }
 2120 
 2121 static int hptiop_detach(device_t dev)
 2122 {
 2123         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
 2124         int i;
 2125         int error = EBUSY;
 2126 
 2127         hptiop_lock_adapter(hba);
 2128         for (i = 0; i < hba->max_devices; i++)
 2129                 if (hptiop_os_query_remove_device(hba, i)) {
  2130                         device_printf(dev, "file system is busy. id=%d\n", i);
 2131                         goto out;
 2132                 }
 2133 
 2134         if ((error = hptiop_shutdown(dev)) != 0)
 2135                 goto out;
 2136         if (hptiop_send_sync_msg(hba,
 2137                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
 2138                 goto out;
 2139 
 2140         hptiop_release_resource(hba);
 2141         error = 0;
 2142 out:
 2143         hptiop_unlock_adapter(hba);
 2144         return error;
 2145 }
 2146 
 2147 static int hptiop_shutdown(device_t dev)
 2148 {
 2149         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
 2150 
 2151         int error = 0;
 2152 
 2153         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
  2154                 device_printf(dev, "device is busy\n");
 2155                 return EBUSY;
 2156         }
 2157 
 2158         hba->ops->disable_intr(hba);
 2159 
 2160         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
 2161                 error = EBUSY;
 2162 
 2163         return error;
 2164 }
 2165 
 2166 static void hptiop_pci_intr(void *arg)
 2167 {
 2168         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
 2169         hptiop_lock_adapter(hba);
 2170         hba->ops->iop_intr(hba);
 2171         hptiop_unlock_adapter(hba);
 2172 }
 2173 
 2174 static void hptiop_poll(struct cam_sim *sim)
 2175 {
 2176         hptiop_pci_intr(cam_sim_softc(sim));
 2177 }
 2178 
 2179 static void hptiop_async(void * callback_arg, u_int32_t code,
 2180                                         struct cam_path * path, void * arg)
 2181 {
 2182 }
 2183 
 2184 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
 2185 {
 2186         BUS_SPACE_WRT4_ITL(outbound_intmask,
 2187                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
 2188 }
 2189 
 2190 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
 2191 {
 2192         u_int32_t int_mask;
 2193 
 2194         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
 2195 
 2196         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
 2197                         | MVIOP_MU_OUTBOUND_INT_MSG;
  2198         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
 2199 }
 2200 
 2201 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
 2202 {
 2203         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
 2204         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
 2205 
 2206         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
 2207         BUS_SPACE_RD4_MVFREY2(isr_enable);
 2208 
 2209         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
 2210         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
 2211 }
 2212 
 2213 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
 2214 {
 2215         u_int32_t int_mask;
 2216 
 2217         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
 2218 
 2219         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
 2220         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
 2221         BUS_SPACE_RD4_ITL(outbound_intstatus);
 2222 }
 2223 
 2224 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
 2225 {
 2226         u_int32_t int_mask;
 2227         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
 2228 
 2229         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
 2230                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
  2231         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
 2232         BUS_SPACE_RD4_MV0(outbound_intmask);
 2233 }
 2234 
 2235 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
 2236 {
 2237         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
 2238         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
 2239 
 2240         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
 2241         BUS_SPACE_RD4_MVFREY2(isr_enable);
 2242 
 2243         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
 2244         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
 2245 }
 2246 
 2247 static void hptiop_reset_adapter(void *argv)
 2248 {
 2249         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
 2250         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
 2251                 return;
 2252         hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
 2253 }
 2254 
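      /*
       * SRBs are kept on a simple LIFO free list threaded through
       * srb->next; callers are expected to hold the adapter lock, as
       * hptiop_action does.
       */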
 2255 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
 2256 {
 2257         struct hpt_iop_srb * srb;
 2258 
 2259         if (hba->srb_list) {
 2260                 srb = hba->srb_list;
 2261                 hba->srb_list = srb->next;
 2262                 return srb;
 2263         }
 2264 
 2265         return NULL;
 2266 }
 2267 
 2268 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
 2269 {
 2270         srb->next = hba->srb_list;
 2271         hba->srb_list = srb;
 2272 }
 2273 
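      /*
       * CAM action entry point.  XPT_SCSI_IO is the main case: virtual
       * data buffers are mapped through io_dmat and the command is
       * posted from the dmamap callback; pre-built physical S/G lists
       * are posted directly.
       */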
 2274 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
 2275 {
 2276         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
 2277         struct hpt_iop_srb * srb;
 2278 
 2279         switch (ccb->ccb_h.func_code) {
 2280 
 2281         case XPT_SCSI_IO:
 2282                 hptiop_lock_adapter(hba);
 2283                 if (ccb->ccb_h.target_lun != 0 ||
 2284                         ccb->ccb_h.target_id >= hba->max_devices ||
 2285                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
 2286                 {
 2287                         ccb->ccb_h.status = CAM_TID_INVALID;
 2288                         xpt_done(ccb);
 2289                         goto scsi_done;
 2290                 }
 2291 
 2292                 if ((srb = hptiop_get_srb(hba)) == NULL) {
  2293                         device_printf(hba->pcidev, "srb allocation failed\n");
 2294                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2295                         xpt_done(ccb);
 2296                         goto scsi_done;
 2297                 }
 2298 
 2299                 srb->ccb = ccb;
 2300 
 2301                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
 2302                         hptiop_post_scsi_command(srb, NULL, 0, 0);
 2303                 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
 2304                         if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
 2305                                 int error;
 2306 
 2307                                 error = bus_dmamap_load(hba->io_dmat,
 2308                                                 srb->dma_map,
 2309                                                 ccb->csio.data_ptr,
 2310                                                 ccb->csio.dxfer_len,
 2311                                                 hptiop_post_scsi_command,
 2312                                                 srb, 0);
 2313 
 2314                                 if (error && error != EINPROGRESS) {
 2315                                         device_printf(hba->pcidev,
  2316                                             "bus_dmamap_load error %d\n", error);
 2317                                         xpt_freeze_simq(hba->sim, 1);
 2318                                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2319 invalid:
 2320                                         hptiop_free_srb(hba, srb);
 2321                                         xpt_done(ccb);
 2322                                         goto scsi_done;
 2323                                 }
 2324                         }
 2325                         else {
 2326                                 device_printf(hba->pcidev,
 2327                                         "CAM_DATA_PHYS not supported");
 2328                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
 2329                                 goto invalid;
 2330                         }
 2331                 }
 2332                 else {
 2333                         struct bus_dma_segment *segs;
 2334 
 2335                         if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
 2336                                 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
  2337                                 device_printf(hba->pcidev, "SCSI cmd failed\n");
  2338                                 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
 2339                                 goto invalid;
 2340                         }
 2341 
 2342                         segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
 2343                         hptiop_post_scsi_command(srb, segs,
 2344                                                 ccb->csio.sglist_cnt, 0);
 2345                 }
 2346 
 2347 scsi_done:
 2348                 hptiop_unlock_adapter(hba);
 2349                 return;
 2350 
 2351         case XPT_RESET_BUS:
  2352                 device_printf(hba->pcidev, "reset adapter\n");
 2353                 hptiop_lock_adapter(hba);
 2354                 hba->msg_done = 0;
 2355                 hptiop_reset_adapter(hba);
 2356                 hptiop_unlock_adapter(hba);
 2357                 break;
 2358 
 2359         case XPT_GET_TRAN_SETTINGS:
 2360         case XPT_SET_TRAN_SETTINGS:
 2361                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
 2362                 break;
 2363 
 2364         case XPT_CALC_GEOMETRY:
 2365                 cam_calc_geometry(&ccb->ccg, 1);
 2366                 break;
 2367 
 2368         case XPT_PATH_INQ:
 2369         {
 2370                 struct ccb_pathinq *cpi = &ccb->cpi;
 2371 
 2372                 cpi->version_num = 1;
 2373                 cpi->hba_inquiry = PI_SDTR_ABLE;
 2374                 cpi->target_sprt = 0;
 2375                 cpi->hba_misc = PIM_NOBUSRESET;
 2376                 cpi->hba_eng_cnt = 0;
 2377                 cpi->max_target = hba->max_devices;
 2378                 cpi->max_lun = 0;
 2379                 cpi->unit_number = cam_sim_unit(sim);
 2380                 cpi->bus_id = cam_sim_bus(sim);
 2381                 cpi->initiator_id = hba->max_devices;
 2382                 cpi->base_transfer_speed = 3300;
 2383 
 2384                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
 2385                 strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
 2386                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
 2387                 cpi->transport = XPORT_SPI;
 2388                 cpi->transport_version = 2;
 2389                 cpi->protocol = PROTO_SCSI;
 2390                 cpi->protocol_version = SCSI_REV_2;
 2391                 cpi->ccb_h.status = CAM_REQ_CMP;
 2392                 break;
 2393         }
 2394 
 2395         default:
 2396                 ccb->ccb_h.status = CAM_REQ_INVALID;
 2397                 break;
 2398         }
 2399 
 2400         xpt_done(ccb);
 2401         return;
 2402 }
 2403 
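      /*
       * ITL posting takes one of two paths.  SRBs flagged
       * HPT_SRB_FLAG_HIGH_MEM_ACESS build the request on the stack and
       * copy it into a slot taken from the inbound queue; otherwise the
       * host-resident request is posted by physical address, with size
       * bits added for newer firmware.
       */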
 2404 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
 2405                                 struct hpt_iop_srb *srb,
 2406                                 bus_dma_segment_t *segs, int nsegs)
 2407 {
 2408         int idx;
 2409         union ccb *ccb = srb->ccb;
 2410         u_int8_t *cdb;
 2411 
 2412         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2413                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2414         else
 2415                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2416 
 2417         KdPrint(("ccb=%p %x-%x-%x\n",
 2418                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
 2419 
 2420         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
 2421                 u_int32_t iop_req32;
 2422                 struct hpt_iop_request_scsi_command req;
 2423 
 2424                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
 2425 
 2426                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
 2427                         device_printf(hba->pcidev, "invalid req offset\n");
 2428                         ccb->ccb_h.status = CAM_BUSY;
 2429                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
 2430                         hptiop_free_srb(hba, srb);
 2431                         xpt_done(ccb);
 2432                         return;
 2433                 }
 2434 
 2435                 if (ccb->csio.dxfer_len && nsegs > 0) {
 2436                         struct hpt_iopsg *psg = req.sg_list;
 2437                         for (idx = 0; idx < nsegs; idx++, psg++) {
 2438                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
 2439                                 psg->size = segs[idx].ds_len;
 2440                                 psg->eot = 0;
 2441                         }
 2442                         psg[-1].eot = 1;
 2443                 }
 2444 
 2445                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
 2446 
 2447                 req.header.size =
 2448                                 offsetof(struct hpt_iop_request_scsi_command, sg_list)
 2449                                 + nsegs*sizeof(struct hpt_iopsg);
 2450                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2451                 req.header.flags = 0;
 2452                 req.header.result = IOP_RESULT_PENDING;
 2453                 req.header.context = (u_int64_t)(unsigned long)srb;
 2454                 req.dataxfer_length = ccb->csio.dxfer_len;
  2455                 req.channel = 0;
  2456                 req.target = ccb->ccb_h.target_id;
  2457                 req.lun = ccb->ccb_h.target_lun;
 2458 
 2459                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
 2460                         (u_int8_t *)&req, req.header.size);
 2461 
 2462                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2463                         bus_dmamap_sync(hba->io_dmat,
 2464                                 srb->dma_map, BUS_DMASYNC_PREREAD);
 2465                 }
 2466                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
 2467                         bus_dmamap_sync(hba->io_dmat,
 2468                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
 2469 
  2470                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
 2471         } else {
 2472                 struct hpt_iop_request_scsi_command *req;
 2473 
 2474                 req = (struct hpt_iop_request_scsi_command *)srb;
 2475                 if (ccb->csio.dxfer_len && nsegs > 0) {
 2476                         struct hpt_iopsg *psg = req->sg_list;
 2477                         for (idx = 0; idx < nsegs; idx++, psg++) {
 2478                                 psg->pci_address =
 2479                                         (u_int64_t)segs[idx].ds_addr;
 2480                                 psg->size = segs[idx].ds_len;
 2481                                 psg->eot = 0;
 2482                         }
 2483                         psg[-1].eot = 1;
 2484                 }
 2485 
 2486                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2487 
 2488                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2489                 req->header.result = IOP_RESULT_PENDING;
 2490                 req->dataxfer_length = ccb->csio.dxfer_len;
  2491                 req->channel = 0;
  2492                 req->target = ccb->ccb_h.target_id;
  2493                 req->lun = ccb->ccb_h.target_lun;
 2494                 req->header.size =
 2495                         offsetof(struct hpt_iop_request_scsi_command, sg_list)
 2496                         + nsegs*sizeof(struct hpt_iopsg);
 2497                 req->header.context = (u_int64_t)srb->index |
 2498                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
 2499                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
 2500 
 2501                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2502                         bus_dmamap_sync(hba->io_dmat,
 2503                                 srb->dma_map, BUS_DMASYNC_PREREAD);
  2504                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
 2505                         bus_dmamap_sync(hba->io_dmat,
 2506                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
 2507                 }
 2508 
 2509                 if (hba->firmware_version > 0x01020000
 2510                         || hba->interface_version > 0x01020000) {
 2511                         u_int32_t size_bits;
 2512 
 2513                         if (req->header.size < 256)
 2514                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
 2515                         else if (req->header.size < 512)
 2516                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
 2517                         else
 2518                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
 2519                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
 2520 
 2521                         BUS_SPACE_WRT4_ITL(inbound_queue,
 2522                                 (u_int32_t)srb->phy_addr | size_bits);
 2523                 } else
 2524                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
  2525                                 | IOPMU_QUEUE_ADDR_HOST_BIT);
 2526         }
 2527 }
 2528 
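      /*
       * MV posting: the request lives in the SRB itself and its
       * physical address is written to the inbound queue with the size
       * (in 256-byte units, clamped to 3) in the low bits.
       */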
 2529 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
 2530                                 struct hpt_iop_srb *srb,
 2531                                 bus_dma_segment_t *segs, int nsegs)
 2532 {
 2533         int idx, size;
 2534         union ccb *ccb = srb->ccb;
 2535         u_int8_t *cdb;
 2536         struct hpt_iop_request_scsi_command *req;
 2537         u_int64_t req_phy;
 2538 
 2539         req = (struct hpt_iop_request_scsi_command *)srb;
 2540         req_phy = srb->phy_addr;
 2541 
 2542         if (ccb->csio.dxfer_len && nsegs > 0) {
 2543                 struct hpt_iopsg *psg = req->sg_list;
 2544                 for (idx = 0; idx < nsegs; idx++, psg++) {
 2545                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
 2546                         psg->size = segs[idx].ds_len;
 2547                         psg->eot = 0;
 2548                 }
 2549                 psg[-1].eot = 1;
 2550         }
 2551         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2552                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2553         else
 2554                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2555 
 2556         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2557         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2558         req->header.result = IOP_RESULT_PENDING;
 2559         req->dataxfer_length = ccb->csio.dxfer_len;
 2560         req->channel = 0;
 2561         req->target = ccb->ccb_h.target_id;
 2562         req->lun = ccb->ccb_h.target_lun;
 2563         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
 2564                                 - sizeof(struct hpt_iopsg)
 2565                                 + nsegs * sizeof(struct hpt_iopsg);
 2566         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2567                 bus_dmamap_sync(hba->io_dmat,
 2568                         srb->dma_map, BUS_DMASYNC_PREREAD);
 2569         } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
 2570                 bus_dmamap_sync(hba->io_dmat,
 2571                         srb->dma_map, BUS_DMASYNC_PREWRITE);
 2572         }
 2573         req->header.context = (u_int64_t)srb->index
 2574                                         << MVIOP_REQUEST_NUMBER_START_BIT
 2575                                         | MVIOP_CMD_TYPE_SCSI;
 2576         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
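              /*
               * The low bits of the queue entry carry the request size in
               * 256-byte units, clamped to 3.
               */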
 2577         size = req->header.size >> 8;
 2578         hptiop_mv_inbound_write(req_phy
 2579                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
 2580                         | (size > 3 ? 3 : size), hba);
 2581 }
 2582 
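      /*
       * Post a SCSI command to an MVFrey IOP: requests go onto a
       * host-resident inbound list (a ring shared with the firmware)
       * instead of a doorbell FIFO.  Note that scatter/gather addresses
       * are posted with bit 0 set, apparently a hardware valid flag.
       */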
 2583 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
 2584                                 struct hpt_iop_srb *srb,
 2585                                 bus_dma_segment_t *segs, int nsegs)
 2586 {
 2587         int idx, index;
 2588         union ccb *ccb = srb->ccb;
 2589         u_int8_t *cdb;
 2590         struct hpt_iop_request_scsi_command *req;
 2591         u_int64_t req_phy;
 2592 
 2593         req = (struct hpt_iop_request_scsi_command *)srb;
 2594         req_phy = srb->phy_addr;
 2595 
 2596         if (ccb->csio.dxfer_len && nsegs > 0) {
 2597                 struct hpt_iopsg *psg = req->sg_list;
 2598                 for (idx = 0; idx < nsegs; idx++, psg++) {
 2599                         psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
 2600                         psg->size = segs[idx].ds_len;
 2601                         psg->eot = 0;
 2602                 }
 2603                 psg[-1].eot = 1;
 2604         }
 2605         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
 2606                 cdb = ccb->csio.cdb_io.cdb_ptr;
 2607         else
 2608                 cdb = ccb->csio.cdb_io.cdb_bytes;
 2609 
 2610         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
 2611         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
 2612         req->header.result = IOP_RESULT_PENDING;
 2613         req->dataxfer_length = ccb->csio.dxfer_len;
 2614         req->channel = 0;
 2615         req->target = ccb->ccb_h.target_id;
 2616         req->lun = ccb->ccb_h.target_lun;
 2617         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
 2618                                 - sizeof(struct hpt_iopsg)
 2619                                 + nsegs * sizeof(struct hpt_iopsg);
 2620         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
 2621                 bus_dmamap_sync(hba->io_dmat,
 2622                         srb->dma_map, BUS_DMASYNC_PREREAD);
 2623         } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
 2624                 bus_dmamap_sync(hba->io_dmat,
 2625                         srb->dma_map, BUS_DMASYNC_PREWRITE);
 2626         }
 2627 
 2628         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
 2629                                                 | IOP_REQUEST_FLAG_ADDR_BITS
 2630                                                 | ((req_phy >> 16) & 0xffff0000);
 2631         req->header.context = ((req_phy & 0xffffffff) << 32)
 2632                                                 | srb->index << 4
 2633                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
 2634 
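              /*
               * Advance the inbound list write pointer: the low 14 bits
               * index the ring, and CL_POINTER_TOGGLE is flipped on each
               * wrap, presumably so the firmware can tell full from empty.
               */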
 2635         hba->u.mvfrey.inlist_wptr++;
 2636         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
 2637 
 2638         if (index == hba->u.mvfrey.list_count) {
 2639                 index = 0;
 2640                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
 2641                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
 2642         }
 2643 
 2644         hba->u.mvfrey.inlist[index].addr = req_phy;
 2645         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
 2646 
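              /* Publish the new write pointer; the read-back likely serves
               * to flush the posted PCI write. */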
 2647         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
 2648         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
 2649 
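              /*
               * Arm a 20-second watchdog that resets the adapter if the
               * request stalls (presumably cancelled on completion); the
               * type test is trivially true, since the header type was
               * assigned above.
               */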
 2650         if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
 2651                 callout_reset(&ccb->ccb_h.timeout_ch, 20*hz,
 2652                     hptiop_reset_adapter, hba);
 2653         }
 2654 }
 2655 
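      /*
       * bus_dmamap_load() callback: reject loads that failed or exceed
       * the adapter's S/G limit, otherwise dispatch the request through
       * the adapter-specific post_req hook.
       */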
 2656 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
 2657                                         int nsegs, int error)
 2658 {
 2659         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
 2660         union ccb *ccb = srb->ccb;
 2661         struct hpt_iop_hba *hba = srb->hba;
 2662 
 2663         if (error || nsegs > hba->max_sg_count) {
 2664                 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
 2665                         ccb->ccb_h.func_code,
 2666                         ccb->ccb_h.target_id,
 2667                         ccb->ccb_h.target_lun, nsegs));
 2668                 ccb->ccb_h.status = CAM_BUSY;
 2669                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
 2670                 hptiop_free_srb(hba, srb);
 2671                 xpt_done(ccb);
 2672                 return;
 2673         }
 2674 
 2675         hba->ops->post_req(hba, srb, segs, nsegs);
 2676 }
 2677 
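      /* Record the 32-byte-aligned physical and virtual addresses of the
       * control/config request buffer once its DMA segment is known. */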
 2678 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
 2679                                 int nsegs, int error)
 2680 {
 2681         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2682         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
 2683                                 & ~(u_int64_t)0x1F;
 2684         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
 2685                                 & ~0x1F);
 2686 }
 2687 
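      /*
       * MVFrey variant of the ctlcfg mapping callback: the same DMA
       * allocation is carved into the config buffer, the inbound and
       * outbound lists, and the outbound copy pointer.
       */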
 2688 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
 2689                                 int nsegs, int error)
 2690 {
 2691         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2692         u_int8_t *p;
 2693         u_int64_t phy;
 2694         u_int32_t list_count = hba->u.mvfrey.list_count;
 2695 
 2696         phy = ((u_int64_t)segs->ds_addr + 0x1F)
 2697                                 & ~(u_int64_t)0x1F;
 2698         p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
 2699                                 & ~0x1F);
 2700 
 2701         hba->ctlcfgcmd_phy = phy;
 2702         hba->ctlcfg_ptr = p;
 2703 
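              /* The first 0x800 bytes hold the config request itself; the
               * list structures are laid out directly after it. */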
 2704         p += 0x800;
 2705         phy += 0x800;
 2706 
 2707         hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
 2708         hba->u.mvfrey.inlist_phy = phy;
 2709 
 2710         p += list_count * sizeof(struct mvfrey_inlist_entry);
 2711         phy += list_count * sizeof(struct mvfrey_inlist_entry);
 2712 
 2713         hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
 2714         hba->u.mvfrey.outlist_phy = phy;
 2715 
 2716         p += list_count * sizeof(struct mvfrey_outlist_entry);
 2717         phy += list_count * sizeof(struct mvfrey_outlist_entry);
 2718 
 2719         hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
 2720         hba->u.mvfrey.outlist_cptr_phy = phy;
 2721 }
 2722 
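      /*
       * bus_dmamem_alloc() callback for the SRB pool: align the base to
       * 32 bytes, create a per-SRB DMA map, and record each SRB's
       * physical address (ITL IOPs store it pre-shifted right by 5).
       */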
 2723 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
 2724                                 int nsegs, int error)
 2725 {
 2726         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
 2727         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
 2728         struct hpt_iop_srb *srb, *tmp_srb;
 2729         int i;
 2730 
 2731         if (error || nsegs == 0) {
 2732                 device_printf(hba->pcidev, "hptiop_map_srb error\n");
 2733                 return;
 2734         }
 2735 
 2736         /* map srb */
 2737         srb = (struct hpt_iop_srb *)
 2738                 (((unsigned long)hba->uncached_ptr + 0x1F)
 2739                 & ~(unsigned long)0x1F);
 2740 
 2741         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
 2742                 tmp_srb = (struct hpt_iop_srb *)
 2743                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
 2744                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
 2745                         if (bus_dmamap_create(hba->io_dmat,
 2746                                                 0, &tmp_srb->dma_map)) {
 2747                                 device_printf(hba->pcidev, "dmamap create failed\n");
 2748                                 return;
 2749                         }
 2750 
 2751                         bzero(tmp_srb, sizeof(struct hpt_iop_srb));
 2752                         tmp_srb->hba = hba;
 2753                         tmp_srb->index = i;
 2754                         if (hba->ctlcfg_ptr == NULL) { /* ITL IOP */
 2755                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
 2756                                                         (phy_addr >> 5);
 2757                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
 2758                                         tmp_srb->srb_flag =
 2759                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
 2760                         } else {
 2761                                 tmp_srb->phy_addr = phy_addr;
 2762                         }
 2763 
 2764                         hptiop_free_srb(hba, tmp_srb);
 2765                         hba->srb[i] = tmp_srb;
 2766                         phy_addr += HPT_SRB_MAX_SIZE;
 2767                 } else {
 2768                         device_printf(hba->pcidev,
 2769                                 "invalid alignment\n");
 2770                         return;
 2771                 }
 2772         }
 2773 }
 2774 
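      /* Message-done callback: flag completion of an adapter message for
       * callers waiting on msg_done. */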
 2775 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg)
 2776 {
 2777         hba->msg_done = 1;
 2778 }
 2779 
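      /*
       * Decide whether a target may be removed: refuse if a "da"
       * peripheral still holds references on the target's path.
       */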
 2780 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba,
 2781                                                 int target_id)
 2782 {
 2783         struct cam_periph       *periph = NULL;
 2784         struct cam_path         *path;
 2785         int                     status, retval = 0;
 2786 
 2787         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
 2788 
 2789         if (status == CAM_REQ_CMP) {
 2790                 if ((periph = cam_periph_find(path, "da")) != NULL) {
 2791                         if (periph->refcount >= 1) {
 2792                                 device_printf(hba->pcidev, "target_id=0x%x, "
 2793                                     "refcount=%d\n", target_id, periph->refcount);
 2794                                 retval = -1;
 2795                         }
 2796                 }
 2797                 xpt_free_path(path);
 2798         }
 2799         return retval;
 2800 }
 2801 
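      /*
       * Undo everything attach set up: CAM registration, the ctlcfg and
       * SRB DMA memory, DMA tags, the interrupt, PCI BARs, and the
       * ioctl device node, roughly in reverse order of allocation.
       */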
 2802 static void hptiop_release_resource(struct hpt_iop_hba *hba)
 2803 {
 2804         int i;
 2805         if (hba->path) {
 2806                 struct ccb_setasync ccb;
 2807 
 2808                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
 2809                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
 2810                 ccb.event_enable = 0;
 2811                 ccb.callback = hptiop_async;
 2812                 ccb.callback_arg = hba->sim;
 2813                 xpt_action((union ccb *)&ccb);
 2814                 xpt_free_path(hba->path);
 2815         }
 2816 
 2817         if (hba->sim) {
 2818                 xpt_bus_deregister(cam_sim_path(hba->sim));
 2819                 cam_sim_free(hba->sim);
 2820         }
 2821 
 2822         if (hba->ctlcfg_dmat) {
 2823                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
 2824                 bus_dmamem_free(hba->ctlcfg_dmat,
 2825                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
 2826                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
 2827         }
 2828 
 2829         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
 2830                 struct hpt_iop_srb *srb = hba->srb[i];
 2831                 if (srb != NULL && srb->dma_map) /* srb may be NULL if setup failed */
 2832                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
 2833         }
 2834 
 2835         if (hba->srb_dmat) {
 2836                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
 2837                 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
 2838                 bus_dma_tag_destroy(hba->srb_dmat);
 2839         }
 2840 
 2841         if (hba->io_dmat)
 2842                 bus_dma_tag_destroy(hba->io_dmat);
 2843 
 2844         if (hba->parent_dmat)
 2845                 bus_dma_tag_destroy(hba->parent_dmat);
 2846 
 2847         if (hba->irq_handle)
 2848                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
 2849 
 2850         if (hba->irq_res)
 2851                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
 2852                                         0, hba->irq_res);
 2853 
 2854         if (hba->bar0_res)
 2855                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 2856                                         hba->bar0_rid, hba->bar0_res);
 2857         if (hba->bar2_res)
 2858                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
 2859                                         hba->bar2_rid, hba->bar2_res);
 2860         if (hba->ioctl_dev)
 2861                 destroy_dev(hba->ioctl_dev);
 2862         dev_ops_remove_minor(&hptiop_ops, device_get_unit(hba->pcidev));
 2863 }
