FreeBSD/Linux Kernel Cross Reference
sys/dev/twa/tw_cl_io.c


    1 /*
    2  * Copyright (c) 2004-05 Applied Micro Circuits Corporation.
    3  * Copyright (c) 2004-05 Vinod Kashyap
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  *      $FreeBSD: src/sys/dev/twa/tw_cl_io.c,v 1.1.2.2 2005/12/07 18:20:37 vkashyap Exp $
   28  */
   29 
   30 /*
    31  * AMCC's 3ware driver for 9000 series storage controllers.
   32  *
   33  * Author: Vinod Kashyap
   34  */
   35 
   36 
   37 /*
   38  * Common Layer I/O functions.
   39  */
   40 
   41 
   42 #include "tw_osl_share.h"
   43 #include "tw_cl_share.h"
   44 #include "tw_cl_fwif.h"
   45 #include "tw_cl_ioctl.h"
   46 #include "tw_cl.h"
   47 #include "tw_cl_externs.h"
   48 #include "tw_osl_ioctl.h"
   49 
   50 
   51 
   52 /*
   53  * Function name:       tw_cl_start_io
   54  * Description:         Interface to OS Layer for accepting SCSI requests.
   55  *
   56  * Input:               ctlr_handle     -- controller handle
   57  *                      req_pkt         -- OSL built request packet
   58  *                      req_handle      -- request handle
   59  * Output:              None
   60  * Return value:        0       -- success
   61  *                      non-zero-- failure
   62  */
   63 TW_INT32
   64 tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
   65         struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
   66 {
   67         struct tw_cli_ctlr_context              *ctlr;
   68         struct tw_cli_req_context               *req;
   69         struct tw_cl_command_9k                 *cmd;
   70         struct tw_cl_scsi_req_packet            *scsi_req;
   71         TW_INT32                                error;
   72 
   73         tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
   74 
   75         ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
   76 
   77         if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
   78                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
   79                         "I/O during reset: returning busy. Ctlr state = 0x%x",
   80                         ctlr->state);
   81                 tw_osl_ctlr_busy(ctlr_handle, req_handle);
   82                 return(TW_OSL_EBUSY);
   83         }
   84 
   85         /*
   86          * If working with a firmware version that does not support multiple
   87          * luns, and this request is directed at a non-zero lun, error it
   88          * back right away.
   89          */
   90         if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
   91                 (ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
   92                 req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
   93                         TW_CL_ERR_REQ_SCSI_ERROR);
   94                 req_pkt->tw_osl_callback(req_handle);
   95                 return(TW_CL_ERR_REQ_SUCCESS);
   96         }
   97 
   98         if ((req = tw_cli_get_request(ctlr
   99 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  100                 , req_pkt
  101 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  102                 )) == TW_CL_NULL) {
  103                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
  104                         "Out of request context packets: returning busy");
  105                 tw_osl_ctlr_busy(ctlr_handle, req_handle);
  106                 return(TW_OSL_EBUSY);
  107         }
  108 
  109         req_handle->cl_req_ctxt = req;
  110 
  111 #ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
  112 
  113         req->cmd_pkt = req_pkt->dma_mem;
  114         req->cmd_pkt_phys = req_pkt->dma_mem_phys;
  115         tw_osl_memzero(req->cmd_pkt,
  116                 sizeof(struct tw_cl_command_header) +
  117                 28 /* max bytes before sglist */);
  118 
  119 #endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
  120 
  121         req->req_handle = req_handle;
  122         req->orig_req = req_pkt;
  123         req->tw_cli_callback = tw_cli_complete_io;
  124 
  125         req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
  126         req->flags |= TW_CLI_REQ_FLAGS_9K;
  127 
  128         scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
  129 
  130         /* Build the cmd pkt. */
  131         cmd = &(req->cmd_pkt->command.cmd_pkt_9k);
  132 
  133         req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
  134 
  135         cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
  136         cmd->unit = (TW_UINT8)(scsi_req->unit);
  137         cmd->lun_l4__req_id = TW_CL_SWAP16(
  138                 BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
  139         cmd->status = 0;
  140         cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
  141         tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);
  142 
  143         if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
  144                 TW_UINT32       num_sgl_entries;
  145 
  146                 req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
  147                         &num_sgl_entries);
  148                 cmd->lun_h4__sgl_entries =
  149                         TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
  150                                 num_sgl_entries));
  151         } else {
  152                 cmd->lun_h4__sgl_entries =
  153                         TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
  154                                 scsi_req->sgl_entries));
  155                 tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
  156                         cmd->sg_list, scsi_req->sgl_entries);
  157         }
  158 
  159         if ((error = tw_cli_submit_cmd(req))) {
  160                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
  161                         "Could not start request. request = %p, error = %d",
  162                         req, error);
  163                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
  164         }
  165         return(error);
  166 }
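/*
 * Hypothetical OSL-side sketch of a caller of tw_cl_start_io() above; it is
 * not part of the driver.  Only the fields tw_cl_start_io() actually reads
 * are filled in; my_osl_send_inquiry(), my_osl_complete(), the INQUIRY CDB,
 * and the assumption that scsi_req->cdb is a pointer (as the tw_osl_memcpy()
 * above suggests) are illustrative, and SG-list setup is elided.
 */
static TW_VOID
my_osl_complete(struct tw_cl_req_handle *req_handle)
{
        /* The OSL would complete the request to its upper layer here. */
}

static TW_INT32
my_osl_send_inquiry(struct tw_cl_ctlr_handle *ctlr_handle,
        struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
        struct tw_cl_scsi_req_packet    *scsi_req =
                &(req_pkt->gen_req_pkt.scsi_req);
        static TW_UINT8                 inquiry_cdb[6] =
                {0x12, 0, 0, 0, 36, 0};

        scsi_req->unit = 0;        /* target unit on the controller */
        scsi_req->lun = 0;         /* non-zero luns need newer firmware */
        scsi_req->cdb = inquiry_cdb;
        scsi_req->cdb_len = 6;
        scsi_req->sgl_entries = 1; /* assumes scsi_req->sg_list is built */
        req_pkt->tw_osl_callback = my_osl_complete;

        /* 0 on success; TW_OSL_EBUSY after tw_osl_ctlr_busy() was called. */
        return(tw_cl_start_io(ctlr_handle, req_pkt, req_handle));
}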
  167 
  168 
  169 
  170 /*
  171  * Function name:       tw_cli_submit_cmd
  172  * Description:         Submits a cmd to firmware.
  173  *
  174  * Input:               req     -- ptr to CL internal request context
  175  * Output:              None
  176  * Return value:        0       -- success
  177  *                      non-zero-- failure
  178  */
  179 TW_INT32
  180 tw_cli_submit_cmd(struct tw_cli_req_context *req)
  181 {
  182         struct tw_cli_ctlr_context      *ctlr = req->ctlr;
  183         struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
  184         TW_UINT32                       status_reg;
  185         TW_INT32                        error;
  186         TW_UINT8                        notify_osl_of_ctlr_busy = TW_CL_FALSE;
  187 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  188         TW_SYNC_HANDLE                  sync_handle;
  189 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  190 
  191         tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
  192 
  193         /* Serialize access to the controller cmd queue. */
  194         tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
  195 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  196         if (req->flags & TW_CLI_REQ_FLAGS_EXTERNAL) {
  197                 if (!(ctlr->flags & TW_CL_DEFERRED_INTR_USED))
  198                         tw_osl_sync_isr_block(ctlr_handle, &sync_handle);
  199         } else {
  200                 if (ctlr->flags & TW_CL_DEFERRED_INTR_USED)
  201                         tw_osl_sync_io_block(ctlr_handle, &sync_handle);
  202         }
  203 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  204 
  205         /* Check to see if we can post a command. */
  206         status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
  207         if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
  208                 goto out;
  209 
  210         if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
  211                 struct tw_cl_req_packet *req_pkt =
  212                         (struct tw_cl_req_packet *)(req->orig_req);
  213 
  214                 tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
  215                         "Cmd queue full");
  216 
  217                 if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
  218 #ifndef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  219                         || ((req_pkt) &&
  220                         (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
  221 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  222                         ) {
  223                         if (req->state != TW_CLI_REQ_STATE_PENDING) {
  224                                 tw_cli_dbg_printf(2, ctlr_handle,
  225                                         tw_osl_cur_func(),
  226                                         "pending internal/ioctl request");
  227                                 req->state = TW_CLI_REQ_STATE_PENDING;
  228                                 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
  229                                 error = 0;
  230                         } else
  231                                 error = TW_OSL_EBUSY;
  232                 } else {
  233                         notify_osl_of_ctlr_busy = TW_CL_TRUE;
  234                         error = TW_OSL_EBUSY;
  235                 }
  236         } else {
  237                 tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
  238                         "Submitting command");
  239 
  240                 /*
  241                  * The controller cmd queue is not full.  Mark the request as
  242                  * currently being processed by the firmware, and move it into
  243                  * the busy queue.  Then submit the cmd.
  244                  */
  245                 req->state = TW_CLI_REQ_STATE_BUSY;
  246                 tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);
  247                 TW_CLI_WRITE_COMMAND_QUEUE(ctlr_handle,
  248                         req->cmd_pkt_phys +
  249                         sizeof(struct tw_cl_command_header));
  250         }
  251 
  252 out:
  253 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  254         if (req->flags & TW_CLI_REQ_FLAGS_EXTERNAL) {
  255                 if (!(ctlr->flags & TW_CL_DEFERRED_INTR_USED))
  256                         tw_osl_sync_isr_unblock(ctlr_handle, &sync_handle);
  257         } else {
  258                 if (ctlr->flags & TW_CL_DEFERRED_INTR_USED)
  259                         tw_osl_sync_io_unblock(ctlr_handle, &sync_handle);
  260         }
  261 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  262         tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
  263 
  264         if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
  265                 if (notify_osl_of_ctlr_busy)
  266                         tw_osl_ctlr_busy(ctlr_handle, req->req_handle);
  267 
  268                 /*
  269                  * Synchronize access between writes to command and control
  270                  * registers in 64-bit environments, on G66.
  271                  */
  272                 if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
  273                         tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
  274 
  275                 /* Unmask command interrupt. */
  276                 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
  277                         TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
  278 
  279                 if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
  280                         tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
  281         }
  282 
  283         return(error);
  284 }
  285 
  286 
  287 
  288 /*
  289  * Function name:       tw_cl_fw_passthru
  290  * Description:         Interface to OS Layer for accepting firmware
  291  *                      passthru requests.
  292  * Input:               ctlr_handle     -- controller handle
  293  *                      req_pkt         -- OSL built request packet
  294  *                      req_handle      -- request handle
  295  * Output:              None
  296  * Return value:        0       -- success
  297  *                      non-zero-- failure
  298  */
  299 TW_INT32
  300 tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
  301         struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
  302 {
  303         struct tw_cli_ctlr_context              *ctlr;
  304         struct tw_cli_req_context               *req;
  305         union tw_cl_command_7k                  *cmd_7k;
  306         struct tw_cl_command_9k                 *cmd_9k;
  307         struct tw_cl_passthru_req_packet        *pt_req;
  308         TW_UINT8                                opcode;
  309         TW_UINT8                                sgl_offset;
  310         TW_VOID                                 *sgl = TW_CL_NULL;
  311         TW_INT32                                error;
  312 
  313         tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");
  314 
  315         ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
  316 
  317         if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
  318                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
  319                         "Passthru request during reset: returning busy. "
  320                         "Ctlr state = 0x%x",
  321                         ctlr->state);
  322                 tw_osl_ctlr_busy(ctlr_handle, req_handle);
  323                 return(TW_OSL_EBUSY);
  324         }
  325 
  326         if ((req = tw_cli_get_request(ctlr
  327 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  328                 , req_pkt
  329 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  330                 )) == TW_CL_NULL) {
  331                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
  332                         "Out of request context packets: returning busy");
  333                 tw_osl_ctlr_busy(ctlr_handle, req_handle);
  334                 return(TW_OSL_EBUSY);
  335         }
  336 
  337         req_handle->cl_req_ctxt = req;
  338 
  339 #ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
  340 
  341         req->cmd_pkt = req_pkt->dma_mem;
  342         req->cmd_pkt_phys = req_pkt->dma_mem_phys;
  343         tw_osl_memzero(req->cmd_pkt,
  344                 sizeof(struct tw_cl_command_header) +
  345                 28 /* max bytes before sglist */);
  346 
  347 #endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
  348 
  349         req->req_handle = req_handle;
  350         req->orig_req = req_pkt;
  351         req->tw_cli_callback = tw_cli_complete_io;
  352 
  353         req->flags |= (TW_CLI_REQ_FLAGS_EXTERNAL | TW_CLI_REQ_FLAGS_PASSTHRU);
  354 
  355         pt_req = &(req_pkt->gen_req_pkt.pt_req);
  356 
  357         tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
  358                 pt_req->cmd_pkt_length);
  359         /* Build the cmd pkt. */
  360         if ((opcode = GET_OPCODE(((TW_UINT8 *)
  361                 (pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
  362                         == TWA_FW_CMD_EXECUTE_SCSI) {
  363                 TW_UINT16       lun_l4, lun_h4;
  364 
  365                 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
  366                         "passthru: 9k cmd pkt");
  367                 req->flags |= TW_CLI_REQ_FLAGS_9K;
  368                 cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
  369                 lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
  370                 lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
  371                 cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
  372                         BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
  373                 if (pt_req->sgl_entries) {
  374                         cmd_9k->lun_h4__sgl_entries =
  375                                 TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
  376                                         pt_req->sgl_entries));
  377                         sgl = (TW_VOID *)(cmd_9k->sg_list);
  378                 }
  379         } else {
  380                 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
  381                         "passthru: 7k cmd pkt");
  382                 cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
  383                 cmd_7k->generic.request_id =
  384                         (TW_UINT8)(TW_CL_SWAP16(req->request_id));
  385                 if ((sgl_offset =
  386                         GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
  387                         sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
  388                         cmd_7k->generic.size += pt_req->sgl_entries *
  389                                 ((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
  390                 }
  391         }
  392 
  393         if (sgl)
  394                 tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
  395                         sgl, pt_req->sgl_entries);
  396 
  397         if ((error = tw_cli_submit_cmd(req))) {
  398                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
  399                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
  400                         0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
  401                         "Failed to start passthru command",
  402                         "error = %d", error);
  403                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
  404         }
  405         return(error);
  406 }
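/*
 * Hypothetical helper (not in the driver) spelling out the "? 3 : 2"
 * arithmetic used above and in the param routines below: the 7000-series
 * 'size' field counts 32-bit words, and each SG descriptor occupies
 * 3 words (8-byte address + 4-byte length) with 64-bit addressing, or
 * 2 words (4 + 4) otherwise.
 */
static TW_UINT32
example_sgl_size_in_words(struct tw_cli_ctlr_context *ctlr,
        TW_UINT32 sgl_entries)
{
        return(sgl_entries *
                ((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2));
}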
  407 
  408 
  409 
  410 /*
  411  * Function name:       tw_cl_ioctl
  412  * Description:         Handler of CL supported ioctl cmds.
  413  *
  414  * Input:               ctlr    -- ptr to per ctlr structure
  415  *                      cmd     -- ioctl cmd
  416  *                      buf     -- ptr to buffer in kernel memory, which is
  417  *                                 a copy of the input buffer in user-space
  418  * Output:              buf     -- ptr to buffer in kernel memory, which will
  419  *                                 need to be copied to the output buffer in
  420  *                                 user-space
  421  * Return value:        0       -- success
  422  *                      non-zero-- failure
  423  */
  424 TW_INT32
  425 tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, TW_INT32 cmd, TW_VOID *buf)
  426 {
  427         struct tw_cli_ctlr_context      *ctlr =
  428                 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
  429         struct tw_cl_ioctl_packet       *user_buf =
  430                 (struct tw_cl_ioctl_packet *)buf;
  431         struct tw_cl_event_packet       event_buf;
  432         TW_INT32                        event_index;
  433         TW_INT32                        start_index;
  434         TW_INT32                        error = TW_OSL_ESUCCESS;
  435 
  436         tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");
  437 
  438         /* Serialize access to the AEN queue and the ioctl lock. */
  439         tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);
  440 
  441         switch (cmd) {
  442         case TW_CL_IOCTL_GET_FIRST_EVENT:
  443                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  444                         "Get First Event");
  445 
  446                 if (ctlr->aen_q_wrapped) {
  447                         if (ctlr->aen_q_overflow) {
  448                                 /*
  449                                  * The aen queue has wrapped, even before some
  450                                  * events have been retrieved.  Let the caller
  451                                  * know that he missed out on some AEN's.
  452                                  */
  453                                 user_buf->driver_pkt.status =
  454                                         TW_CL_ERROR_AEN_OVERFLOW;
  455                                 ctlr->aen_q_overflow = TW_CL_FALSE;
  456                         } else
  457                                 user_buf->driver_pkt.status = 0;
  458                         event_index = ctlr->aen_head;
  459                 } else {
  460                         if (ctlr->aen_head == ctlr->aen_tail) {
  461                                 user_buf->driver_pkt.status =
  462                                         TW_CL_ERROR_AEN_NO_EVENTS;
  463                                 break;
  464                         }
  465                         user_buf->driver_pkt.status = 0;
  466                         event_index = ctlr->aen_tail;   /* = 0 */
  467                 }
  468                 tw_osl_memcpy(user_buf->data_buf,
  469                         &(ctlr->aen_queue[event_index]),
  470                         sizeof(struct tw_cl_event_packet));
  471 
  472                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
  473 
  474                 break;
  475 
  476 
  477         case TW_CL_IOCTL_GET_LAST_EVENT:
  478                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  479                         "Get Last Event");
  480 
  481                 if (ctlr->aen_q_wrapped) {
  482                         if (ctlr->aen_q_overflow) {
  483                                 /*
  484                                  * The aen queue has wrapped, even before some
  485                                  * events have been retrieved.  Let the caller
  486                                  * know that he missed out on some AEN's.
  487                                  */
  488                                 user_buf->driver_pkt.status =
  489                                         TW_CL_ERROR_AEN_OVERFLOW;
  490                                 ctlr->aen_q_overflow = TW_CL_FALSE;
  491                         } else
  492                                 user_buf->driver_pkt.status = 0;
  493                 } else {
  494                         if (ctlr->aen_head == ctlr->aen_tail) {
  495                                 user_buf->driver_pkt.status =
  496                                         TW_CL_ERROR_AEN_NO_EVENTS;
  497                                 break;
  498                         }
  499                         user_buf->driver_pkt.status = 0;
  500                 }
  501                 event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
  502                         ctlr->max_aens_supported;
  503 
  504                 tw_osl_memcpy(user_buf->data_buf,
  505                         &(ctlr->aen_queue[event_index]),
  506                         sizeof(struct tw_cl_event_packet));
  507 
  508                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
  509                 
  510                 break;
  511 
  512 
  513         case TW_CL_IOCTL_GET_NEXT_EVENT:
  514                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  515                         "Get Next Event");
  516 
  517                 user_buf->driver_pkt.status = 0;
  518                 if (ctlr->aen_q_wrapped) {
  519                         tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  520                                 "Get Next Event: wrapped");
  521                         if (ctlr->aen_q_overflow) {
  522                                 /*
  523                                  * The aen queue has wrapped, even before some
  524                                  * events have been retrieved.  Let the caller
  525                                  * know that he missed out on some AEN's.
  526                                  */
  527                                 tw_cli_dbg_printf(2, ctlr_handle,
  528                                         tw_osl_cur_func(),
  529                                         "Get Next Event: overflow");
  530                                 user_buf->driver_pkt.status =
  531                                         TW_CL_ERROR_AEN_OVERFLOW;
  532                                 ctlr->aen_q_overflow = TW_CL_FALSE;
  533                         }
  534                         start_index = ctlr->aen_head;
  535                 } else {
  536                         if (ctlr->aen_head == ctlr->aen_tail) {
  537                                 tw_cli_dbg_printf(3, ctlr_handle,
  538                                         tw_osl_cur_func(),
  539                                         "Get Next Event: empty queue");
  540                                 user_buf->driver_pkt.status =
  541                                         TW_CL_ERROR_AEN_NO_EVENTS;
  542                                 break;
  543                         }
  544                         start_index = ctlr->aen_tail;   /* = 0 */
  545                 }
  546                 tw_osl_memcpy(&event_buf, user_buf->data_buf,
  547                         sizeof(struct tw_cl_event_packet));
  548 
  549                 event_index = (start_index + event_buf.sequence_id -
  550                         ctlr->aen_queue[start_index].sequence_id + 1) %
  551                         ctlr->max_aens_supported;
  552 
  553                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  554                         "Get Next Event: si = %x, ei = %x, ebsi = %x, "
  555                         "sisi = %x, eisi = %x",
  556                         start_index, event_index, event_buf.sequence_id,
  557                         ctlr->aen_queue[start_index].sequence_id,
  558                         ctlr->aen_queue[event_index].sequence_id);
  559 
  560                 if (! (ctlr->aen_queue[event_index].sequence_id >
  561                         event_buf.sequence_id)) {
  562                         /*
  563                          * We don't have any event matching the criterion.  So,
  564                          * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
  565                          * encountered an overflow condition above, we cannot
  566                          * report both conditions during this call.  We choose
  567                          * to report NO_EVENTS this time, and an overflow the
  568                          * next time we are called.
  569                          */
  570                         if (user_buf->driver_pkt.status ==
  571                                 TW_CL_ERROR_AEN_OVERFLOW) {
  572                                 /*
  573                                  * Make a note so we report the overflow
  574                                  * next time.
  575                                  */
  576                                 ctlr->aen_q_overflow = TW_CL_TRUE;
  577                         }
  578                         user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
  579                         break;
  580                 }
  581                 /* Copy the event -- even if there has been an overflow. */
  582                 tw_osl_memcpy(user_buf->data_buf,
  583                         &(ctlr->aen_queue[event_index]),
  584                         sizeof(struct tw_cl_event_packet));
  585 
  586                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
  587 
  588                 break;
  589 
  590 
  591         case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
  592                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  593                         "Get Previous Event");
  594 
  595                 user_buf->driver_pkt.status = 0;
  596                 if (ctlr->aen_q_wrapped) {
  597                         if (ctlr->aen_q_overflow) {
  598                                 /*
  599                                  * The aen queue has wrapped, even before some
  600                                  * events have been retrieved.  Let the caller
  601                                  * know that he missed out on some AEN's.
  602                                  */
  603                                 user_buf->driver_pkt.status =
  604                                         TW_CL_ERROR_AEN_OVERFLOW;
  605                                 ctlr->aen_q_overflow = TW_CL_FALSE;
  606                         }
  607                         start_index = ctlr->aen_head;
  608                 } else {
  609                         if (ctlr->aen_head == ctlr->aen_tail) {
  610                                 user_buf->driver_pkt.status =
  611                                         TW_CL_ERROR_AEN_NO_EVENTS;
  612                                 break;
  613                         }
  614                         start_index = ctlr->aen_tail;   /* = 0 */
  615                 }
  616                 tw_osl_memcpy(&event_buf, user_buf->data_buf,
  617                         sizeof(struct tw_cl_event_packet));
  618 
  619                 event_index = (start_index + event_buf.sequence_id -
  620                         ctlr->aen_queue[start_index].sequence_id - 1) %
  621                         ctlr->max_aens_supported;
  622 
  623                 if (! (ctlr->aen_queue[event_index].sequence_id <
  624                         event_buf.sequence_id)) {
  625                         /*
  626                          * We don't have any event matching the criterion.  So,
  627                          * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
  628                          * encountered an overflow condition above, we cannot
  629                          * report both conditions during this call.  We choose
  630                          * to report NO_EVENTS this time, and an overflow the
  631                          * next time we are called.
  632                          */
  633                         if (user_buf->driver_pkt.status ==
  634                                 TW_CL_ERROR_AEN_OVERFLOW) {
  635                                 /*
  636                                  * Make a note so we report the overflow
  637                                  * next time.
  638                                  */
  639                                 ctlr->aen_q_overflow = TW_CL_TRUE;
  640                         }
  641                         user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
  642                         break;
  643                 }
  644                 /* Copy the event -- even if there has been an overflow. */
  645                 tw_osl_memcpy(user_buf->data_buf,
  646                         &(ctlr->aen_queue[event_index]),
  647                         sizeof(struct tw_cl_event_packet));
  648 
  649                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
  650 
  651                 break;
  652 
  653 
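        /*
         * Note (illustrative): the lock bookkeeping below works in
         * seconds -- lock_pkt.timeout_msec is converted with "/ 1000"
         * and back with "* 1000" -- so tw_osl_get_local_time() appears
         * to return a seconds-granularity TW_TIME.
         */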
  654         case TW_CL_IOCTL_GET_LOCK:
  655         {
  656                 struct tw_cl_lock_packet        lock_pkt;
  657                 TW_TIME                         cur_time;
  658 
  659                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  660                         "Get ioctl lock");
  661 
  662                 cur_time = tw_osl_get_local_time();
  663                 tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
  664                         sizeof(struct tw_cl_lock_packet));
  665 
  666                 if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
  667                         (lock_pkt.force_flag) ||
  668                         (cur_time >= ctlr->ioctl_lock.timeout)) {
  669                         tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  670                                 "GET_LOCK: Getting lock!");
  671                         ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
  672                         ctlr->ioctl_lock.timeout =
  673                                 cur_time + (lock_pkt.timeout_msec / 1000);
  674                         lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
  675                         user_buf->driver_pkt.status = 0;
  676                 } else {
  677                         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
  678                                 "GET_LOCK: Lock already held!");
  679                         lock_pkt.time_remaining_msec = (TW_UINT32)(
  680                                 (ctlr->ioctl_lock.timeout - cur_time) * 1000);
  681                         user_buf->driver_pkt.status =
  682                                 TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
  683                 }
  684                 tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
  685                         sizeof(struct tw_cl_lock_packet));
  686                 break;
  687         }
  688 
  689 
  690         case TW_CL_IOCTL_RELEASE_LOCK:
  691                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  692                         "Release ioctl lock");
  693 
  694                 if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
  695                         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
  696                                 "twa_ioctl: RELEASE_LOCK: Lock not held!");
  697                         user_buf->driver_pkt.status =
  698                                 TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
  699                 } else {
  700                         tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  701                                 "RELEASE_LOCK: Releasing lock!");
  702                         ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
  703                         user_buf->driver_pkt.status = 0;
  704                 }
  705                 break;
  706 
  707 
  708         case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
  709         {
  710                 struct tw_cl_compatibility_packet       comp_pkt;
  711 
  712                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  713                         "Get compatibility info");
  714 
  715                 tw_osl_memcpy(comp_pkt.driver_version,
  716                         TW_OSL_DRIVER_VERSION_STRING,
  717                         sizeof(TW_OSL_DRIVER_VERSION_STRING));
  718                 comp_pkt.working_srl = ctlr->working_srl;
  719                 comp_pkt.working_branch = ctlr->working_branch;
  720                 comp_pkt.working_build = ctlr->working_build;
  721                 comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
  722                 comp_pkt.driver_branch_high =
  723                         TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
  724                 comp_pkt.driver_build_high =
  725                         TWA_CURRENT_FW_BUILD(ctlr->arch_id);
  726                 comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
  727                 comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
   728                 comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
  729                 comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
  730                 comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
  731                 comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
  732                 user_buf->driver_pkt.status = 0;
  733 
  734                 /* Copy compatibility information to user space. */
  735                 tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
  736                         (sizeof(struct tw_cl_compatibility_packet) <
  737                         user_buf->driver_pkt.buffer_length) ?
  738                         sizeof(struct tw_cl_compatibility_packet) :
  739                         user_buf->driver_pkt.buffer_length);
  740                 break;
  741         }
  742 
  743         default:        
  744                 /* Unknown opcode. */
  745                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
  746                         "Unknown ioctl cmd 0x%x", cmd);
  747                 error = TW_OSL_ENOTTY;
  748         }
  749 
  750         tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
  751         return(error);
  752 }
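/*
 * Hypothetical illustration (not in the driver) of the AEN queue index
 * arithmetic used by TW_CL_IOCTL_GET_NEXT_EVENT above: start_index is the
 * oldest entry (aen_head once the queue has wrapped, aen_tail otherwise),
 * and the caller's last-seen sequence_id is translated into a queue index,
 * wrapping modulo the queue size.
 */
static TW_INT32
example_aen_index_after(TW_INT32 start_index, TW_UINT32 start_seq_id,
        TW_UINT32 caller_seq_id, TW_INT32 max_aens)
{
        return((start_index + (TW_INT32)(caller_seq_id - start_seq_id) + 1) %
                max_aens);
}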
  753 
  754 
  755 
  756 /*
  757  * Function name:       tw_cli_get_param
  758  * Description:         Get a firmware parameter.
  759  *
  760  * Input:               ctlr            -- ptr to per ctlr structure
  761  *                      table_id        -- parameter table #
  762  *                      param_id        -- index of the parameter in the table
  763  *                      param_size      -- size of the parameter in bytes
  764  *                      callback        -- ptr to function, if any, to be called
  765  *                                      back on completion; TW_CL_NULL if no callback.
  766  * Output:              param_data      -- param value
  767  * Return value:        0       -- success
  768  *                      non-zero-- failure
  769  */
  770 TW_INT32
  771 tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
  772         TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
  773         TW_VOID (* callback)(struct tw_cli_req_context *req))
  774 {
  775         struct tw_cli_req_context       *req;
  776         union tw_cl_command_7k          *cmd;
  777         struct tw_cl_param_9k           *param = TW_CL_NULL;
  778         TW_INT32                        error = TW_OSL_EBUSY;
  779 
  780         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
  781 
  782         /* Get a request packet. */
  783         if ((req = tw_cli_get_request(ctlr
  784 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  785                 , TW_CL_NULL
  786 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  787                 )) == TW_CL_NULL)
  788                 goto out;
  789 
  790         /* Make sure this is the only CL internal request at this time. */
  791         if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
  792                 error = TW_OSL_EBUSY;
  793                 goto out;
  794         }
  795         ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
  796 
  797 #ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
  798 
  799         req->cmd_pkt = ctlr->cmd_pkt_buf;
  800         req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
  801         tw_osl_memzero(req->cmd_pkt,
  802                 sizeof(struct tw_cl_command_header) +
  803                 28 /* max bytes before sglist */);
  804 
  805 #endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
  806 
  807         req->data = ctlr->internal_req_data;
  808         req->data_phys = ctlr->internal_req_data_phys;
  809         req->length = TW_CLI_SECTOR_SIZE;
  810         req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
  811 
  812         /* Initialize memory to read data into. */
  813         param = (struct tw_cl_param_9k *)(req->data);
  814         tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);
  815 
  816         /* Build the cmd pkt. */
  817         cmd = &(req->cmd_pkt->command.cmd_pkt_7k);
  818 
  819         req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
  820 
  821         cmd->param.sgl_off__opcode =
  822                 BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
  823         cmd->param.request_id =
  824                 (TW_UINT8)(TW_CL_SWAP16(req->request_id));
  825         cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
  826         cmd->param.param_count = TW_CL_SWAP16(1);
  827 
  828         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
  829                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
  830                         TW_CL_SWAP64(req->data_phys);
  831                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
  832                         TW_CL_SWAP32(req->length);
  833                 cmd->param.size = 2 + 3;
  834         } else {
  835                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
  836                         TW_CL_SWAP32(req->data_phys);
  837                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
  838                         TW_CL_SWAP32(req->length);
  839                 cmd->param.size = 2 + 2;
  840         }
  841 
  842         /* Specify which parameter we need. */
  843         param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
  844         param->parameter_id = (TW_UINT8)(param_id);
  845         param->parameter_size_bytes = TW_CL_SWAP16(param_size);
  846 
  847         /* Submit the command. */
  848         if (callback == TW_CL_NULL) {
  849                 /* There's no call back; wait till the command completes. */
  850                 error = tw_cli_submit_and_poll_request(req,
  851                                 TW_CLI_REQUEST_TIMEOUT_PERIOD);
  852                 if (error == TW_OSL_ETIMEDOUT)
  853                         /* Clean-up done by tw_cli_submit_and_poll_request. */
  854                         return(error);
  855                 if (error)
  856                         goto out;
  857                 if ((error = cmd->param.status)) {
  858                         tw_cli_create_ctlr_event(ctlr,
  859                                 TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
  860                                 &(req->cmd_pkt->cmd_hdr));
  861                         goto out;
  862                 }
  863                 tw_osl_memcpy(param_data, param->data, param_size);
  864                 ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
  865                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
  866         } else {
  867                 /* There's a call back.  Simply submit the command. */
  868                 req->tw_cli_callback = callback;
  869                 if ((error = tw_cli_submit_cmd(req)))
  870                         goto out;
  871         }
  872         return(0);
  873 
  874 out:
  875         tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
  876                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
  877                 0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
  878                 "get_param failed",
  879                 "error = %d", error);
  880         if (param)
  881                 ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
  882         if (req)
  883                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
  884         return(1);
  885 }
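/*
 * Hypothetical caller sketch (not in the driver) for tw_cli_get_param()
 * above: with a TW_CL_NULL callback the routine polls until completion and
 * copies the value into param_data before returning.  The table and
 * parameter ids shown are placeholders, not real firmware constants.
 */
static TW_INT32
example_read_fw_param(struct tw_cli_ctlr_context *ctlr, TW_UINT16 *value)
{
        return(tw_cli_get_param(ctlr,
                0x200 /* placeholder table_id */,
                0x1 /* placeholder param_id */,
                value, sizeof(TW_UINT16),
                TW_CL_NULL /* no callback: poll for completion */));
}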
  886 
  887 
  888 
  889 /*
  890  * Function name:       tw_cli_set_param
  891  * Description:         Set a firmware parameter.
  892  *
  893  * Input:               ctlr            -- ptr to per ctlr structure
  894  *                      table_id        -- parameter table #
  895  *                      param_id        -- index of the parameter in the table
  896  *                      param_size      -- size of the parameter in bytes
  897  *                      callback        -- ptr to function, if any, to be called
  898  *                                      back on completion; TW_CL_NULL if no callback.
  899  * Output:              None
  900  * Return value:        0       -- success
  901  *                      non-zero-- failure
  902  */
  903 TW_INT32
  904 tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
  905         TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
  906         TW_VOID (* callback)(struct tw_cli_req_context *req))
  907 {
  908         struct tw_cli_req_context       *req;
  909         union tw_cl_command_7k          *cmd;
  910         struct tw_cl_param_9k           *param = TW_CL_NULL;
  911         TW_INT32                        error = TW_OSL_EBUSY;
  912 
  913         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
  914 
  915         /* Get a request packet. */
  916         if ((req = tw_cli_get_request(ctlr
  917 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
  918                 , TW_CL_NULL
  919 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
  920                 )) == TW_CL_NULL)
  921                 goto out;
  922 
  923         /* Make sure this is the only CL internal request at this time. */
  924         if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
  925                 error = TW_OSL_EBUSY;
  926                 goto out;
  927         }
  928         ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
  929 
  930 #ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
  931 
  932         req->cmd_pkt = ctlr->cmd_pkt_buf;
  933         req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
  934         tw_osl_memzero(req->cmd_pkt,
  935                 sizeof(struct tw_cl_command_header) +
  936                 28 /* max bytes before sglist */);
  937 
  938 #endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
  939 
  940         req->data = ctlr->internal_req_data;
  941         req->data_phys = ctlr->internal_req_data_phys;
  942         req->length = TW_CLI_SECTOR_SIZE;
  943         req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
  944 
  945         /* Initialize memory to send data using. */
  946         param = (struct tw_cl_param_9k *)(req->data);
  947         tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);
  948 
  949         /* Build the cmd pkt. */
  950         cmd = &(req->cmd_pkt->command.cmd_pkt_7k);
  951 
  952         req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
  953 
  954         cmd->param.sgl_off__opcode =
  955                 BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
  956         cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
  957         cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
  958         cmd->param.param_count = TW_CL_SWAP16(1);
  959 
  960         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
  961                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
  962                         TW_CL_SWAP64(req->data_phys);
  963                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
  964                         TW_CL_SWAP32(req->length);
  965                 cmd->param.size = 2 + 3;
  966         } else {
  967                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
  968                         TW_CL_SWAP32(req->data_phys);
  969                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
  970                         TW_CL_SWAP32(req->length);
  971                 cmd->param.size = 2 + 2;
  972         }
  973 
  974         /* Specify which parameter we want to set. */
  975         param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
  976         param->parameter_id = (TW_UINT8)(param_id);
  977         param->parameter_size_bytes = TW_CL_SWAP16(param_size);
  978         tw_osl_memcpy(param->data, data, param_size);
  979 
  980         /* Submit the command. */
  981         if (callback == TW_CL_NULL) {
  982                 /* There's no call back;  wait till the command completes. */
  983                 error = tw_cli_submit_and_poll_request(req,
  984                         TW_CLI_REQUEST_TIMEOUT_PERIOD);
  985                 if (error == TW_OSL_ETIMEDOUT)
  986                         /* Clean-up done by tw_cli_submit_and_poll_request. */
  987                         return(error);
  988                 if (error)
  989                         goto out;
  990                 if ((error = cmd->param.status)) {
  991                         tw_cli_create_ctlr_event(ctlr,
  992                                 TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
  993                                 &(req->cmd_pkt->cmd_hdr));
  994                         goto out;
  995                 }
  996                 ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
  997                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
  998         } else {
  999                 /* There's a call back.  Simply submit the command. */
 1000                 req->tw_cli_callback = callback;
 1001                 if ((error = tw_cli_submit_cmd(req)))
 1002                         goto out;
 1003         }
 1004         return(error);
 1005 
 1006 out:
 1007         tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
 1008                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
 1009                 0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1010                 "set_param failed",
 1011                 "error = %d", error);
 1012         if (param)
 1013                 ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
 1014         if (req)
 1015                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
 1016         return(error);
 1017 }
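/*
 * Matching hypothetical sketch for tw_cli_set_param() above.  Note that,
 * unlike tw_cli_get_param(), the size argument precedes the data pointer.
 * The ids are placeholders as before.
 */
static TW_INT32
example_write_fw_param(struct tw_cli_ctlr_context *ctlr, TW_UINT16 value)
{
        return(tw_cli_set_param(ctlr,
                0x200 /* placeholder table_id */,
                0x1 /* placeholder param_id */,
                sizeof(TW_UINT16), &value,
                TW_CL_NULL /* no callback: poll for completion */));
}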
 1018 
 1019 
 1020 
 1021 /*
 1022  * Function name:       tw_cli_submit_and_poll_request
 1023  * Description:         Sends down a firmware cmd, and waits for the completion
 1024  *                      in a tight loop.
 1025  *
 1026  * Input:               req     -- ptr to request pkt
 1027  *                      timeout -- max # of seconds to wait before giving up
 1028  * Output:              None
 1029  * Return value:        0       -- success
 1030  *                      non-zero-- failure
 1031  */
 1032 TW_INT32
 1033 tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
 1034         TW_UINT32 timeout)
 1035 {
 1036         struct tw_cli_ctlr_context      *ctlr = req->ctlr;
 1037         TW_TIME                         end_time;
 1038         TW_INT32                        error;
 1039 
 1040         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
 1041 
 1042         /*
 1043          * If the cmd queue is full, tw_cli_submit_cmd will queue this
 1044          * request in the pending queue, since this is an internal request.
 1045          */
 1046         if ((error = tw_cli_submit_cmd(req))) {
 1047                 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
 1048                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
 1049                         0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1050                         "Failed to start internal request",
 1051                         "error = %d", error);
 1052                 return(error);
 1053         }
 1054 
 1055         /*
 1056          * Poll for the response until the command gets completed, or there's
 1057          * a timeout.
 1058          */
 1059         end_time = tw_osl_get_local_time() + timeout;
 1060         do {
 1061                 if ((error = req->error_code))
 1062                         /*
 1063                          * This will take care of completion due to a reset,
 1064                          * or a failure in tw_cli_submit_pending_queue.
 1065                          * The caller should do the clean-up.
 1066                          */
 1067                         return(error);
 1068 
 1069                 /* See if the command completed. */
 1070                 tw_cli_process_resp_intr(ctlr);
 1071 
 1072                 if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
 1073                         (req->state != TW_CLI_REQ_STATE_PENDING))
 1074                         return(req->state != TW_CLI_REQ_STATE_COMPLETE);
 1075         } while (tw_osl_get_local_time() <= end_time);
 1076 
 1077         /* Time out! */
 1078         tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
 1079                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
 1080                 0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1081                 "Internal request timed out",
 1082                 "request = %p", req);
 1083 
 1084         /*
 1085          * We will reset the controller only if the request has already been
 1086          * submitted, so as to not lose the request packet.  If a busy request
 1087          * timed out, the reset will take care of freeing resources.  If a
 1088          * pending request timed out, we will free resources for that request,
 1089          * right here, thereby avoiding a reset.  So, the caller is expected
 1090          * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
 1091          */
 1092 
 1093         /*
 1094          * We have to make sure that this timed out request, if it were in the
 1095          * pending queue, doesn't get submitted while we are here, from
 1096          * tw_cli_submit_pending_queue.  There could be a race in that case.
 1097          * Need to revisit.
 1098          */
 1099         if (req->state != TW_CLI_REQ_STATE_PENDING)
 1100                 tw_cl_reset_ctlr(ctlr->ctlr_handle);
 1101         else {
 1102                 tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
 1103                         "Removing request from pending queue");
 1104                 /*
 1105                  * Request was never submitted.  Clean up.  Note that we did
 1106                  * not do a reset.  So, we have to remove the request ourselves
  1107  * from the pending queue (as against tw_cli_drain_pending_queue
 1108                  * taking care of it).
 1109                  */
 1110                 tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
 1111                 if (req->data)
 1112                         ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
 1113                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
 1114         }
 1115 
 1116         return(TW_OSL_ETIMEDOUT);
 1117 }
 1118 
 1119 
 1120 
 1121 /*
 1122  * Function name:       tw_cl_reset_ctlr
 1123  * Description:         Soft resets and then initializes the controller;
 1124  *                      drains any incomplete requests.
 1125  *
 1126  * Input:               ctlr    -- ptr to per ctlr structure
 1127  * Output:              None
 1128  * Return value:        0       -- success
 1129  *                      non-zero-- failure
 1130  */
 1131 TW_INT32
 1132 tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
 1133 {
 1134         struct tw_cli_ctlr_context      *ctlr =
 1135                 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
 1136         TW_INT32                        reset_attempt = 1;
 1137         TW_INT32                        error;
 1138 
 1139         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");
 1140 
 1141         ctlr->state |= TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
 1142 
 1143         /*
 1144          * Error back all requests in the complete, busy, and pending queues.
 1145          * If any request is already on its way to getting submitted, it's in
 1146          * none of these queues and so, will not be completed.  That request
 1147          * will continue its course and get submitted to the controller after
 1148          * the reset is done (and io_lock is released).
 1149          */
 1150         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
 1151                 "Draining all queues following reset");
 1152         tw_cli_drain_complete_queue(ctlr);
 1153         tw_cli_drain_busy_queue(ctlr);
 1154         tw_cli_drain_pending_queue(ctlr);
 1155 
 1156         tw_cli_disable_interrupts(ctlr);
 1157 
 1158         /* Soft reset the controller. */
 1159 try_reset:
 1160         if ((error = tw_cli_soft_reset(ctlr))) {
 1161                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
 1162                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
 1163                         0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1164                         "Controller reset failed",
 1165                         "error = %d; attempt %d", error, reset_attempt++);
 1166                 if (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS)
 1167                         goto try_reset;
 1168                 else
 1169                         goto out;
 1170         }
 1171 
 1172         /* Re-establish logical connection with the controller. */
 1173         if ((error = tw_cli_init_connection(ctlr,
 1174                         (TW_UINT16)(ctlr->max_simult_reqs),
 1175                         0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
 1176                         TW_CL_NULL, TW_CL_NULL))) {
 1177                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
 1178                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
 1179                         0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1180                         "Can't initialize connection after reset",
 1181                         "error = %d", error);
 1182                 goto out;
 1183         }
 1184 
 1185         tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
 1186                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
 1187                 0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
 1188                 "Controller reset done!",
 1189                 " ");
 1190 
 1191 out:
 1192         ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
 1193         /*
 1194          * Enable interrupts, and also clear attention and response interrupts.
 1195          */
 1196         tw_cli_enable_interrupts(ctlr);
 1197         
 1198         /* Request a bus re-scan. */
 1199         if (!error)
 1200                 tw_osl_scan_bus(ctlr_handle);
 1201         return(error);
 1202 }
 1203 
 1204 
 1205 
 1206 /*
 1207  * Function name:       tw_cli_soft_reset
 1208  * Description:         Does the actual soft reset.
 1209  *
 1210  * Input:               ctlr    -- ptr to per ctlr structure
 1211  * Output:              None
 1212  * Return value:        0       -- success
 1213  *                      non-zero-- failure
 1214  */
 1215 TW_INT32
 1216 tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
 1217 {
 1218         struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
 1219         TW_UINT32                       status_reg;
 1220         TW_UINT32                       error;
 1221 
 1222         tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");
 1223 
 1224         tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
 1225                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
 1226                 0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
 1227                 "Resetting controller...",
 1228                 " ");
 1229 
 1230         /* Don't let any new commands get submitted to the controller. */
 1231         tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
 1232 
 1233         TW_CLI_SOFT_RESET(ctlr_handle);
 1234 
 1235         if (ctlr->device_id == TW_CL_DEVICE_ID_9K_X) {
 1236                 /*
 1237                  * There's a hardware bug in the G133 ASIC, which can lead to
 1238                  * PCI parity errors and hangs, if the host accesses any
 1239                  * registers when the firmware is resetting the hardware, as
 1240                  * part of a hard/soft reset.  The window of time when the
 1241                  * problem can occur is about 10 ms.  Here, we will handshake
 1242                  * with the firmware to find out when the firmware is pulling
 1243                  * down the hardware reset pin, and wait for about 500 ms to
 1244                  * make sure we don't access any hardware registers (for
 1245                  * polling) during that window.
 1246                  */
 1247                 ctlr->state |= TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
 1248                 while (tw_cli_find_response(ctlr,
 1249                         TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) != TW_OSL_ESUCCESS)
 1250                         tw_osl_delay(10);
 1251                 tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
 1252                 ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
 1253         }
 1254 
 1255         if ((error = tw_cli_poll_status(ctlr,
 1256                         TWA_STATUS_MICROCONTROLLER_READY |
 1257                         TWA_STATUS_ATTENTION_INTERRUPT,
 1258                         TW_CLI_RESET_TIMEOUT_PERIOD))) {
 1259                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
 1260                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
 1261                         0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1262                         "Micro-ctlr not ready/No attn intr after reset",
 1263                         "error = %d", error);
 1264                 tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
 1265                 return(error);
 1266         }
 1267 
 1268         TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
 1269                 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
 1270 
 1271         if ((error = tw_cli_drain_response_queue(ctlr))) {
 1272                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
 1273                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
 1274                         0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1275                         "Can't drain response queue after reset",
 1276                         "error = %d", error);
 1277                 tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
 1278                 return(error);
 1279         }
 1280         
 1281         tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
 1282 
 1283         if ((error = tw_cli_drain_aen_queue(ctlr))) {
 1284                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
 1285                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
 1286                         0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1287                         "Can't drain AEN queue after reset",
 1288                         "error = %d", error);
 1289                 return(error);
 1290         }
 1291         
 1292         if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
 1293                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
 1294                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
 1295                         0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1296                         "Reset not reported by controller",
 1297                         "error = %d", error);
 1298                 return(error);
 1299         }
 1300         
 1301         status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
 1302         
 1303         if ((error = TW_CLI_STATUS_ERRORS(status_reg)) ||
 1304                         (error = tw_cli_check_ctlr_state(ctlr, status_reg))) {
 1305                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
 1306                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
 1307                         0x110D, 0x1, TW_CL_SEVERITY_ERROR_STRING,
 1308                         "Controller errors detected after reset",
 1309                         "error = %d", error);
 1310                 return(error);
 1311         }
 1312         
 1313         return(TW_OSL_ESUCCESS);
 1314 }
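      /*
       * Illustrative sketch (editor's addition, not part of this file): the
       * reset path above waits on tw_cli_poll_status() for the firmware to
       * report TWA_STATUS_MICROCONTROLLER_READY and an attention interrupt.
       * A poll loop of that general shape might look as follows; the
       * timeout units, the 1 ms back-off, and the function name are
       * assumptions, and the real helper (defined elsewhere in the Common
       * Layer) may map specific status bits to distinct errors.
       */
      static TW_INT32
      example_poll_status(struct tw_cli_ctlr_context *ctlr, TW_UINT32 status,
              TW_UINT32 timeout_secs)
      {
              struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
              TW_UINT32                       elapsed_us = 0;
              TW_UINT32                       status_reg;

              do {
                      /* Read the status register and check the wanted bits. */
                      status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
                      if ((status_reg & status) == status)
                              return(TW_OSL_ESUCCESS);

                      tw_osl_delay(1000);     /* back off for ~1 ms */
                      elapsed_us += 1000;
              } while (elapsed_us < (timeout_secs * 1000 * 1000));

              return(TW_OSL_ETIMEDOUT);
      }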
 1315 
 1316 
 1317 
 1318 /*
 1319  * Function name:       tw_cli_send_scsi_cmd
 1320  * Description:         Sends down a scsi cmd to fw.
 1321  *
 1322  * Input:               req     -- ptr to request pkt
 1323  *                      cmd     -- opcode of scsi cmd to send
 1324  * Output:              None
 1325  * Return value:        0       -- success
 1326  *                      non-zero-- failure
 1327  */
 1328 TW_INT32
 1329 tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
 1330 {
 1331         struct tw_cl_command_packet     *cmdpkt;
 1332         struct tw_cl_command_9k         *cmd9k;
 1333         struct tw_cli_ctlr_context      *ctlr;
 1334         TW_INT32                        error;
 1335 
 1336         ctlr = req->ctlr;
 1337         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
 1338 
 1339         /* Make sure this is the only CL internal request at this time. */
 1340         if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY)
 1341                 return(TW_OSL_EBUSY);
 1342         ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
 1343         req->data = ctlr->internal_req_data;
 1344         req->data_phys = ctlr->internal_req_data_phys;
 1345         tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
 1346         req->length = TW_CLI_SECTOR_SIZE;
 1347 
 1348         /* Build the cmd pkt. */
 1349         cmdpkt = req->cmd_pkt;
 1350 
 1351         cmdpkt->cmd_hdr.header_desc.size_header = 128;
 1352                 
 1353         cmd9k = &(cmdpkt->command.cmd_pkt_9k);
 1354 
 1355         cmd9k->res__opcode =
 1356                 BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
 1357         cmd9k->unit = 0;
 1358         cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
 1359         cmd9k->status = 0;
 1360         cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
 1361         cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);
 1362 
 1363         if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
 1364                 ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
 1365                         TW_CL_SWAP64(req->data_phys);
 1366                 ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
 1367                         TW_CL_SWAP32(req->length);
 1368         } else {
 1369                 ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
 1370                         TW_CL_SWAP32(req->data_phys);
 1371                 ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
 1372                         TW_CL_SWAP32(req->length);
 1373         }
 1374 
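              /*
               * Editor's note: this path issues a 6-byte CDB (used here for
               * the REQUEST SENSE sent by tw_cli_get_aen() below); byte 0
               * carries the opcode supplied by the caller and byte 4 the
               * allocation length, here 128 bytes.
               */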
 1375         cmd9k->cdb[0] = (TW_UINT8)cmd;
 1376         cmd9k->cdb[4] = 128;
 1377 
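              /*
               * Editor's note: a TW_OSL_EBUSY return from tw_cli_submit_cmd()
               * is not treated as a failure here, presumably because the
               * request has been placed on the pending queue and will be
               * submitted to the controller later.
               */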
 1378         if ((error = tw_cli_submit_cmd(req)))
 1379                 if (error != TW_OSL_EBUSY) {
 1380                         tw_cli_dbg_printf(1, ctlr->ctlr_handle,
 1381                                 tw_osl_cur_func(),
 1382                                 "Failed to start SCSI command",
 1383                                 "request = %p, error = %d", req, error);
 1384                         return(TW_OSL_EIO);
 1385                 }
 1386         return(TW_OSL_ESUCCESS);
 1387 }
 1388 
 1389 
 1390 
 1391 /*
 1392  * Function name:       tw_cli_get_aen
 1393  * Description:         Sends down a Request Sense cmd to fw to fetch an AEN.
 1394  *
 1395  * Input:               ctlr    -- ptr to per ctlr structure
 1396  * Output:              None
 1397  * Return value:        0       -- success
 1398  *                      non-zero-- failure
 1399  */
 1400 TW_INT32
 1401 tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
 1402 {
 1403         struct tw_cli_req_context       *req;
 1404         TW_INT32                        error;
 1405 
 1406         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
 1407 
 1408         if ((req = tw_cli_get_request(ctlr
 1409 #ifdef TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST
 1410                 , TW_CL_NULL
 1411 #endif /* TW_OSL_NON_DMA_MEM_ALLOC_PER_REQUEST */
 1412                 )) == TW_CL_NULL)
 1413                 return(TW_OSL_EBUSY);
 1414 
 1415 #ifdef TW_OSL_DMA_MEM_ALLOC_PER_REQUEST
 1416 
 1417         req->cmd_pkt = ctlr->cmd_pkt_buf;
 1418         req->cmd_pkt_phys = ctlr->cmd_pkt_phys;
 1419         tw_osl_memzero(req->cmd_pkt,
 1420                 sizeof(struct tw_cl_command_header) +
 1421                 28 /* max bytes before sglist */);
 1422 
 1423 #endif /* TW_OSL_DMA_MEM_ALLOC_PER_REQUEST */
 1424 
 1425         req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
 1426         req->flags |= TW_CLI_REQ_FLAGS_9K;
 1427         req->tw_cli_callback = tw_cli_aen_callback;
 1428         if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
 1429                 tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
 1430                         "Could not send SCSI command",
 1431                         "request = %p, error = %d", req, error);
 1432                 if (req->data)
 1433                         ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
 1434                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
 1435         }
 1436         return(error);
 1437 }
 1438 
 1439 
 1440 
 1441 /*
 1442  * Function name:       tw_cli_fill_sg_list
 1443  * Description:         Fills in the scatter/gather list.
 1444  *
 1445  * Input:               ctlr    -- ptr to per ctlr structure
 1446  *                      sgl_src -- ptr to fill the sg list from
 1447  *                      sgl_dest -- ptr to sg list
 1448  *                      num_sgl_entries -- # of segments
 1449  * Output:              None
 1450  * Return value:        None
 1451  */
 1452 TW_VOID
 1453 tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
 1454         TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
 1455 {
 1456         TW_INT32        i;
 1457 
 1458         tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
 1459 
 1460         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
 1461                 struct tw_cl_sg_desc64 *sgl_s =
 1462                         (struct tw_cl_sg_desc64 *)sgl_src;
 1463                 struct tw_cl_sg_desc64 *sgl_d =
 1464                         (struct tw_cl_sg_desc64 *)sgl_dest;
 1465 
 1466                 tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
 1467                         "64 bit addresses");
 1468                 for (i = 0; i < num_sgl_entries; i++) {
 1469                         sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
 1470                         sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
 1471                         sgl_s++;
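                              /*
                               * Editor's note: when the OSL provides 64-bit
                               * SG lengths, each source entry is presumably
                               * 4 bytes larger than tw_cl_sg_desc64 (whose
                               * length field is 32 bits), so step the source
                               * pointer past the extra bytes.
                               */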
 1472                         if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
 1473                                 sgl_s = (struct tw_cl_sg_desc64 *)
 1474                                         (((TW_INT8 *)(sgl_s)) + 4);
 1475                 }
 1476         } else {
 1477                 struct tw_cl_sg_desc32 *sgl_s =
 1478                         (struct tw_cl_sg_desc32 *)sgl_src;
 1479                 struct tw_cl_sg_desc32 *sgl_d =
 1480                         (struct tw_cl_sg_desc32 *)sgl_dest;
 1481 
 1482                 tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
 1483                         "32 bit addresses");
 1484                 for (i = 0; i < num_sgl_entries; i++) {
 1485                         sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
 1486                         sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
 1487                 }
 1488         }
 1489 }
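      /*
       * Illustrative sketch (editor's addition, not part of this file): one
       * way a 32-bit SG list could be copied into a 9K command using the
       * routine above.  The bus addresses and lengths are made-up values;
       * in the driver the source list normally comes from the OS Layer's
       * DMA mapping, and a controller with TW_CL_64BIT_ADDRESSES set would
       * need tw_cl_sg_desc64 source entries instead.
       */
      static TW_VOID
      example_fill_two_entry_sgl(struct tw_cli_ctlr_context *ctlr,
              struct tw_cl_command_9k *cmd9k)
      {
              struct tw_cl_sg_desc32  osl_sgl[2];

              osl_sgl[0].address = 0x1000;    /* hypothetical bus address */
              osl_sgl[0].length = 512;
              osl_sgl[1].address = 0x3000;
              osl_sgl[1].length = 512;

              /* Copy (and byte-swap, where needed) into the command packet. */
              tw_cli_fill_sg_list(ctlr, osl_sgl, cmd9k->sg_list, 2);
              cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(2);
      }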
 1490 
