
FreeBSD/Linux Kernel Cross Reference
sys/dev/tws/tws_cam.c


    1 /*
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2010 LSI Corp. 
    5  * All rights reserved.
    6  * Author : Manjunath Ranganathaiah <manjunath.ranganathaiah@lsi.com>
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD$
   30  */
   31 
   32 #include <dev/tws/tws.h>
   33 #include <dev/tws/tws_services.h>
   34 #include <dev/tws/tws_hdm.h>
   35 #include <dev/tws/tws_user.h>
   36 #include <cam/cam.h>
   37 #include <cam/cam_ccb.h>
   38 #include <cam/cam_sim.h>
   39 #include <cam/cam_xpt_sim.h>
   40 #include <cam/cam_debug.h>
   41 #include <cam/cam_periph.h>
   42 
   43 #include <cam/scsi/scsi_all.h>
   44 #include <cam/scsi/scsi_message.h>
   45 
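       /*
        * Default CAM queue depth: the controller's total request pool minus
        * the slots reserved for internal use (AEN fetches, passthru ioctls
        * and get/set-parameter commands).  Overridable at boot time through
        * the hw.tws.cam_depth loader tunable (see TUNABLE_INT at the end of
        * this file).
        */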
   46 static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
   47 static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
   48 
   49 static void  tws_action(struct cam_sim *sim, union ccb *ccb);
   50 static void  tws_poll(struct cam_sim *sim);
   51 static void tws_scsi_complete(struct tws_request *req);
   52 
   53 void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
   54 int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
   55 int tws_bus_scan(struct tws_softc *sc);
   56 int tws_cam_attach(struct tws_softc *sc);
   57 void tws_cam_detach(struct tws_softc *sc);
   58 void tws_reset(void *arg);
   59 
   60 static void tws_reset_cb(void *arg);
   61 static void tws_reinit(void *arg);
   62 static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
   63 static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req);
   64 static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
   65                             int nseg, int error);
   66 static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, 
   67                             void *sgl_dest, u_int16_t num_sgl_entries);
   68 static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
   69 static void tws_scsi_err_complete(struct tws_request *req, 
   70                                                struct tws_command_header *hdr);
   71 static void tws_passthru_err_complete(struct tws_request *req, 
   72                                                struct tws_command_header *hdr);
   73 
   74 void tws_timeout(void *arg);
   75 static void tws_intr_attn_aen(struct tws_softc *sc);
   76 static void tws_intr_attn_error(struct tws_softc *sc);
   77 static void tws_intr_resp(struct tws_softc *sc);
   78 void tws_intr(void *arg);
   79 void tws_cmd_complete(struct tws_request *req);
   80 void tws_aen_complete(struct tws_request *req);
   81 int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
   82 void tws_getset_param_complete(struct tws_request *req);
   83 int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
   84               u_int32_t param_size, void *data);
   85 int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,  
   86               u_int32_t param_size, void *data);
   87 
   88 extern struct tws_request *tws_get_request(struct tws_softc *sc, 
   89                                             u_int16_t type);
   90 extern void *tws_release_request(struct tws_request *req);
   91 extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
   92 extern boolean tws_get_response(struct tws_softc *sc, 
   93                                            u_int16_t *req_id, u_int64_t *mfa);
   94 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
   95                                 u_int8_t q_type );
   96 extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
   97                                    struct tws_request *req, u_int8_t q_type );
   98 extern void tws_send_event(struct tws_softc *sc, u_int8_t event);
   99 
  100 extern struct tws_sense *
  101 tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
  102 
  103 extern void tws_fetch_aen(void *arg);
  104 extern void tws_disable_db_intr(struct tws_softc *sc);
  105 extern void tws_enable_db_intr(struct tws_softc *sc);
  106 extern void tws_passthru_complete(struct tws_request *req);
  107 extern void tws_aen_synctime_with_host(struct tws_softc *sc);
  108 extern void tws_circular_aenq_insert(struct tws_softc *sc, 
  109                     struct tws_circular_q *cq, struct tws_event_packet *aen);
  110 extern int tws_use_32bit_sgls;
  111 extern boolean tws_ctlr_reset(struct tws_softc *sc);
  112 extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc, 
  113                                                            u_int8_t q_type );
  114 extern void tws_turn_off_interrupts(struct tws_softc *sc);
  115 extern void tws_turn_on_interrupts(struct tws_softc *sc);
  116 extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
  117 extern void tws_init_obfl_q(struct tws_softc *sc);
  118 extern uint8_t tws_get_state(struct tws_softc *sc);
  119 extern void tws_assert_soft_reset(struct tws_softc *sc);
  120 extern boolean tws_ctlr_ready(struct tws_softc *sc);
  121 extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
  122 extern int tws_setup_intr(struct tws_softc *sc, int irqs);
  123 extern int tws_teardown_intr(struct tws_softc *sc);
  124 
  125 int
  126 tws_cam_attach(struct tws_softc *sc)
  127 {
  128     struct cam_devq *devq;
  129 
  130     TWS_TRACE_DEBUG(sc, "entry", 0, sc);
  131     /* Create a device queue for sim */
  132 
   133     /*
   134      * If the user tunes the CAM depth to less than 1,
   135      * CAM may get confused.
   136      */
  137     if ( tws_cam_depth < 1 )
  138         tws_cam_depth = 1;
  139     if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS)  )
  140         tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
  141 
  142     TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
  143 
  144     if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
  145         tws_log(sc, CAM_SIMQ_ALLOC);
  146         return(ENOMEM);
  147     }
  148 
   149    /*
   150     * Create a SIM entry.  Though the controller can support
   151     * tws_queue_depth simultaneous requests, we claim to handle only
   152     * tws_cam_depth of them, so that reserved request packets always
   153     * remain available to service ioctls and internal commands.
   154     */
  155     sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
  156                       device_get_unit(sc->tws_dev), 
  157                       &sc->sim_lock,
  158                       tws_cam_depth, 1, devq);
  159                       /* 1, 1, devq); */
   160     if (sc->sim == NULL) {
   161         cam_simq_free(devq);
   162         tws_log(sc, CAM_SIM_ALLOC);
   163         return(ENOMEM); /* don't fall through and register a NULL sim */
           }
  164     /* Register the bus. */
  165     mtx_lock(&sc->sim_lock);
  166     if (xpt_bus_register(sc->sim, 
  167                          sc->tws_dev, 
  168                          0) != CAM_SUCCESS) {
  169         cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */
  170         sc->sim = NULL; /* so cam_detach will not try to free it */
  171         mtx_unlock(&sc->sim_lock);
  172         tws_log(sc, TWS_XPT_BUS_REGISTER);
  173         return(ENXIO);
  174     }
  175     if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
  176                          CAM_TARGET_WILDCARD,
  177                          CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
  178         xpt_bus_deregister(cam_sim_path(sc->sim));
  179         /* Passing TRUE to cam_sim_free will free the devq as well. */
  180         cam_sim_free(sc->sim, TRUE);
  181         tws_log(sc, TWS_XPT_CREATE_PATH);
  182         mtx_unlock(&sc->sim_lock);
  183         return(ENXIO);
  184     }
  185     mtx_unlock(&sc->sim_lock);
  186 
  187     return(0);
  188 }
  189 
  190 void
  191 tws_cam_detach(struct tws_softc *sc)
  192 {
  193     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
  194     mtx_lock(&sc->sim_lock);
  195     if (sc->path)
  196         xpt_free_path(sc->path);
  197     if (sc->sim) {
  198         xpt_bus_deregister(cam_sim_path(sc->sim));
  199         cam_sim_free(sc->sim, TRUE);
  200     }
  201     mtx_unlock(&sc->sim_lock);
  202 }
  203 
  204 int
  205 tws_bus_scan(struct tws_softc *sc)
  206 {
  207     union ccb       *ccb;
  208 
  209     TWS_TRACE_DEBUG(sc, "entry", sc, 0);
  210     if (!(sc->sim))
  211         return(ENXIO);
  212     ccb = xpt_alloc_ccb();
  213     mtx_lock(&sc->sim_lock);
  214     if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim),
  215                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
  216         mtx_unlock(&sc->sim_lock);
  217         xpt_free_ccb(ccb);
  218         return(EIO);
  219     }
  220     xpt_rescan(ccb);
  221     mtx_unlock(&sc->sim_lock);
  222     return(0);
  223 }
  224 
  225 static void
  226 tws_action(struct cam_sim *sim, union ccb *ccb)
  227 {
  228     struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
  229 
  230     switch( ccb->ccb_h.func_code ) {
  231         case XPT_SCSI_IO:   
  232         {
  233             if ( tws_execute_scsi(sc, ccb) ) 
  234                 TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
  235             break;
  236         }
  237         case XPT_ABORT:
  238         {
  239             TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
  240             ccb->ccb_h.status = CAM_UA_ABORT;
  241             xpt_done(ccb);
  242             break;
  243         }
  244         case XPT_RESET_BUS:
  245         {
  246             TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
  247             break;
  248         }
  249         case XPT_SET_TRAN_SETTINGS:
  250         {
  251             TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
  252             ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
  253             xpt_done(ccb);
  254 
  255             break;
  256         }
  257         case XPT_GET_TRAN_SETTINGS:
  258         {
  259             TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);
  260 
  261             ccb->cts.protocol = PROTO_SCSI;
  262             ccb->cts.protocol_version = SCSI_REV_2;
  263             ccb->cts.transport = XPORT_SPI;
  264             ccb->cts.transport_version = 2;
  265 
  266             ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
  267             ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
  268             ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
  269             ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
  270             ccb->ccb_h.status = CAM_REQ_CMP;
  271             xpt_done(ccb);
  272 
  273             break;
  274         }
  275         case XPT_CALC_GEOMETRY:
  276         {
  277             TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb, 
  278                                           ccb->ccg.block_size);
  279             cam_calc_geometry(&ccb->ccg, 1/* extended */);
  280             xpt_done(ccb);
  281 
  282             break;
  283         }
  284         case XPT_PATH_INQ:
  285         {
  286             TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
  287             ccb->cpi.version_num = 1;
  288             ccb->cpi.hba_inquiry = 0;
  289             ccb->cpi.target_sprt = 0;
  290             ccb->cpi.hba_misc = 0;
  291             ccb->cpi.hba_eng_cnt = 0;
  292             ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
  293             ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
  294             ccb->cpi.unit_number = cam_sim_unit(sim);
  295             ccb->cpi.bus_id = cam_sim_bus(sim);
  296             ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
  297             ccb->cpi.base_transfer_speed = 6000000;
  298             strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
  299             strlcpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
  300             strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
  301             ccb->cpi.transport = XPORT_SPI;
  302             ccb->cpi.transport_version = 2;
  303             ccb->cpi.protocol = PROTO_SCSI;
  304             ccb->cpi.protocol_version = SCSI_REV_2;
  305             ccb->cpi.maxio = TWS_MAX_IO_SIZE;
  306             ccb->ccb_h.status = CAM_REQ_CMP;
  307             xpt_done(ccb);
  308 
  309             break;
  310         }
  311         default:
  312             TWS_TRACE_DEBUG(sc, "default", sim, ccb);
  313             ccb->ccb_h.status = CAM_REQ_INVALID;
  314             xpt_done(ccb);
  315             break;
  316     }
  317 }
  318 
  319 static void
  320 tws_scsi_complete(struct tws_request *req)
  321 {
  322     struct tws_softc *sc = req->sc;
  323 
  324     mtx_lock(&sc->q_lock);
  325     tws_q_remove_request(sc, req, TWS_BUSY_Q);
  326     mtx_unlock(&sc->q_lock);
  327 
  328     callout_stop(&req->timeout);
  329     tws_unmap_request(req->sc, req);
  330 
  331     req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
  332     mtx_lock(&sc->sim_lock);
  333     xpt_done(req->ccb_ptr);
  334     mtx_unlock(&sc->sim_lock);
  335 
  336     mtx_lock(&sc->q_lock);
  337     tws_q_insert_tail(sc, req, TWS_FREE_Q);
  338     mtx_unlock(&sc->q_lock);
  339 }
  340 
  341 void
  342 tws_getset_param_complete(struct tws_request *req)
  343 {
  344     struct tws_softc *sc = req->sc;
  345 
  346     TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);
  347 
  348     callout_stop(&req->timeout);
  349     tws_unmap_request(sc, req);
  350 
  351     free(req->data, M_TWS);
  352 
  353     req->state = TWS_REQ_STATE_FREE;
  354 }
  355 
  356 void
  357 tws_aen_complete(struct tws_request *req)
  358 {
  359     struct tws_softc *sc = req->sc;
  360     struct tws_command_header *sense;
  361     struct tws_event_packet event;
  362     u_int16_t aen_code=0;
  363 
  364     TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);
  365 
  366     callout_stop(&req->timeout);
  367     tws_unmap_request(sc, req);
  368 
  369     sense = (struct tws_command_header *)req->data;
  370 
  371     TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0], 
  372                                    sense->sense_data[2]);
  373     TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id, 
  374                                    sense->status_block.res__severity);
  375     TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum, 
  376                                    sense->status_block.error);
  377     TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header, 
  378                                    sense->header_desc.size_sense);
  379 
  380     aen_code = sense->status_block.error;
  381 
  382     switch ( aen_code ) {
  383         case TWS_AEN_SYNC_TIME_WITH_HOST :
  384             tws_aen_synctime_with_host(sc);
  385             break;
  386         case TWS_AEN_QUEUE_EMPTY :
  387             break;
  388         default :
  389             bzero(&event, sizeof(struct tws_event_packet));
  390             event.sequence_id = sc->seq_id;
  391             event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
  392             event.aen_code = sense->status_block.error;
  393             event.severity = sense->status_block.res__severity & 0x7;
  394             event.event_src = TWS_SRC_CTRL_EVENT;
  395             strcpy(event.severity_str, tws_sev_str[event.severity]);
  396             event.retrieved = TWS_AEN_NOT_RETRIEVED;
  397 
  398             bcopy(sense->err_specific_desc, event.parameter_data, 
  399                                     TWS_ERROR_SPECIFIC_DESC_LEN);
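                   /*
                    * err_specific_desc appears to carry two consecutive
                    * NUL-terminated strings (an error description followed
                    * by its source); parameter_len is extended past the
                    * first terminator below so that both strings travel in
                    * the event packet and can be printed together.
                    */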
  400             event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
  401             event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;
  402 
  403             if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
  404                 event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
  405                                                 event.parameter_len) + 1);
  406             }
  407 
  408             device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
  409                 event.severity_str,
  410                 event.event_src,
  411                 event.aen_code,
  412                 event.parameter_data + 
  413                      (strlen(event.parameter_data) + 1), 
  414                 event.parameter_data);
  415 
  416             mtx_lock(&sc->gen_lock);
  417             tws_circular_aenq_insert(sc, &sc->aen_q, &event);
  418             sc->seq_id++;
  419             mtx_unlock(&sc->gen_lock);
  420             break;
  421     }
  422     
  423     free(req->data, M_TWS);
  424 
  425     req->state = TWS_REQ_STATE_FREE;
  426 
  427     if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
  428         /* timeout(tws_fetch_aen, sc, 1);*/
  429         sc->stats.num_aens++;
  430         tws_fetch_aen((void *)sc);
  431     } 
  432 }
  433 
  434 void
  435 tws_cmd_complete(struct tws_request *req)
  436 {
  437     struct tws_softc *sc = req->sc;
  438 
  439     callout_stop(&req->timeout);
  440     tws_unmap_request(sc, req);
  441 }
  442                                    
  443 static void
  444 tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
  445 {
  446     struct tws_command_header *hdr;
  447     struct tws_sense *sen;
  448     struct tws_request *req;
  449     u_int16_t req_id;
  450     u_int32_t reg, status;
  451 
  452     if ( !mfa ) {
  453         TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
  454         return;
  455     } else {
  456         /* lookup the sense */
  457         sen = tws_find_sense_from_mfa(sc, mfa);
  458         if ( sen == NULL ) {
  459             TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
  460             return;
  461         }
  462         hdr = sen->hdr;
  463         TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
  464         req_id = hdr->header_desc.request_id;
  465         req = &sc->reqs[req_id];
  466         TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
  467         if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
  468             TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
  469     }
  470 
  471     switch (req->type) {
  472         case TWS_REQ_TYPE_PASSTHRU :
  473             tws_passthru_err_complete(req, hdr);
  474             break;
  475         case TWS_REQ_TYPE_GETSET_PARAM :
  476             tws_getset_param_complete(req);
  477             break;
  478         case TWS_REQ_TYPE_SCSI_IO :
  479             tws_scsi_err_complete(req, hdr);
  480             break;
  481             
  482     }
  483 
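       /*
        * Hand the message frame back to the controller by writing the
        * 64-bit MFA to the host outbound queue pointer registers (high
        * word first), then check the status register for an outbound
        * queue overrun.
        */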
  484     mtx_lock(&sc->io_lock);
  485     hdr->header_desc.size_header = 128;
  486     reg = (u_int32_t)( mfa>>32);
  487     tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
  488     reg = (u_int32_t)(mfa);
  489     tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);
  490 
  491     status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
  492     if ( status & TWS_BIT13 ) {
  493         device_printf(sc->tws_dev,  "OBFL Overrun\n");
  494         sc->obfl_q_overrun = true;
  495     }
  496     mtx_unlock(&sc->io_lock);
  497 }
  498 
  499 static void
  500 tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
  501 { 
  502     u_int8_t *sense_data;
  503     struct tws_softc *sc = req->sc;
  504     union ccb *ccb = req->ccb_ptr;
  505 
  506     TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error, 
  507                                  req->cmd_pkt->cmd.pkt_a.status);
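       /*
        * Map the firmware error to a CAM status: a nonexistent or offline
        * unit is reported as CAM_DEV_NOT_THERE for nonzero LUNs and as a
        * selection timeout for LUN 0; everything else is surfaced as a
        * SCSI status error, with the firmware's sense data copied out below.
        */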
  508     if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
  509          hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {
  510         if ( ccb->ccb_h.target_lun ) {
  511             TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
  512             ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
  513         } else {
  514             TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
  515             ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
  516         }
  517 
  518     } else {
  519         TWS_TRACE_DEBUG(sc, "scsi status  error",0,0);
  520         ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
  521         if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) && 
  522               (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
  523             ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
  524             TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
  525         }
  526     }
  527 
   528     /* if no error bits were set above, mark the request as completed with error */
  529     if (ccb->ccb_h.status == 0)
  530         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
  531 
  532     sense_data = (u_int8_t *)&ccb->csio.sense_data;
  533     if (sense_data) {
  534         memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
  535         ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
  536         ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
  537     }
  538     ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;
  539 
  540     ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
  541     mtx_lock(&sc->sim_lock);
  542     xpt_done(ccb);
  543     mtx_unlock(&sc->sim_lock);
  544 
  545     callout_stop(&req->timeout);
  546     tws_unmap_request(req->sc, req);
  547     mtx_lock(&sc->q_lock);
  548     tws_q_remove_request(sc, req, TWS_BUSY_Q);
  549     tws_q_insert_tail(sc, req, TWS_FREE_Q);
  550     mtx_unlock(&sc->q_lock);
  551 }
  552 
  553 static void
  554 tws_passthru_err_complete(struct tws_request *req, 
  555                                           struct tws_command_header *hdr)
  556 { 
  557     TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
  558     req->error_code = hdr->status_block.error;
  559     memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
  560     tws_passthru_complete(req);
  561 }
  562 
  563 static void
  564 tws_drain_busy_queue(struct tws_softc *sc)
  565 {
  566     struct tws_request *req;
  567     union ccb          *ccb;
  568     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
  569 
  570     mtx_lock(&sc->q_lock);
  571     req = tws_q_remove_tail(sc, TWS_BUSY_Q);
  572     mtx_unlock(&sc->q_lock);
  573     while ( req ) {
  574         TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
  575         callout_stop(&req->timeout);
  576 
  577         req->error_code = TWS_REQ_RET_RESET;
  578         ccb = (union ccb *)(req->ccb_ptr);
  579 
  580         ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
  581         ccb->ccb_h.status |=  CAM_REQUEUE_REQ;
  582         ccb->ccb_h.status |=  CAM_SCSI_BUS_RESET;
  583 
  584         tws_unmap_request(req->sc, req);
  585 
  586         mtx_lock(&sc->sim_lock);
  587         xpt_done(req->ccb_ptr);
  588         mtx_unlock(&sc->sim_lock);
  589 
  590         mtx_lock(&sc->q_lock);
  591         tws_q_insert_tail(sc, req, TWS_FREE_Q);
  592         req = tws_q_remove_tail(sc, TWS_BUSY_Q);
  593         mtx_unlock(&sc->q_lock);
  594     } 
  595 }
  596 
  597 static void
  598 tws_drain_reserved_reqs(struct tws_softc *sc)
  599 {
  600     struct tws_request *r;
  601 
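       /*
        * The reserved requests appear to occupy fixed slots in sc->reqs,
        * indexed by request type (AEN fetch, passthru, get/set param).
        * Any of them still outstanding is unwound and flagged with
        * TWS_REQ_RET_RESET so its originator can see that a reset occurred.
        */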
  602     r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH];
  603     if ( r->state != TWS_REQ_STATE_FREE ) {
  604         TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0);
  605         callout_stop(&r->timeout);
  606         tws_unmap_request(sc, r);
  607         free(r->data, M_TWS);
  608         r->state = TWS_REQ_STATE_FREE;
  609         r->error_code = TWS_REQ_RET_RESET;
  610     } 
  611 
  612     r = &sc->reqs[TWS_REQ_TYPE_PASSTHRU];
  613     if ( r->state == TWS_REQ_STATE_BUSY ) {
  614         TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0);
  615         r->error_code = TWS_REQ_RET_RESET;
  616     } 
  617 
  618     r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM];
  619     if ( r->state != TWS_REQ_STATE_FREE ) {
  620         TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0);
  621         callout_stop(&r->timeout);
  622         tws_unmap_request(sc, r);
  623         free(r->data, M_TWS);
  624         r->state = TWS_REQ_STATE_FREE;
  625         r->error_code = TWS_REQ_RET_RESET;
  626     } 
  627 }
  628 
  629 static void
  630 tws_drain_response_queue(struct tws_softc *sc)
  631 {
  632     u_int16_t req_id;
  633     u_int64_t mfa;
  634     while ( tws_get_response(sc, &req_id, &mfa) );
  635 }
  636 
  637 static int32_t
  638 tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
  639 {
  640     struct tws_command_packet *cmd_pkt;
  641     struct tws_request *req;
  642     struct ccb_hdr *ccb_h = &(ccb->ccb_h);
  643     struct ccb_scsiio *csio = &(ccb->csio);
  644     int error;
  645     u_int16_t lun;
  646 
  647     mtx_assert(&sc->sim_lock, MA_OWNED);
  648     if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
   649         TWS_TRACE_DEBUG(sc, "target id too big", ccb_h->target_id, ccb_h->target_lun);
  650         ccb_h->status |= CAM_TID_INVALID;
  651         xpt_done(ccb);
  652         return(0);
  653     }
  654     if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
   655         TWS_TRACE_DEBUG(sc, "target lun too big", ccb_h->target_id, ccb_h->target_lun);
  656         ccb_h->status |= CAM_LUN_INVALID;
  657         xpt_done(ccb);
  658         return(0);
  659     }
  660 
  661     if(ccb_h->flags & CAM_CDB_PHYS) {
  662         TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
  663         ccb_h->status = CAM_REQ_INVALID;
  664         xpt_done(ccb);
  665         return(0);
  666     }
  667 
  668     /*
  669      * We are going to work on this request.  Mark it as enqueued (though
  670      * we don't actually queue it...)
  671      */
  672     ccb_h->status |= CAM_SIM_QUEUED;
  673 
  674     req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
  675     if ( !req ) {
  676         TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
  677         ccb_h->status |= CAM_REQUEUE_REQ;
  678         xpt_done(ccb);
  679         return(0);
  680     }
  681 
  682     if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
  683         if(ccb_h->flags & CAM_DIR_IN)
  684             req->flags |= TWS_DIR_IN;
  685         if(ccb_h->flags & CAM_DIR_OUT)
  686             req->flags |= TWS_DIR_OUT;
  687     } else {
  688         req->flags = TWS_DIR_NONE; /* no data */
  689     }
  690 
  691     req->type = TWS_REQ_TYPE_SCSI_IO;
  692     req->cb = tws_scsi_complete;
  693 
  694     cmd_pkt = req->cmd_pkt;
  695     /* cmd_pkt->hdr.header_desc.size_header = 128; */
  696     cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
  697     cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
  698     cmd_pkt->cmd.pkt_a.status = 0;
  699     cmd_pkt->cmd.pkt_a.sgl_offset = 16;
  700 
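       /*
        * The 8-bit LUN is split across two fields: its low nibble goes
        * into bits 15:12 of lun_l4__req_id (the low 12 bits hold the
        * request id) and its high nibble into bits 15:12 of
        * lun_h4__sgl_entries (whose low bits later receive the SGL entry
        * count in the dmamap callback).
        */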
  701     /* lower nibble */
  702     lun = ccb_h->target_lun & 0XF;
  703     lun = lun << 12;
  704     cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
  705     /* upper nibble */
  706     lun = ccb_h->target_lun & 0XF0;
  707     lun = lun << 8;
  708     cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;
  709 
  710 #ifdef TWS_DEBUG 
  711     if ( csio->cdb_len > 16 ) 
  712          TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
  713 #endif
  714 
  715     if(ccb_h->flags & CAM_CDB_POINTER)
  716         bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
  717     else
  718         bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
  719 
  720     req->data = ccb;
  721     req->flags |= TWS_DATA_CCB;
  722     /* save ccb ptr */
  723     req->ccb_ptr = ccb;
   724     /*
   725      * tws_dmamap_data_load_cbfn (invoked from tws_map_request) will
   726      * fill in the SGL and submit the I/O.
   727      */
  728     sc->stats.scsi_ios++;
  729     callout_reset_sbt(&req->timeout, SBT_1MS * ccb->ccb_h.timeout, 0,
  730       tws_timeout, req, 0);
  731     error = tws_map_request(sc, req);
  732     return(error);
  733 }
  734 
  735 int
  736 tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
  737 {
  738     struct tws_request *req;
  739     struct tws_command_packet *cmd_pkt;
  740     int error;
  741 
  742     TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
  743     req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH);
  744 
  745     if ( req == NULL )
  746         return(ENOMEM);
  747 
  748     req->cb = tws_aen_complete;
  749 
  750     cmd_pkt = req->cmd_pkt;
  751     cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
  752     cmd_pkt->cmd.pkt_a.status = 0;
  753     cmd_pkt->cmd.pkt_a.unit = 0;
  754     cmd_pkt->cmd.pkt_a.sgl_offset = 16;
  755     cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
  756 
  757     cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
  758     cmd_pkt->cmd.pkt_a.cdb[4] = 128;
  759 
  760     req->length = TWS_SECTOR_SIZE;
  761     req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
  762     if ( req->data == NULL )
  763         return(ENOMEM);
  764     bzero(req->data, TWS_SECTOR_SIZE);
  765     req->flags = TWS_DIR_IN;
  766 
  767     callout_reset(&req->timeout, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
  768     error = tws_map_request(sc, req);
  769     return(error);
  770 
  771 }
  772 
  773 int
  774 tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
  775               u_int32_t param_size, void *data)
  776 {
  777     struct tws_request *req;
  778     struct tws_command_packet *cmd_pkt;
  779     union tws_command_giga *cmd;
  780     struct tws_getset_param *param;
  781     int error;
  782 
  783     req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
  784     if ( req == NULL ) {
  785         TWS_TRACE_DEBUG(sc, "null req", 0, 0);
  786         return(ENOMEM);
  787     }
  788 
  789     req->length = TWS_SECTOR_SIZE;
  790     req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
  791     if ( req->data == NULL )
  792         return(ENOMEM);
  793     bzero(req->data, TWS_SECTOR_SIZE);
  794     param = (struct tws_getset_param *)req->data;
  795 
  796     req->cb = tws_getset_param_complete;
  797     req->flags = TWS_DIR_OUT;
  798     cmd_pkt = req->cmd_pkt;
  799 
  800     cmd = &cmd_pkt->cmd.pkt_g;
  801     cmd->param.sgl_off__opcode =
  802             BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
  803     cmd->param.request_id = (u_int8_t)req->request_id;
  804     cmd->param.host_id__unit = 0;
  805     cmd->param.param_count = 1;
  806     cmd->param.size = 2; /* map routine will add sgls */
  807 
  808     /* Specify which parameter we want to set. */
  809     param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
  810     param->parameter_id = (u_int8_t)(param_id);
  811     param->parameter_size_bytes = (u_int16_t)param_size;
  812     memcpy(param->data, data, param_size);
  813 
  814     callout_reset(&req->timeout, (TWS_IOCTL_TIMEOUT * hz), tws_timeout, req);
  815     error = tws_map_request(sc, req);
  816     return(error);
  817 
  818 }
  819 
  820 int
  821 tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
  822               u_int32_t param_size, void *data)
  823 {
  824     struct tws_request *req;
  825     struct tws_command_packet *cmd_pkt;
  826     union tws_command_giga *cmd;
  827     struct tws_getset_param *param;
  828     u_int16_t reqid;
  829     u_int64_t mfa;
  830     int error = SUCCESS;
  831 
  832     req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
  833     if ( req == NULL ) {
  834         TWS_TRACE_DEBUG(sc, "null req", 0, 0);
  835         return(FAILURE);
  836     }
  837 
  838     req->length = TWS_SECTOR_SIZE;
  839     req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
  840     if ( req->data == NULL )
  841         return(FAILURE);
  842     bzero(req->data, TWS_SECTOR_SIZE);
  843     param = (struct tws_getset_param *)req->data;
  844 
  845     req->cb = NULL;
  846     req->flags = TWS_DIR_IN;
  847     cmd_pkt = req->cmd_pkt;
  848 
  849     cmd = &cmd_pkt->cmd.pkt_g;
  850     cmd->param.sgl_off__opcode =
  851             BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
  852     cmd->param.request_id = (u_int8_t)req->request_id;
  853     cmd->param.host_id__unit = 0;
  854     cmd->param.param_count = 1;
  855     cmd->param.size = 2; /* map routine will add sgls */
  856 
   857     /* Specify which parameter we want to get. */
  858     param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
  859     param->parameter_id = (u_int8_t)(param_id);
  860     param->parameter_size_bytes = (u_int16_t)param_size;
  861    
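       /*
        * Get-parameter requests complete by polling rather than through
        * the interrupt path (req->cb is NULL): submit the request, then
        * spin in tws_poll4_response() for its completion.  Reserved
        * requests appear to use their request type as their request id,
        * hence the comparison against TWS_REQ_TYPE_GETSET_PARAM below.
        */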
  862     error = tws_map_request(sc, req);
  863     if (!error) {
  864         reqid = tws_poll4_response(sc, &mfa);
  865         tws_unmap_request(sc, req);
  866 
  867         if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
  868             memcpy(data, param->data, param_size);
  869         } else {
  870             error = FAILURE;
  871         }
  872     }
  873   
  874     free(req->data, M_TWS);
  875     req->state = TWS_REQ_STATE_FREE;
  876     return(error);
  877 
  878 }
  879 
  880 void 
  881 tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
  882 {
  883     if (req->data != NULL) {
  884         if ( req->flags & TWS_DIR_IN )
  885             bus_dmamap_sync(sc->data_tag, req->dma_map, 
  886                                             BUS_DMASYNC_POSTREAD);
  887         if ( req->flags & TWS_DIR_OUT )
  888             bus_dmamap_sync(sc->data_tag, req->dma_map, 
  889                                             BUS_DMASYNC_POSTWRITE);
  890         mtx_lock(&sc->io_lock);
  891         bus_dmamap_unload(sc->data_tag, req->dma_map);
  892         mtx_unlock(&sc->io_lock);
  893     }
  894 }
  895 
  896 int32_t
  897 tws_map_request(struct tws_softc *sc, struct tws_request *req)
  898 {
  899     int32_t error = 0;
  900 
  901     /* If the command involves data, map that too. */       
  902     if (req->data != NULL) {
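           /*
            * SCSI I/O may have its mapping deferred (BUS_DMA_WAITOK can
            * return EINPROGRESS, handled below by freezing the SIM queue);
            * internal and passthru requests use BUS_DMA_NOWAIT so they
            * fail immediately instead of being deferred.
            */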
  903         int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
  904 
  905         /*
  906          * Map the data buffer into bus space and build the SG list.
  907          */
  908         mtx_lock(&sc->io_lock);
  909         if (req->flags & TWS_DATA_CCB)
  910                 error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map,
  911                                             req->data,
  912                                             tws_dmamap_data_load_cbfn, req,
  913                                             my_flags);
  914         else
  915                 error = bus_dmamap_load(sc->data_tag, req->dma_map,
  916                                         req->data, req->length,
  917                                         tws_dmamap_data_load_cbfn, req,
  918                                         my_flags);
  919         mtx_unlock(&sc->io_lock);
  920 
  921         if (error == EINPROGRESS) {
  922             TWS_TRACE(sc, "in progress", 0, error);
  923             tws_freeze_simq(sc, req);
  924             error = 0;  // EINPROGRESS is not a fatal error.
  925         } 
  926     } else { /* no data involved */
  927         error = tws_submit_command(sc, req);
  928     }
  929     return(error);
  930 }
  931 
  932 static void
  933 tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs, 
  934                             int nseg, int error)
  935 {
  936     struct tws_request *req = (struct tws_request *)arg;
  937     struct tws_softc *sc = req->sc;
  938     u_int16_t sgls = nseg;
  939     void *sgl_ptr;
  940     struct tws_cmd_generic *gcmd;
  941 
  942     if ( error ) {
  943         TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0);
  944     }
  945 
  946     if ( error == EFBIG ) {
  947         TWS_TRACE(sc, "not enough data segs", 0, nseg);
  948         req->error_code = error;
  949         req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
  950         return;
  951     }
  952 
  953     if ( req->flags & TWS_DIR_IN )
  954         bus_dmamap_sync(req->sc->data_tag, req->dma_map, 
  955                                             BUS_DMASYNC_PREREAD);
  956     if ( req->flags & TWS_DIR_OUT )
  957         bus_dmamap_sync(req->sc->data_tag, req->dma_map, 
  958                                         BUS_DMASYNC_PREWRITE);
  959     if ( segs ) {
  960         if ( (req->type == TWS_REQ_TYPE_PASSTHRU && 
  961              GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) != 
  962                             TWS_FW_CMD_EXECUTE_SCSI) ||
  963               req->type == TWS_REQ_TYPE_GETSET_PARAM) {
  964             gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
  965             sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
  966             gcmd->size += sgls * 
  967                           ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2 );
  968             tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);
  969 
  970         } else {
  971             tws_fill_sg_list(req->sc, (void *)segs, 
  972                       (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls);
  973             req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
  974         }
  975     }
  976 
  977     req->error_code = tws_submit_command(req->sc, req);
  978 
  979 }
  980 
  981 static void
  982 tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest, 
  983                           u_int16_t num_sgl_entries)
  984 {
  985     int i;
  986 
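       /*
        * Convert the bus_dma_segment_t array built by busdma into the
        * firmware's scatter/gather descriptor format: 64-bit descriptors
        * when the controller is 64-bit capable and 32-bit SGLs are not
        * forced, 32-bit descriptors otherwise.  In the 64-bit-controller
        * cases the source pointer is advanced by sizeof(bus_dma_segment_t)
        * per entry, not by the descriptor size.
        */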
  987     if ( sc->is64bit ) {
  988         struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;
  989 
  990         if ( !tws_use_32bit_sgls ) { 
  991             struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
  992             if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
  993                 TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
  994             for (i = 0; i < num_sgl_entries; i++) {
  995                 sgl_d[i].address = sgl_s->address;
  996                 sgl_d[i].length = sgl_s->length;
  997                 sgl_d[i].flag = 0;
  998                 sgl_d[i].reserved = 0;
  999                 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) + 
 1000                                                sizeof(bus_dma_segment_t));
 1001             }
 1002         } else {
 1003             struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
 1004             if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
 1005                 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
 1006             for (i = 0; i < num_sgl_entries; i++) {
 1007                 sgl_d[i].address = sgl_s->address;
 1008                 sgl_d[i].length = sgl_s->length;
 1009                 sgl_d[i].flag = 0;
 1010                 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) + 
 1011                                                sizeof(bus_dma_segment_t));
 1012             }
 1013         }
 1014     } else {
 1015         struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
 1016         struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
 1017 
 1018         if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
 1019             TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
 1020 
 1021         for (i = 0; i < num_sgl_entries; i++) {
 1022             sgl_d[i].address = sgl_s[i].address;
 1023             sgl_d[i].length = sgl_s[i].length;
 1024             sgl_d[i].flag = 0;
 1025         }
 1026     }
 1027 }
 1028 
 1029 void
 1030 tws_intr(void *arg)
 1031 {
 1032     struct tws_softc *sc = (struct tws_softc *)arg;
 1033     u_int32_t histat=0, db=0;
 1034 
  1035     if (!(sc)) {
  1036         printf("tws: null softc!!!\n"); /* sc is NULL; device_printf(sc->tws_dev, ...) would fault */
  1037         return;
  1038     }
 1039 
 1040     if ( tws_get_state(sc) == TWS_RESET ) {
 1041         return;
 1042     }
 1043 
 1044     if ( tws_get_state(sc) != TWS_ONLINE ) {
 1045         return;
 1046     }
 1047 
 1048     sc->stats.num_intrs++;
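       /*
        * Decode the host interrupt status: bit 2 signals a doorbell
        * (attention) interrupt whose cause is read from the inbound
        * doorbell register (controller error or pending AENs), and bit 3
        * signals completed requests waiting on the response queue.
        */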
 1049     histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
 1050     if ( histat & TWS_BIT2 ) {
 1051         TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
 1052         db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
 1053         if ( db & TWS_BIT21 ) {
 1054             tws_intr_attn_error(sc);
 1055             return;
 1056         }
 1057         if ( db & TWS_BIT18 ) {
 1058             tws_intr_attn_aen(sc);
 1059         }
 1060     }
 1061 
 1062     if ( histat & TWS_BIT3 ) {
 1063         tws_intr_resp(sc);
 1064     }
 1065 }
 1066 
 1067 static void
 1068 tws_intr_attn_aen(struct tws_softc *sc)
 1069 {
 1070 
  1071     /* mask off doorbell interrupts until all the AENs are fetched */
 1072     /* tws_disable_db_intr(sc); */
 1073     tws_fetch_aen((void *)sc);
 1074     tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
 1075     (void)tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
 1076 
 1077 }
 1078 
 1079 static void
 1080 tws_intr_attn_error(struct tws_softc *sc)
 1081 {
 1082 
 1083     TWS_TRACE(sc, "attn error", 0, 0);
 1084     tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
 1085     (void)tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
 1086     device_printf(sc->tws_dev, "Micro controller error.\n");
 1087     tws_reset(sc);
 1088 }
 1089 
 1090 static void
 1091 tws_intr_resp(struct tws_softc *sc)
 1092 {
 1093     u_int16_t req_id;
 1094     u_int64_t mfa;
 1095 
 1096     while ( tws_get_response(sc, &req_id, &mfa) ) {
 1097         sc->stats.reqs_out++;
 1098         if ( req_id == TWS_INVALID_REQID ) {
 1099             TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
 1100             sc->stats.reqs_errored++;
 1101             tws_err_complete(sc, mfa);
 1102             continue;
 1103         }
 1104         sc->reqs[req_id].cb(&sc->reqs[req_id]);
 1105     }
 1106 
 1107 }
 1108 
 1109 static void
 1110 tws_poll(struct cam_sim *sim)
 1111 {
 1112     struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
 1113     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
 1114     tws_intr((void *) sc);
 1115 }
 1116 
 1117 void
 1118 tws_timeout(void *arg)
 1119 {
 1120     struct tws_request *req = (struct tws_request *)arg;
 1121     struct tws_softc *sc = req->sc;
 1122 
 1123     if ( req->error_code == TWS_REQ_RET_RESET ) {
 1124         return;
 1125     }
 1126 
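       /*
        * Re-check under the general lock; a reset completing concurrently
        * could have marked this request as reset after the unlocked test
        * above.
        */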
 1127     mtx_lock(&sc->gen_lock);
 1128     if ( req->error_code == TWS_REQ_RET_RESET ) {
 1129         mtx_unlock(&sc->gen_lock);
 1130         return;
 1131     }
 1132 
 1133     if ( tws_get_state(sc) == TWS_RESET ) {
 1134         mtx_unlock(&sc->gen_lock);
 1135         return;
 1136     }
 1137 
 1138     xpt_freeze_simq(sc->sim, 1);
 1139 
 1140     tws_send_event(sc, TWS_RESET_START);
 1141 
 1142     if (req->type == TWS_REQ_TYPE_SCSI_IO) {
 1143         device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n");
 1144     } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
 1145         device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n");
 1146     } else {
 1147         device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n");
 1148     }
 1149 
 1150     tws_assert_soft_reset(sc);
 1151     tws_turn_off_interrupts(sc);
 1152     tws_reset_cb( (void*) sc );
 1153     tws_reinit( (void*) sc );
 1154 
 1155 //  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
 1156     tws_send_event(sc, TWS_RESET_COMPLETE);
 1157     mtx_unlock(&sc->gen_lock);
 1158 
 1159     xpt_release_simq(sc->sim, 1);
 1160 }
 1161 
 1162 void
 1163 tws_reset(void *arg)
 1164 {
 1165     struct tws_softc *sc = (struct tws_softc *)arg;
 1166 
 1167     mtx_lock(&sc->gen_lock);
 1168     if ( tws_get_state(sc) == TWS_RESET ) {
 1169         mtx_unlock(&sc->gen_lock);
 1170         return;
 1171     }
 1172 
 1173     xpt_freeze_simq(sc->sim, 1);
 1174 
 1175     tws_send_event(sc, TWS_RESET_START);
 1176 
 1177     device_printf(sc->tws_dev,  "Resetting controller\n");
 1178 
 1179     tws_assert_soft_reset(sc);
 1180     tws_turn_off_interrupts(sc);
 1181     tws_reset_cb( (void*) sc );
 1182     tws_reinit( (void*) sc );
 1183 
 1184 //  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
 1185     tws_send_event(sc, TWS_RESET_COMPLETE);
 1186     mtx_unlock(&sc->gen_lock);
 1187 
 1188     xpt_release_simq(sc->sim, 1);
 1189 }
 1190 
 1191 static void
 1192 tws_reset_cb(void *arg)
 1193 {
 1194     struct tws_softc *sc = (struct tws_softc *)arg;
 1195     time_t endt;
 1196     int found = 0;
 1197     u_int32_t reg;
 1198   
 1199     if ( tws_get_state(sc) != TWS_RESET ) {
 1200         return;
 1201     }
 1202 
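       /*
        * Unwind everything that was outstanding (busy queue, reserved
        * requests, response queue), then poll the TWS_I2O0_SCRPD3 register
        * for up to TWS_POLL_TIMEOUT seconds waiting for the firmware's
        * controller-ready flag (bit 13).
        */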
 1203 //  device_printf(sc->tws_dev,  "Draining Busy Queue\n");
 1204     tws_drain_busy_queue(sc);
 1205 //  device_printf(sc->tws_dev,  "Draining Reserved Reqs\n");
 1206     tws_drain_reserved_reqs(sc);
 1207 //  device_printf(sc->tws_dev,  "Draining Response Queue\n");
 1208     tws_drain_response_queue(sc);
 1209 
 1210 //  device_printf(sc->tws_dev,  "Looking for controller ready flag...\n");
 1211     endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT;
 1212     while ((TWS_LOCAL_TIME <= endt) && (!found)) {
 1213         reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
 1214         if ( reg & TWS_BIT13 ) {
 1215             found = 1;
 1216 //          device_printf(sc->tws_dev,  " ... Got it!\n");
 1217         }
 1218     }
 1219     if ( !found )
 1220             device_printf(sc->tws_dev,  " ... Controller ready flag NOT found!\n");
 1221 }
 1222 
 1223 static void
 1224 tws_reinit(void *arg)
 1225 {
 1226     struct tws_softc *sc = (struct tws_softc *)arg;
 1227     int timeout_val=0;
 1228     int try=2;
 1229     int done=0;
 1230 
 1231 //  device_printf(sc->tws_dev,  "Waiting for Controller Ready\n");
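       /*
        * Wait for the controller to come ready, sleeping in 5 second
        * steps.  If TWS_RESET_TIMEOUT elapses, assert soft reset once
        * more and retry a second time before giving up.
        */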
 1232     while ( !done && try ) {
 1233         if ( tws_ctlr_ready(sc) ) {
 1234             done = 1;
 1235             break;
 1236         } else {
 1237             timeout_val += 5;
 1238             if ( timeout_val >= TWS_RESET_TIMEOUT ) {
 1239                timeout_val = 0;
 1240                if ( try )
 1241                    tws_assert_soft_reset(sc);
 1242                try--;
 1243             }
 1244             mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
 1245         }
 1246     }
 1247 
 1248     if (!done) {
 1249         device_printf(sc->tws_dev,  "FAILED to get Controller Ready!\n");
 1250         return;
 1251     }
 1252 
 1253     sc->obfl_q_overrun = false;
 1254 //  device_printf(sc->tws_dev,  "Sending initConnect\n");
 1255     if ( tws_init_connect(sc, tws_queue_depth) ) {
 1256         TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
 1257     }
 1258     tws_init_obfl_q(sc);
 1259 
 1260     tws_turn_on_interrupts(sc);
 1261 
 1262     wakeup_one(sc);
 1263 }
 1264 
 1265 static void
 1266 tws_freeze_simq(struct tws_softc *sc, struct tws_request *req)
 1267 {
 1268     /* Only for IO commands */
 1269     if (req->type == TWS_REQ_TYPE_SCSI_IO) {
 1270         union ccb   *ccb = (union ccb *)(req->ccb_ptr);
 1271 
 1272         xpt_freeze_simq(sc->sim, 1);
 1273         ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 1274         ccb->ccb_h.status |= CAM_REQUEUE_REQ;
 1275     }
 1276 }
 1277 
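       /* Allow the default CAM queue depth to be overridden at boot via the
        * hw.tws.cam_depth loader tunable. */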
 1278 TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);
