The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/advansys/advlib.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Low level routines for the Advanced Systems Inc. SCSI controllers chips
    3  *
    4  * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions, and the following disclaimer,
   12  *    without modification, immediately at the beginning of the file.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  */
   31 /*
   32  * Ported from:
   33  * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
   34  *     
   35  * Copyright (c) 1995-1996 Advanced System Products, Inc.
   36  * All Rights Reserved.
   37  *   
   38  * Redistribution and use in source and binary forms, with or without
   39  * modification, are permitted provided that redistributions of source
   40  * code retain the above copyright notice and this comment without
   41  * modification.
   42  */
   43 
   44 #include <sys/cdefs.h>
   45 __FBSDID("$FreeBSD: releng/5.3/sys/dev/advansys/advlib.c 119418 2003-08-24 17:55:58Z obrien $");
   46 
   47 #include <sys/param.h>
   48 #include <sys/kernel.h>
   49 #include <sys/systm.h>
   50 
   51 #include <machine/bus_pio.h>
   52 #include <machine/bus.h>
   53 #include <machine/resource.h>
   54 #include <sys/bus.h> 
   55 #include <sys/rman.h> 
   56 
   57 #include <cam/cam.h>
   58 #include <cam/cam_ccb.h>
   59 #include <cam/cam_sim.h>
   60 #include <cam/cam_xpt_sim.h>
   61 
   62 #include <cam/scsi/scsi_all.h>
   63 #include <cam/scsi/scsi_message.h>
   64 #include <cam/scsi/scsi_da.h>
   65 #include <cam/scsi/scsi_cd.h>
   66 
   67 #include <vm/vm.h>
   68 #include <vm/vm_param.h>
   69 #include <vm/pmap.h>
   70 
   71 #include <dev/advansys/advansys.h>
   72 #include <dev/advansys/advmcode.h>
   73 
/*
 * One entry in the per-device quirk table: an INQUIRY match pattern plus
 * the set of async-transfer workaround flags to apply to matching devices.
 */
struct adv_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;    /* vendor/product/rev pattern */
        u_int8_t quirks;                        /* OR of the ADV_QUIRK_* flags */
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS  0x01    /* always apply the async fix */
#define ADV_QUIRK_FIX_ASYN_XFER         0x02    /* device needs the async fix */
};
   80 
/*
 * Quirk table consulted from advasync() on AC_FOUND_DEVICE.  Entries are
 * matched in order via cam_quirkmatch(); the final wildcard entry must
 * match any device (advasync() panics if no entry matches).
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
        {
                { T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
                ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
        },
        {
                { T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
                0
        },
        {
                {
                  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
                  "TANDBERG", " TDC 36", "*"
                },
                0
        },
        {
                { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
                0
        },
        {
                {
                  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  "*", "*", "*"
                },
                0
        },
        {
                {
                  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  "*", "*", "*"
                },
                0
        },
        {
                /* Default quirk entry */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                }, 
                ADV_QUIRK_FIX_ASYN_XFER,
        }
};
  125 
/*
 * Allowable synchronous transfer periods, in ns, for non-Ultra chips.
 * Indexed by the period code exchanged in SDTR negotiation; selected
 * into adv->sdtr_period_tbl by adv_lib_init().
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
        25,
        30,
        35,
        40,
        50,
        60,
        70,
        85
};
  140 
/*
 * Allowable synchronous transfer periods, in ns, for Ultra-capable chips
 * (ADV_ULTRA set in adv->type).  See adv_lib_init().
 */
static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
        12,
        19,
        25,
        32,
        38,
        44,
        50,
        57,
        63,
        69,
        75,
        82,
        88, 
        94,
        100,
        107
};
  160 
/*
 * In-memory image of a SCSI extended message (MSG_EXTENDED), as used for
 * SDTR/WDTR negotiation and modify-data-pointer messages.
 */
struct ext_msg {
        u_int8_t msg_type;      /* MSG_EXTENDED */
        u_int8_t msg_len;       /* length of the extended message body */
        u_int8_t msg_req;       /* extended message code (SDTR/WDTR/MDP) */
        union {
                struct {
                        u_int8_t sdtr_xfer_period;      /* period factor */
                        u_int8_t sdtr_req_ack_offset;   /* REQ/ACK offset */
                } sdtr;
                struct {
                        u_int8_t wdtr_width;            /* bus width code */
                } wdtr;
                struct {
                        u_int8_t mdp[4];        /* modify-data-pointer bytes */
                } mdp;
        } u_ext_msg;
        u_int8_t res;           /* reserved/padding */
};

/* Shorthand accessors for the union members above. */
#define xfer_period     u_ext_msg.sdtr.sdtr_xfer_period
#define req_ack_offset  u_ext_msg.sdtr.sdtr_req_ack_offset
#define wdtr_width      u_ext_msg.wdtr.wdtr_width
/*
 * NOTE(review): the four macros below reference u_ext_msg.mdp_b3..mdp_b0,
 * but the struct only declares the array u_ext_msg.mdp.mdp[4].  They appear
 * stale and would not compile if used; none of the visible code uses them.
 */
#define mdp_b3          u_ext_msg.mdp_b3
#define mdp_b2          u_ext_msg.mdp_b2
#define mdp_b1          u_ext_msg.mdp_b1
#define mdp_b0          u_ext_msg.mdp_b0
  187 
  188 /*
  189  * Some of the early PCI adapters have problems with
  190  * async transfers.  Instead use an offset of 1.
  191  */
  192 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
  193 
  194 /* LRAM routines */
  195 static void      adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
  196                                         u_int16_t *buffer, int count);
  197 static void      adv_write_lram_16_multi(struct adv_softc *adv,
  198                                          u_int16_t s_addr, u_int16_t *buffer,
  199                                          int count);
  200 static void      adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
  201                                   u_int16_t set_value, int count);
  202 static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
  203                                   int count);
  204 
  205 static int       adv_write_and_verify_lram_16(struct adv_softc *adv,
  206                                               u_int16_t addr, u_int16_t value);
  207 static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
  208 
  209 
  210 static void      adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
  211                                    u_int32_t value);
  212 static void      adv_write_lram_32_multi(struct adv_softc *adv,
  213                                          u_int16_t s_addr, u_int32_t *buffer,
  214                                          int count);
  215 
  216 /* EEPROM routines */
  217 static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
  218 static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
  219                                      u_int16_t value);
  220 static int       adv_write_eeprom_cmd_reg(struct adv_softc *adv,
  221                                           u_int8_t cmd_reg);
  222 static int       adv_set_eeprom_config_once(struct adv_softc *adv,
  223                                             struct adv_eeprom_config *eeconfig);
  224 
  225 /* Initialization */
  226 static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
  227                                     u_int16_t *mcode_buf, u_int16_t mcode_size);
  228 
  229 static void      adv_reinit_lram(struct adv_softc *adv);
  230 static void      adv_init_lram(struct adv_softc *adv);
  231 static int       adv_init_microcode_var(struct adv_softc *adv);
  232 static void      adv_init_qlink_var(struct adv_softc *adv);
  233 
  234 /* Interrupts */
  235 static void      adv_disable_interrupt(struct adv_softc *adv);
  236 static void      adv_enable_interrupt(struct adv_softc *adv);
  237 static void      adv_toggle_irq_act(struct adv_softc *adv);
  238 
  239 /* Chip Control */
  240 static int       adv_host_req_chip_halt(struct adv_softc *adv);
  241 static void      adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
  242 #if UNUSED
  243 static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
  244 #endif
  245 
  246 /* Queue handling and execution */
  247 static __inline int
  248                  adv_sgcount_to_qcount(int sgcount);
  249 
  250 static __inline int
  251 adv_sgcount_to_qcount(int sgcount)
  252 {
  253         int     n_sg_list_qs;
  254 
  255         n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
  256         if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
  257                 n_sg_list_qs++;
  258         return (n_sg_list_qs + 1);
  259 }
  260 
  261 #if BYTE_ORDER == BIG_ENDIAN
  262 static void      adv_adj_endian_qdone_info(struct adv_q_done_info *);
  263 static void      adv_adj_scsiq_endian(struct adv_scsi_q *);
  264 #endif
  265 static void      adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
  266                                 u_int16_t *inbuf, int words);
  267 static u_int     adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
  268 static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
  269                                        u_int8_t free_q_head, u_int8_t n_free_q);
  270 static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
  271                                       u_int8_t free_q_head);
  272 static int       adv_send_scsi_queue(struct adv_softc *adv,
  273                                      struct adv_scsi_q *scsiq,
  274                                      u_int8_t n_q_required);
  275 static void      adv_put_ready_sg_list_queue(struct adv_softc *adv,
  276                                              struct adv_scsi_q *scsiq,
  277                                              u_int q_no);
  278 static void      adv_put_ready_queue(struct adv_softc *adv,
  279                                      struct adv_scsi_q *scsiq, u_int q_no);
  280 static void      adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
  281                                u_int16_t *buffer, int words);
  282 
  283 /* Messages */
  284 static void      adv_handle_extmsg_in(struct adv_softc *adv,
  285                                       u_int16_t halt_q_addr, u_int8_t q_cntl,
  286                                       target_bit_vector target_id,
  287                                       int tid);
  288 static void      adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
  289                                  u_int8_t sdtr_offset);
  290 static void      adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
  291                                         u_int8_t sdtr_data);
  292 
  293 
  294 /* Exported functions first */
  295 
  296 void
  297 advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
  298 {
  299         struct adv_softc *adv;
  300 
  301         adv = (struct adv_softc *)callback_arg;
  302         switch (code) {
  303         case AC_FOUND_DEVICE:
  304         {
  305                 struct ccb_getdev *cgd;
  306                 target_bit_vector target_mask;
  307                 int num_entries;
  308                 caddr_t match;
  309                 struct adv_quirk_entry *entry;
  310                 struct adv_target_transinfo* tinfo;
  311  
  312                 cgd = (struct ccb_getdev *)arg;
  313 
  314                 target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
  315 
  316                 num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
  317                 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
  318                                        (caddr_t)adv_quirk_table,
  319                                        num_entries, sizeof(*adv_quirk_table),
  320                                        scsi_inquiry_match);
  321         
  322                 if (match == NULL)
  323                         panic("advasync: device didn't match wildcard entry!!");
  324 
  325                 entry = (struct adv_quirk_entry *)match;
  326 
  327                 if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
  328                         if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
  329                                 adv->fix_asyn_xfer_always |= target_mask;
  330                         else
  331                                 adv->fix_asyn_xfer_always &= ~target_mask;
  332                         /*
  333                          * We start out life with all bits set and clear them
  334                          * after we've determined that the fix isn't necessary.
  335                          * It may well be that we've already cleared a target
  336                          * before the full inquiry session completes, so don't
  337                          * gratuitously set a target bit even if it has this
  338                          * quirk.  But, if the quirk exonerates a device, clear
  339                          * the bit now.
  340                          */
  341                         if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
  342                                 adv->fix_asyn_xfer &= ~target_mask;
  343                 }
  344                 /*
  345                  * Reset our sync settings now that we've determined
  346                  * what quirks are in effect for the device.
  347                  */
  348                 tinfo = &adv->tinfo[cgd->ccb_h.target_id];
  349                 adv_set_syncrate(adv, cgd->ccb_h.path,
  350                                  cgd->ccb_h.target_id,
  351                                  tinfo->current.period,
  352                                  tinfo->current.offset,
  353                                  ADV_TRANS_CUR);
  354                 break;
  355         }
  356         case AC_LOST_DEVICE:
  357         {
  358                 u_int target_mask;
  359 
  360                 if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
  361                         target_mask = 0x01 << xpt_path_target_id(path);
  362                         adv->fix_asyn_xfer |= target_mask;
  363                 }
  364 
  365                 /*
  366                  * Revert to async transfers
  367                  * for the next device.
  368                  */
  369                 adv_set_syncrate(adv, /*path*/NULL,
  370                                  xpt_path_target_id(path),
  371                                  /*period*/0,
  372                                  /*offset*/0,
  373                                  ADV_TRANS_GOAL|ADV_TRANS_CUR);
  374         }
  375         default:
  376                 break;
  377         }
  378 }
  379 
  380 void
  381 adv_set_bank(struct adv_softc *adv, u_int8_t bank)
  382 {
  383         u_int8_t control;
  384 
  385         /*
  386          * Start out with the bank reset to 0
  387          */
  388         control = ADV_INB(adv, ADV_CHIP_CTRL)
  389                   &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
  390                         | ADV_CC_DIAG | ADV_CC_SCSI_RESET
  391                         | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
  392         if (bank == 1) {
  393                 control |= ADV_CC_BANK_ONE;
  394         } else if (bank == 2) {
  395                 control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
  396         }
  397         ADV_OUTB(adv, ADV_CHIP_CTRL, control);
  398 }
  399 
  400 u_int8_t
  401 adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
  402 {
  403         u_int8_t   byte_data;
  404         u_int16_t  word_data;
  405 
  406         /*
  407          * LRAM is accessed on 16bit boundaries.
  408          */
  409         ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
  410         word_data = ADV_INW(adv, ADV_LRAM_DATA);
  411         if (addr & 1) {
  412 #if BYTE_ORDER == BIG_ENDIAN
  413                 byte_data = (u_int8_t)(word_data & 0xFF);
  414 #else
  415                 byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
  416 #endif
  417         } else {
  418 #if BYTE_ORDER == BIG_ENDIAN
  419                 byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
  420 #else           
  421                 byte_data = (u_int8_t)(word_data & 0xFF);
  422 #endif
  423         }
  424         return (byte_data);
  425 }
  426 
  427 void
  428 adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
  429 {
  430         u_int16_t word_data;
  431 
  432         word_data = adv_read_lram_16(adv, addr & 0xFFFE);
  433         if (addr & 1) {
  434                 word_data &= 0x00FF;
  435                 word_data |= (((u_int8_t)value << 8) & 0xFF00);
  436         } else {
  437                 word_data &= 0xFF00;
  438                 word_data |= ((u_int8_t)value & 0x00FF);
  439         }
  440         adv_write_lram_16(adv, addr & 0xFFFE, word_data);
  441 }
  442 
  443 
  444 u_int16_t
  445 adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
  446 {
  447         ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
  448         return (ADV_INW(adv, ADV_LRAM_DATA));
  449 }
  450 
  451 void
  452 adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
  453 {
  454         ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
  455         ADV_OUTW(adv, ADV_LRAM_DATA, value);
  456 }
  457 
  458 /*
  459  * Determine if there is a board at "iobase" by looking
  460  * for the AdvanSys signatures.  Return 1 if a board is
  461  * found, 0 otherwise.
  462  */
  463 int                         
  464 adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
  465 {                            
  466         u_int16_t signature;
  467 
  468         if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
  469                 signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
  470                 if ((signature == ADV_1000_ID0W)
  471                  || (signature == ADV_1000_ID0W_FIX))
  472                         return (1);
  473         }
  474         return (0);
  475 }
  476 
  477 void
  478 adv_lib_init(struct adv_softc *adv)
  479 {
  480         if ((adv->type & ADV_ULTRA) != 0) {
  481                 adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
  482                 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
  483         } else {
  484                 adv->sdtr_period_tbl = adv_sdtr_period_tbl;
  485                 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);                
  486         }
  487 }
  488 
/*
 * Read the adapter's EEPROM configuration into *eeprom_config and return
 * the 16-bit sum of all configuration words read (for comparison against
 * the checksum word, which is read last but NOT added to the sum).
 * The structure is filled word-by-word through the wbuf cursor.
 */
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
                      adv_eeprom_config  *eeprom_config)
{
        u_int16_t       sum;
        u_int16_t       *wbuf;
        u_int8_t        cfg_beg;
        u_int8_t        cfg_end;
        u_int8_t        s_addr;

        wbuf = (u_int16_t *)eeprom_config;
        sum = 0;

        /* Words 0 and 1 are always present and always summed. */
        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                *wbuf = adv_read_eeprom_16(adv, s_addr);
                sum += *wbuf;
        }

        /* VL boards use a different config window within the EEPROM. */
        if (adv->type & ADV_VL) {
                cfg_beg = ADV_EEPROM_CFG_BEG_VL;
                cfg_end = ADV_EEPROM_MAX_ADDR_VL;
        } else {
                cfg_beg = ADV_EEPROM_CFG_BEG;
                cfg_end = ADV_EEPROM_MAX_ADDR;
        }

        /* Sum everything up to, but not including, the checksum word. */
        for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
                *wbuf = adv_read_eeprom_16(adv, s_addr);
                sum += *wbuf;
#if ADV_DEBUG_EEPROM
                printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
        }
        /* Final word (stored checksum) is copied but excluded from the sum. */
        *wbuf = adv_read_eeprom_16(adv, s_addr);
        return (sum);
}
  525 
  526 int
  527 adv_set_eeprom_config(struct adv_softc *adv,
  528                       struct adv_eeprom_config *eeprom_config)
  529 {
  530         int     retry;
  531 
  532         retry = 0;
  533         while (1) {
  534                 if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
  535                         break;
  536                 }
  537                 if (++retry > ADV_EEPROM_MAX_RETRY) {
  538                         break;
  539                 }
  540         }
  541         return (retry > ADV_EEPROM_MAX_RETRY);
  542 }
  543 
/*
 * Reset the controller chip (and optionally the SCSI bus).  The exact
 * sequence and delays below are hardware-mandated; do not reorder.
 * Returns non-zero if the chip reports halted afterwards.
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
        adv_stop_chip(adv);
        /* Assert chip reset + halt, plus SCSI bus reset if requested. */
        ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
                                     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
        DELAY(60);

        adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
        adv_set_chip_ih(adv, ADV_INS_HALT);

        /* Deassert the SCSI reset line while keeping the chip held. */
        if (reset_bus)
                ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

        /* Release chip reset; leave the chip halted. */
        ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
        if (reset_bus)
                DELAY(200 * 1000);      /* allow the bus to settle */

        /* Acknowledge any latched SCSI-reset interrupt and clear status. */
        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
        ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
        return (adv_is_chip_halted(adv));
}
  566 
  567 int
  568 adv_test_external_lram(struct adv_softc* adv)
  569 {
  570         u_int16_t       q_addr;
  571         u_int16_t       saved_value;
  572         int             success;
  573 
  574         success = 0;
  575 
  576         q_addr = ADV_QNO_TO_QADDR(241);
  577         saved_value = adv_read_lram_16(adv, q_addr);
  578         if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
  579                 success = 1;
  580                 adv_write_lram_16(adv, q_addr, saved_value);
  581         }
  582         return (success);
  583 }
  584 
  585 
  586 int
  587 adv_init_lram_and_mcode(struct adv_softc *adv)
  588 {
  589         u_int32_t       retval;
  590 
  591         adv_disable_interrupt(adv);
  592 
  593         adv_init_lram(adv);
  594 
  595         retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
  596                                     adv_mcode_size);
  597         if (retval != adv_mcode_chksum) {
  598                 printf("adv%d: Microcode download failed checksum!\n",
  599                        adv->unit);
  600                 return (1);
  601         }
  602         
  603         if (adv_init_microcode_var(adv) != 0)
  604                 return (1);
  605 
  606         adv_enable_interrupt(adv);
  607         return (0);
  608 }
  609 
  610 u_int8_t
  611 adv_get_chip_irq(struct adv_softc *adv)
  612 {
  613         u_int16_t       cfg_lsw;
  614         u_int8_t        chip_irq;
  615 
  616         cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
  617 
  618         if ((adv->type & ADV_VL) != 0) {
  619                 chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
  620                 if ((chip_irq == 0) ||
  621                     (chip_irq == 4) ||
  622                     (chip_irq == 7)) {
  623                         return (0);
  624                 }
  625                 return (chip_irq + (ADV_MIN_IRQ_NO - 1));
  626         }
  627         chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
  628         if (chip_irq == 3)
  629                 chip_irq += 2;
  630         return (chip_irq + ADV_MIN_IRQ_NO);
  631 }
  632 
/*
 * Program the chip's IRQ selection and return the IRQ actually decoded
 * back from the hardware.  The VL path performs two write/toggle cycles
 * whose order is hardware-mandated; do not reorder the register writes.
 * NOTE(review): magic masks 0xFFE3/0xFFE0/0xFFF3 and bit 0x0010 are
 * chip-register layout; presumed from the vendor reference — verify
 * against the AdvanSys register documentation before changing.
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
        u_int16_t       cfg_lsw;

        if ((adv->type & ADV_VL) != 0) {
                /* Map the requested IRQ into the chip's 3-bit code. */
                if (irq_no != 0) {
                        if ((irq_no < ADV_MIN_IRQ_NO)
                         || (irq_no > ADV_MAX_IRQ_NO)) {
                                irq_no = 0;     /* out of range: disable */
                        } else {
                                irq_no -= ADV_MIN_IRQ_NO - 1;
                        }
                }
                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
                cfg_lsw |= 0x0010;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
                adv_toggle_irq_act(adv);

                /* Second pass: write the actual IRQ code bits. */
                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
                cfg_lsw |= (irq_no & 0x07) << 2;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
                adv_toggle_irq_act(adv);
        } else if ((adv->type & ADV_ISA) != 0) {
                /* ISA: 2-bit code; IRQ 15 is encoded as if it were 13. */
                if (irq_no == 15)
                        irq_no -= 2;
                irq_no -= ADV_MIN_IRQ_NO;
                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
                cfg_lsw |= (irq_no & 0x03) << 2;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
        }
        /* Read back what the hardware actually latched. */
        return (adv_get_chip_irq(adv));
}
  666 
  667 void
  668 adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
  669 {
  670         u_int16_t cfg_lsw;
  671 
  672         cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
  673         if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
  674                 return;
  675         cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
  676         cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
  677         ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
  678 }
  679 
/*
 * Prepare and submit a SCSI request to the controller's queue ring.
 * Handles renegotiation for REQUEST SENSE, scatter/gather queue sizing,
 * the "async transfer with offset 1" workaround, and the odd-address
 * DMA ("extra bytes") workaround.  Returns 0 if the request was queued,
 * 1 if there was no room (unless the request is QC_URGENT).
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                       u_int32_t datalen)
{
        struct          adv_target_transinfo* tinfo;
        u_int32_t       *p_data_addr;
        u_int32_t       *p_data_bcount;
        int             disable_syn_offset_one_fix;
        int             retval;
        u_int           n_q_required;
        u_int32_t       addr;
        u_int8_t        sg_entry_cnt;
        u_int8_t        target_ix;
        u_int8_t        sg_entry_cnt_minus_one;
        u_int8_t        tid_no;

        scsiq->q1.q_no = 0;
        retval = 1;  /* Default to error case */
        target_ix = scsiq->q2.target_ix;
        tid_no = ADV_TIX_TO_TID(target_ix);
        tinfo = &adv->tinfo[tid_no];

        if (scsiq->cdbptr[0] == REQUEST_SENSE) {
                /* Renegotiate if appropriate. */
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_CUR);
                if (tinfo->current.period != tinfo->goal.period) {
                        adv_msgout_sdtr(adv, tinfo->goal.period,
                                        tinfo->goal.offset);
                        scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
                }
        }

        if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
                /* Scatter/gather request: size the queue requirement. */
                sg_entry_cnt = scsiq->sg_head->entry_cnt;
                sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
                if (sg_entry_cnt <= 1) 
                        panic("adv_execute_scsi_queue: Queue "
                              "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

                if (sg_entry_cnt > ADV_MAX_SG_LIST)
                        panic("adv_execute_scsi_queue: "
                              "Queue with too many segs.");

                /* ISA/VL/EISA DMA requires word-aligned segment ends. */
                if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
                        int i;

                        for (i = 0; i < sg_entry_cnt_minus_one; i++) {
                                addr = scsiq->sg_head->sg_list[i].addr +
                                       scsiq->sg_head->sg_list[i].bytes;

                                if ((addr & 0x0003) != 0)
                                        panic("adv_execute_scsi_queue: SG "
                                              "with odd address or byte count");
                        }
                }
#endif
                /* Workarounds below patch the final segment in place. */
                p_data_addr =
                    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
                p_data_bcount =
                    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

                n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
                scsiq->sg_head->queue_cnt = n_q_required - 1;
        } else {
                /* Single contiguous buffer: one queue suffices. */
                p_data_addr = &scsiq->q1.data_addr;
                p_data_bcount = &scsiq->q1.data_cnt;
                n_q_required = 1;
        }

        disable_syn_offset_one_fix = FALSE;

        /*
         * The offset-1 async fix is skipped for small transfers and for
         * commands known to return short, variable-length data.
         */
        if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
         && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

                if (datalen != 0) {
                        if (datalen < 512) {
                                disable_syn_offset_one_fix = TRUE;
                        } else {
                                if (scsiq->cdbptr[0] == INQUIRY
                                 || scsiq->cdbptr[0] == REQUEST_SENSE
                                 || scsiq->cdbptr[0] == READ_CAPACITY
                                 || scsiq->cdbptr[0] == MODE_SELECT_6 
                                 || scsiq->cdbptr[0] == MODE_SENSE_6
                                 || scsiq->cdbptr[0] == MODE_SENSE_10 
                                 || scsiq->cdbptr[0] == MODE_SELECT_10 
                                 || scsiq->cdbptr[0] == READ_TOC) {
                                        disable_syn_offset_one_fix = TRUE;
                                }
                        }
                }
        }

        if (disable_syn_offset_one_fix) {
                /* Tagging is incompatible with the disabled-fix mode. */
                scsiq->q2.tag_code &=
                    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
                scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
                                     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
        }

        /*
         * "Extra bytes" workaround: trim reads that end on an odd DMA
         * address and let the microcode transfer the tail separately.
         */
        if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
         && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
                u_int8_t extra_bytes;

                addr = *p_data_addr + *p_data_bcount;
                extra_bytes = addr & 0x0003;
                if (extra_bytes != 0
                 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
                  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
                        scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
                        scsiq->q1.extra_bytes = extra_bytes;
                        *p_data_bcount -= extra_bytes;
                }
        }

        /* Submit only if enough free queues exist, or the request is urgent. */
        if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
         || ((scsiq->q1.cntl & QC_URGENT) != 0))
                retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

        return (retval);
}
  804 
  805 
/*
 * Copy the completion ("done") information for a finished request out
 * of chip LRAM at q_addr into scsiq.  Returns the number of S/G queues
 * that were chained to this request so the caller can release them.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
                    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
        u_int16_t val;
        u_int8_t  sg_queue_cnt;

        /* Bulk copy the d2/d3 sections of the queue's done info. */
        adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
                       (u_int16_t *)scsiq,
                       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
        adv_adj_endian_qdone_info(scsiq);
#endif

        /* Each 16 bit LRAM word below packs two 8 bit fields. */
        val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
        scsiq->q_status = val & 0xFF;
        scsiq->q_no = (val >> 8) & 0XFF;

        val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
        scsiq->cntl = val & 0xFF;
        sg_queue_cnt = (val >> 8) & 0xFF;

        val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
        scsiq->sense_len = val & 0xFF;
        scsiq->extra_bytes = (val >> 8) & 0xFF;

        /*
         * Due to a bug in accessing LRAM on the 940UA, the residual
         * is split into separate high and low 16bit quantities.
         */
        scsiq->remain_bytes =
            adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
        scsiq->remain_bytes |=
            adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

        /*
         * XXX Is this just a safeguard or will the counter really
         * have bogus upper bits?
         */
        scsiq->remain_bytes &= max_dma_count;

        return (sg_queue_cnt);
}
  850 
  851 int
  852 adv_start_chip(struct adv_softc *adv)
  853 {
  854         ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
  855         if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
  856                 return (0);
  857         return (1);
  858 }
  859 
  860 int
  861 adv_stop_execution(struct adv_softc *adv)
  862 {
  863         int count;
  864 
  865         count = 0;
  866         if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
  867                 adv_write_lram_8(adv, ADV_STOP_CODE_B,
  868                                  ADV_STOP_REQ_RISC_STOP);
  869                 do {
  870                         if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
  871                                 ADV_STOP_ACK_RISC_STOP) {
  872                                 return (1);
  873                         }
  874                         DELAY(1000);
  875                 } while (count++ < 20);
  876         }
  877         return (0);
  878 }
  879 
  880 int
  881 adv_is_chip_halted(struct adv_softc *adv)
  882 {
  883         if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
  884                 if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
  885                         return (1);
  886                 }
  887         }
  888         return (0);
  889 }
  890 
  891 /*
  892  * XXX The numeric constants and the loops in this routine
  893  * need to be documented.
  894  */
  895 void
  896 adv_ack_interrupt(struct adv_softc *adv)
  897 {
  898         u_int8_t        host_flag;
  899         u_int8_t        risc_flag;
  900         int             loop;
  901 
  902         loop = 0;
  903         do {
  904                 risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
  905                 if (loop++ > 0x7FFF) {
  906                         break;
  907                 }
  908         } while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);
  909 
  910         host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
  911         adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
  912                          host_flag | ADV_HOST_FLAG_ACK_INT);
  913 
  914         ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
  915         loop = 0;
  916         while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
  917                 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
  918                 if (loop++ > 3) {
  919                         break;
  920                 }
  921         }
  922 
  923         adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
  924 }
  925 
  926 /*
  927  * Handle all conditions that may halt the chip waiting
  928  * for us to intervene.
  929  */
  930 void
  931 adv_isr_chip_halted(struct adv_softc *adv)
  932 {
  933         u_int16_t         int_halt_code;
  934         u_int16_t         halt_q_addr;
  935         target_bit_vector target_mask;
  936         target_bit_vector scsi_busy;
  937         u_int8_t          halt_qp;
  938         u_int8_t          target_ix;
  939         u_int8_t          q_cntl;
  940         u_int8_t          tid_no;
  941 
  942         int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
  943         halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
  944         halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
  945         target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
  946         q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
  947         tid_no = ADV_TIX_TO_TID(target_ix);
  948         target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
  949         if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
  950                 /*
  951                  * Temporarily disable the async fix by removing
  952                  * this target from the list of affected targets,
  953                  * setting our async rate, and then putting us
  954                  * back into the mask.
  955                  */
  956                 adv->fix_asyn_xfer &= ~target_mask;
  957                 adv_set_syncrate(adv, /*struct cam_path */NULL,
  958                                  tid_no, /*period*/0, /*offset*/0,
  959                                  ADV_TRANS_ACTIVE);
  960                 adv->fix_asyn_xfer |= target_mask;
  961         } else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
  962                 adv_set_syncrate(adv, /*struct cam_path */NULL,
  963                                  tid_no, /*period*/0, /*offset*/0,
  964                                  ADV_TRANS_ACTIVE);
  965         } else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
  966                 adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
  967                                      target_mask, tid_no);
  968         } else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
  969                 struct    adv_target_transinfo* tinfo;
  970                 union     ccb *ccb;
  971                 u_int32_t cinfo_index;
  972                 u_int8_t  tag_code;
  973                 u_int8_t  q_status;
  974 
  975                 tinfo = &adv->tinfo[tid_no];
  976                 q_cntl |= QC_REQ_SENSE;
  977 
  978                 /* Renegotiate if appropriate. */
  979                 adv_set_syncrate(adv, /*struct cam_path */NULL,
  980                                  tid_no, /*period*/0, /*offset*/0,
  981                                  ADV_TRANS_CUR);
  982                 if (tinfo->current.period != tinfo->goal.period) {
  983                         adv_msgout_sdtr(adv, tinfo->goal.period,
  984                                         tinfo->goal.offset);
  985                         q_cntl |= QC_MSG_OUT;
  986                 }
  987                 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
  988 
  989                 /* Don't tag request sense commands */
  990                 tag_code = adv_read_lram_8(adv,
  991                                            halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
  992                 tag_code &=
  993                     ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
  994 
  995                 if ((adv->fix_asyn_xfer & target_mask) != 0
  996                  && (adv->fix_asyn_xfer_always & target_mask) == 0) {
  997                         tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
  998                                  | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
  999                 }
 1000                 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
 1001                                  tag_code);
 1002                 q_status = adv_read_lram_8(adv,
 1003                                            halt_q_addr + ADV_SCSIQ_B_STATUS);
 1004                 q_status |= (QS_READY | QS_BUSY);
 1005                 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
 1006                                  q_status);
 1007                 /*
 1008                  * Freeze the devq until we can handle the sense condition.
 1009                  */
 1010                 cinfo_index =
 1011                     adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
 1012                 ccb = adv->ccb_infos[cinfo_index].ccb;
 1013                 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
 1014                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
 1015                 adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
 1016                               /*ccb*/NULL, CAM_REQUEUE_REQ,
 1017                               /*queued_only*/TRUE);
 1018                 scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
 1019                 scsi_busy &= ~target_mask;
 1020                 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
 1021                 /*
 1022                  * Ensure we have enough time to actually
 1023                  * retrieve the sense.
 1024                  */
 1025                 untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
 1026                 ccb->ccb_h.timeout_ch =
 1027                     timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
 1028         } else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
 1029                 struct  ext_msg out_msg;
 1030 
 1031                 adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
 1032                                        (u_int16_t *) &out_msg,
 1033                                        sizeof(out_msg)/2);
 1034 
 1035                 if ((out_msg.msg_type == MSG_EXTENDED)
 1036                  && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
 1037                  && (out_msg.msg_req == MSG_EXT_SDTR)) {
 1038 
 1039                         /* Revert to Async */
 1040                         adv_set_syncrate(adv, /*struct cam_path */NULL,
 1041                                          tid_no, /*period*/0, /*offset*/0,
 1042                                          ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
 1043                 }
 1044                 q_cntl &= ~QC_MSG_OUT;
 1045                 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
 1046         } else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
 1047                 u_int8_t scsi_status;
 1048                 union ccb *ccb;
 1049                 u_int32_t cinfo_index;
 1050                 
 1051                 scsi_status = adv_read_lram_8(adv, halt_q_addr
 1052                                               + ADV_SCSIQ_SCSI_STATUS);
 1053                 cinfo_index =
 1054                     adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
 1055                 ccb = adv->ccb_infos[cinfo_index].ccb;
 1056                 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
 1057                 ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
 1058                 ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL; 
 1059                 adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
 1060                               /*ccb*/NULL, CAM_REQUEUE_REQ,
 1061                               /*queued_only*/TRUE);
 1062                 scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
 1063                 scsi_busy &= ~target_mask;
 1064                 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);              
 1065         } else {
 1066                 printf("Unhandled Halt Code %x\n", int_halt_code);
 1067         }
 1068         adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
 1069 }
 1070 
 1071 void
 1072 adv_sdtr_to_period_offset(struct adv_softc *adv,
 1073                           u_int8_t sync_data, u_int8_t *period,
 1074                           u_int8_t *offset, int tid)
 1075 {
 1076         if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
 1077          && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
 1078                 *period = *offset = 0;
 1079         } else {
 1080                 *period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
 1081                 *offset = sync_data & 0xF;
 1082         }
 1083 }
 1084 
/*
 * Set the synchronous transfer rate for target 'tid'.  'type' selects
 * which of the current/goal/user settings to update.  When the current
 * (active) settings change, the chip is briefly halted so the hardware
 * SDTR register can be updated, and CAM is notified if a path is given.
 * period/offset of 0/0 means asynchronous.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
                 u_int tid, u_int period, u_int offset, u_int type)
{
        struct adv_target_transinfo* tinfo;
        u_int old_period;
        u_int old_offset;
        u_int8_t sdtr_data;

        tinfo = &adv->tinfo[tid];

        /* Filter our input */
        sdtr_data = adv_period_offset_to_sdtr(adv, &period,
                                              &offset, tid);

        old_period = tinfo->current.period;
        old_offset = tinfo->current.offset;

        if ((type & ADV_TRANS_CUR) != 0
         && ((old_period != period || old_offset != offset)
          || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
                int s;
                int halted;

                /* Block interrupts while we touch chip state. */
                s = splcam();
                halted = adv_is_chip_halted(adv);
                if (halted == 0)
                        /* Must halt the chip first */
                        adv_host_req_chip_halt(adv);

                /* Update current hardware settings */
                adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

                /*
                 * If a target can run in sync mode, we don't need
                 * to check it for sync problems.
                 */
                if (offset != 0)
                        adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

                if (halted == 0)
                        /* Start the chip again */
                        adv_start_chip(adv);

                splx(s);
                tinfo->current.period = period;
                tinfo->current.offset = offset;

                if (path != NULL) {
                        /*
                         * Tell the SCSI layer about the
                         * new transfer parameters.
                         */
                        struct  ccb_trans_settings neg;

                        neg.sync_period = period;
                        neg.sync_offset = offset;
                        neg.valid = CCB_TRANS_SYNC_RATE_VALID
                                  | CCB_TRANS_SYNC_OFFSET_VALID;
                        xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
                        xpt_async(AC_TRANSFER_NEG, path, &neg);
                }
        }

        if ((type & ADV_TRANS_GOAL) != 0) {
                tinfo->goal.period = period;
                tinfo->goal.offset = offset;
        }

        if ((type & ADV_TRANS_USER) != 0) {
                tinfo->user.period = period;
                tinfo->user.offset = offset;
        }
}
 1159 
 1160 u_int8_t
 1161 adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
 1162                           u_int *offset, int tid)
 1163 {
 1164         u_int i;
 1165         u_int dummy_offset;
 1166         u_int dummy_period;
 1167 
 1168         if (offset == NULL) {
 1169                 dummy_offset = 0;
 1170                 offset = &dummy_offset;
 1171         }
 1172 
 1173         if (period == NULL) {
 1174                 dummy_period = 0;
 1175                 period = &dummy_period;
 1176         }
 1177 
 1178         *offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
 1179         if (*period != 0 && *offset != 0) {
 1180                 for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
 1181                         if (*period <= adv->sdtr_period_tbl[i]) {
 1182                                 /*       
 1183                                  * When responding to a target that requests
 1184                                  * sync, the requested  rate may fall between
 1185                                  * two rates that we can output, but still be
 1186                                  * a rate that we can receive.  Because of this,
 1187                                  * we want to respond to the target with
 1188                                  * the same rate that it sent to us even
 1189                                  * if the period we use to send data to it
 1190                                  * is lower.  Only lower the response period
 1191                                  * if we must.
 1192                                  */        
 1193                                 if (i == 0 /* Our maximum rate */)
 1194                                         *period = adv->sdtr_period_tbl[0];
 1195                                 return ((i << 4) | *offset);
 1196                         }
 1197                 }
 1198         }
 1199         
 1200         /* Must go async */
 1201         *period = 0;
 1202         *offset = 0;
 1203         if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
 1204                 return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
 1205         return (0);
 1206 }
 1207 
 1208 /* Internal Routines */
 1209 
/*
 * Stream 'count' 16 bit words from chip LRAM starting at s_addr into
 * buffer.  One address-register write suffices: the LRAM data port
 * advances on each access (see adv_read_lram_32/adv_msum_lram_16,
 * which read successive words after a single address write).
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                       u_int16_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
 1217 
/*
 * Stream 'count' 16 bit words from buffer into chip LRAM starting
 * at s_addr.  The address register is latched once; the data port
 * advances on each access.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                        u_int16_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
 1225 
/*
 * Fill 'count' consecutive 16 bit LRAM words starting at s_addr
 * with set_value (a 16 bit memset over LRAM).
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
                 u_int16_t set_value, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
                              set_value, count);
}
 1234 
/*
 * Sum 'count' consecutive 16 bit LRAM words starting at s_addr.
 * Relies on the LRAM data port advancing on each read after the
 * single address-register write.  Used for microcode checksums.
 */
static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
        u_int32_t       sum;
        int             i;

        sum = 0;
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < count; i++)
                sum += ADV_INW(adv, ADV_LRAM_DATA);
        return (sum);
}
 1247 
/*
 * Write 'value' to LRAM at 'addr', wait, then read it back and
 * compare.  NOTE the inverted return sense relative to most routines
 * here: returns 0 on success, 1 if the readback did not match.
 */
static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
                             u_int16_t value)
{
        int     retval;

        retval = 0;
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        ADV_OUTW(adv, ADV_LRAM_DATA, value);
        /* Allow the write to settle before verifying. */
        DELAY(10000);
        /* Re-latch the address; the data access advanced it. */
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        if (value != ADV_INW(adv, ADV_LRAM_DATA))
                retval = 1;
        return (retval);
}
 1263 
/*
 * Read a 32 bit value from LRAM as two consecutive 16 bit words.
 * The word order of the two halves in LRAM follows the host byte
 * order, hence the BIG_ENDIAN build reads the high word first.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
        u_int16_t           val_low, val_high;

        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
        val_high = ADV_INW(adv, ADV_LRAM_DATA);
        val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
        val_low = ADV_INW(adv, ADV_LRAM_DATA);
        val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

        return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
 1281 
/*
 * Write a 32 bit value to LRAM as two consecutive 16 bit words,
 * in the host-byte-order word order matching adv_read_lram_32.
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
 1295 
/*
 * Stream 'count' 32 bit values into LRAM starting at s_addr.  The
 * buffer is pushed out as 2*count 16 bit words through the
 * auto-advancing data port.
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
                        u_int32_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
 1303 
/*
 * Read one 16 bit word from the serial EEPROM at 'addr'.  Writes are
 * disabled first, and each command is followed by a 1ms settle delay.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
        u_int16_t read_wval;
        u_int8_t  cmd_reg;

        adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
        DELAY(1000);
        cmd_reg = addr | ADV_EEPROM_CMD_READ;
        adv_write_eeprom_cmd_reg(adv, cmd_reg);
        DELAY(1000);
        read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
        DELAY(1000);
        return (read_wval);
}
 1319 
/*
 * Write one 16 bit word to the serial EEPROM at 'addr', skipping the
 * write entirely if the cell already holds 'value' (EEPROM cells have
 * limited write endurance).  Returns the value read back afterwards,
 * so the caller can verify by comparing against 'value'.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
        u_int16_t       read_value;

        read_value = adv_read_eeprom_16(adv, addr);
        if (read_value != value) {
                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
                DELAY(1000);
                
                ADV_OUTW(adv, ADV_EEPROM_DATA, value);
                DELAY(1000);

                /* The 20ms delay covers the EEPROM's write cycle time. */
                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
                DELAY(20 * 1000);

                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
                DELAY(1000);
                read_value = adv_read_eeprom_16(adv, addr);
        }
        return (read_value);
}
 1342 
 1343 static int
 1344 adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
 1345 {
 1346         u_int8_t read_back;
 1347         int      retry;
 1348 
 1349         retry = 0;
 1350         while (1) {
 1351                 ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
 1352                 DELAY(1000);
 1353                 read_back = ADV_INB(adv, ADV_EEPROM_CMD);
 1354                 if (read_back == cmd_reg) {
 1355                         return (1);
 1356                 }
 1357                 if (retry++ > ADV_EEPROM_MAX_RETRY) {
 1358                         return (0);
 1359                 }
 1360         }
 1361 }
 1362 
/*
 * Write the supplied EEPROM configuration image to the device and
 * verify it, maintaining a running 16 bit checksum that is stored in
 * the word just past the config region.  'wbuf' walks eeprom_config
 * word-by-word in step with the EEPROM addresses being written.
 * Returns the number of write/verify errors encountered (0 == OK).
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
                           struct adv_eeprom_config *eeprom_config)
{
        int             n_error;
        u_int16_t       *wbuf;
        u_int16_t       sum;
        u_int8_t        s_addr;
        u_int8_t        cfg_beg;
        u_int8_t        cfg_end;

        wbuf = (u_int16_t *)eeprom_config;
        n_error = 0;
        sum = 0;
        /* Words 0 and 1 always live at EEPROM addresses 0 and 1. */
        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                sum += *wbuf;
                if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
                        n_error++;
                }
        }
        /* The config region's location depends on the bus flavor. */
        if (adv->type & ADV_VL) {
                cfg_beg = ADV_EEPROM_CFG_BEG_VL;
                cfg_end = ADV_EEPROM_MAX_ADDR_VL;
        } else {
                cfg_beg = ADV_EEPROM_CFG_BEG;
                cfg_end = ADV_EEPROM_MAX_ADDR;
        }

        /* Write the config body, accumulating the checksum as we go. */
        for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
                sum += *wbuf;
                if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
                        n_error++;
                }
        }
        /* Store the checksum (in the struct and at address cfg_end). */
        *wbuf = sum;
        if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
                n_error++;
        }
        /* Read everything back (checksum word included) to verify. */
        wbuf = (u_int16_t *)eeprom_config;
        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
                        n_error++;
                }
        }
        for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
                if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
                        n_error++;
                }
        }
        return (n_error);
}
 1414 
/*
 * Download the microcode image (mcode_size bytes from mcode_buf) into
 * chip LRAM at s_addr and record the microcode's own checksum and size
 * in the microcode variables.  Returns a sum over the full downloaded
 * region so the caller can compare it against the expected value.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
                   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
        u_int32_t chksum;
        u_int16_t mcode_lram_size;
        u_int16_t mcode_chksum;

        /* LRAM is addressed in 16 bit words. */
        mcode_lram_size = mcode_size >> 1;
        /* XXX Why zero the memory just before you write the whole thing?? */
        adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
        adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

        /* Checksum of everything written, for the caller to verify. */
        chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
        /* Separate checksum over just the code section, kept in LRAM. */
        mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
                                                   ((mcode_size - s_addr
                                                     - ADV_CODE_SEC_BEG) >> 1));
        adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
        adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
        return (chksum);
}
 1436 
 1437 static void
 1438 adv_reinit_lram(struct adv_softc *adv) {
 1439         adv_init_lram(adv);
 1440         adv_init_qlink_var(adv);
 1441 }
 1442 
/*
 * Lay out the queue blocks in LRAM as a doubly linked list of
 * max_openings entries (queue numbers ADV_MIN_ACTIVE_QNO through
 * max_openings), followed by extra self-linked blocks.  Each block
 * stores forward/backward queue-number links and its own number.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
        u_int8_t  i;
        u_int16_t s_addr;

        /*
         * Zero the whole queue area first.  The size expression
         * presumably assumes 64-byte queue blocks (ADV_QBLK_SIZE)
         * and covers max_openings + 3 blocks — TODO confirm against
         * the ADV_QBLK_SIZE definition.
         */
        adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
                         (((adv->max_openings + 2 + 1) * 64) >> 1));
        
        i = ADV_MIN_ACTIVE_QNO;
        s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

        /* First block: backward link wraps to the last queue. */
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        i++;
        s_addr += ADV_QBLK_SIZE;
        /* Middle blocks link to their immediate neighbors. */
        for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        }

        /* Last block: forward link marks the end of the list. */
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
        i++;
        s_addr += ADV_QBLK_SIZE;

        /* Extra blocks beyond the list link only to themselves. */
        for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        }
}
 1478 
/*
 * Initialize the microcode's variable area after a microcode download:
 * start all targets async, reset the queue-link variables, publish the
 * disconnect mask, host SCSI ID and overrun buffer, then point the
 * chip's program counter at the microcode entry point.  Returns 0 on
 * success, 1 if the program counter could not be set.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
        int      i;

        for (i = 0; i <= ADV_MAX_TID; i++) {
                
                /* Start out async all around */
                adv_set_syncrate(adv, /*path*/NULL,
                                 i, 0, 0,
                                 ADV_TRANS_GOAL|ADV_TRANS_CUR);
        }

        adv_init_qlink_var(adv);

        adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
        /* Our SCSI ID is published as a one-hot bit mask. */
        adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

        /* Overrun buffer the microcode uses for unexpected data-in. */
        adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

        adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

        /* Aim the RISC at the microcode start and verify the write. */
        ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
        if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
                printf("adv%d: Unable to set program counter. Aborting.\n",
                       adv->unit);
                return (1);
        }
        return (0);
}
 1509 
/*
 * Reset the microcode's queue-link bookkeeping variables to their
 * post-initialization state: free list starts at queue 1, the done
 * queue tail at the last queue, the busy/disconnect heads in the
 * extra blocks past max_openings, and all error/halt/busy flags clear.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
        int       i;
        u_int16_t lram_addr;

        adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
        adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

        adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
        adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

        /* The busy/disc1 heads use the self-linked extra queue blocks. */
        adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
                         (u_int8_t)((int) adv->max_openings + 1));
        adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
                         (u_int8_t)((int) adv->max_openings + 2));

        adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

        /* Clear all error, halt, stop, busy and progress indicators. */
        adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
        adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
        adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
        adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
        adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
        adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

        /* Zero the first 32 words of the queue address area. */
        lram_addr = ADV_QADR_BEG;
        for (i = 0; i < 32; i++, lram_addr += 2)
                adv_write_lram_16(adv, lram_addr, 0);
}
 1540 
 1541 static void
 1542 adv_disable_interrupt(struct adv_softc *adv)
 1543 {
 1544         u_int16_t cfg;
 1545 
 1546         cfg = ADV_INW(adv, ADV_CONFIG_LSW);
 1547         ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
 1548 }
 1549 
 1550 static void
 1551 adv_enable_interrupt(struct adv_softc *adv)
 1552 {
 1553         u_int16_t cfg;
 1554 
 1555         cfg = ADV_INW(adv, ADV_CONFIG_LSW);
 1556         ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
 1557 }
 1558 
/*
 * Pulse the IRQ-act bit in the chip status register (set it, then
 * clear it) to toggle/acknowledge the chip's interrupt line.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
        ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
 1565 
 1566 void
 1567 adv_start_execution(struct adv_softc *adv)
 1568 {
 1569         if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
 1570                 adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
 1571         }
 1572 }
 1573 
/*
 * Attempt to halt the chip's RISC processor.
 *
 * Clears the single-step, test, and diagnostic control bits, asserts
 * the halt bit, then feeds HALT and RFLAG_WTM instructions through the
 * instruction holding register.  Returns 1 if the chip reports halted,
 * 0 otherwise.
 */
int
adv_stop_chip(struct adv_softc *adv)
{
        u_int8_t cc_val;

        cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
                 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
        ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
        adv_set_chip_ih(adv, ADV_INS_HALT);
        adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
        if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
                return (0);
        }
        return (1);
}
 1589 
/*
 * Ask the running microcode to halt the RISC processor.
 *
 * Writes a halt request into the stop-code LRAM variable and polls
 * for the chip to report halted.  The caller's previous stop code is
 * restored before returning so this temporary request does not
 * clobber persistent stop state.  Returns 1 if the chip halted (or
 * was already halted), 0 if the poll loop expired.
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{       
        int      count;
        u_int8_t saved_stop_code;

        if (adv_is_chip_halted(adv))
                return (1);

        count = 0;
        saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
        adv_write_lram_8(adv, ADVV_STOP_CODE_B,
                         ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
        /* Busy-wait (no delay between polls) for up to 2000 iterations. */
        while (adv_is_chip_halted(adv) == 0
            && count++ < 2000)
                ;

        adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
        return (count < 2000); 
}
 1610 
/*
 * Place an instruction in the chip's instruction holding register.
 * The IH register lives in register bank 1; bank 0 is restored
 * before returning.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
        adv_set_bank(adv, 1);
        ADV_OUTW(adv, ADV_REG_IH, ins_code);
        adv_set_bank(adv, 0);
}
 1618 
 1619 #if UNUSED
/*
 * Read the SCSI control register (register bank 1), restoring bank 0
 * before returning.  Compiled out via the surrounding #if UNUSED.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
        u_int8_t scsi_ctrl;

        adv_set_bank(adv, 1);
        scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
        adv_set_bank(adv, 0);
        return (scsi_ctrl);
}
 1630 #endif
 1631 
 1632 /*
 1633  * XXX Looks like more padding issues in this routine as well.
 1634  *     There has to be a way to turn this into an insw.
 1635  */
/*
 * Read "words" 16-bit words of queue information from LRAM starting
 * at s_addr into inbuf.
 *
 * Word 5 of the destination is skipped: no LRAM data access is made
 * for it (presumably the address register only advances on data
 * accesses), while inbuf still advances — leaving what appears to be
 * a host-structure padding word untouched.  See the XXX above.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
               u_int16_t *inbuf, int words)
{
        int     i;

        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < words; i++, inbuf++) {
                if (i == 5) {
                        continue;
                }
                *inbuf = ADV_INW(adv, ADV_LRAM_DATA);
        }
}
 1650 
 1651 static u_int
 1652 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
 1653 {
 1654         u_int     cur_used_qs;
 1655         u_int     cur_free_qs;
 1656 
 1657         cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
 1658 
 1659         if ((cur_used_qs + n_qs) <= adv->max_openings) {
 1660                 cur_free_qs = adv->max_openings - cur_used_qs;
 1661                 return (cur_free_qs);
 1662         }
 1663         adv->openings_needed = n_qs;
 1664         return (0);
 1665 }
 1666 
 1667 static u_int8_t
 1668 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
 1669                       u_int8_t n_free_q)
 1670 {
 1671         int i;
 1672 
 1673         for (i = 0; i < n_free_q; i++) {
 1674                 free_q_head = adv_alloc_free_queue(adv, free_q_head);
 1675                 if (free_q_head == ADV_QLINK_END)
 1676                         break;
 1677         }
 1678         return (free_q_head);
 1679 }
 1680 
 1681 static u_int8_t
 1682 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
 1683 {
 1684         u_int16_t       q_addr;
 1685         u_int8_t        next_qp;
 1686         u_int8_t        q_status;
 1687 
 1688         next_qp = ADV_QLINK_END;
 1689         q_addr = ADV_QNO_TO_QADDR(free_q_head);
 1690         q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);
 1691         
 1692         if ((q_status & QS_READY) == 0)
 1693                 next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
 1694 
 1695         return (next_qp);
 1696 }
 1697 
 1698 static int
 1699 adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
 1700                     u_int8_t n_q_required)
 1701 {
 1702         u_int8_t        free_q_head;
 1703         u_int8_t        next_qp;
 1704         u_int8_t        tid_no;
 1705         u_int8_t        target_ix;
 1706         int             retval;
 1707 
 1708         retval = 1;
 1709         target_ix = scsiq->q2.target_ix;
 1710         tid_no = ADV_TIX_TO_TID(target_ix);
 1711         free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
 1712         if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
 1713             != ADV_QLINK_END) {
 1714                 scsiq->q1.q_no = free_q_head;
 1715 
 1716                 /*
 1717                  * Now that we know our Q number, point our sense
 1718                  * buffer pointer to a bus dma mapped area where
 1719                  * we can dma the data to.
 1720                  */
 1721                 scsiq->q1.sense_addr = adv->sense_physbase
 1722                     + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
 1723                 adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
 1724                 adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
 1725                 adv->cur_active += n_q_required;
 1726                 retval = 0;
 1727         }
 1728         return (retval);
 1729 }
 1730 
 1731 
/*
 * Post a request that may carry a scatter/gather list.
 *
 * Continuation copying starts at sg_list[1], so S/G entry 0 is
 * presumably carried by the head queue itself.  The remaining
 * entries are distributed across chained continuation queues,
 * ADV_SG_LIST_PER_Q at a time, by following each queue's forward
 * link.  Finally the head queue is made ready via
 * adv_put_ready_queue().
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                            u_int q_no)
{
        u_int8_t        sg_list_dwords;
        u_int8_t        sg_index, i;
        u_int8_t        sg_entry_cnt;
        u_int8_t        next_qp;
        u_int16_t       q_addr;
        struct          adv_sg_head *sg_head;
        struct          adv_sg_list_q scsi_sg_q;

        sg_head = scsiq->sg_head;

        if (sg_head) {
                /* Entry 0 is not chained; only entry_cnt - 1 remain. */
                sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
                if (sg_entry_cnt == 0)
                        panic("adv_put_ready_sg_list_queue: ScsiQ with "
                              "a SG list but only one element");
                if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
                        panic("adv_put_ready_sg_list_queue: ScsiQ with "
                              "a SG list but QC_SG_HEAD not set");
#endif                  
                q_addr = ADV_QNO_TO_QADDR(q_no);
                sg_index = 1;
                scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
                scsi_sg_q.sg_head_qp = q_no;
                scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
                for (i = 0; i < sg_head->queue_cnt; i++) {
                        u_int8_t segs_this_q;

                        if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
                                segs_this_q = ADV_SG_LIST_PER_Q;
                        else {
                                /* This will be the last segment then */
                                segs_this_q = sg_entry_cnt;
                                scsi_sg_q.cntl |= QCSG_SG_XFER_END;
                        }
                        scsi_sg_q.seq_no = i + 1;
                        /* Each S/G element occupies two 32-bit dwords. */
                        sg_list_dwords = segs_this_q << 1;
                        if (i == 0) {
                                scsi_sg_q.sg_list_cnt = segs_this_q;
                                scsi_sg_q.sg_cur_list_cnt = segs_this_q;
                        } else {
                                scsi_sg_q.sg_list_cnt = segs_this_q - 1;
                                scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
                        }
                        /* Follow the forward link to the next chained queue. */
                        next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
                        scsi_sg_q.q_no = next_qp;
                        q_addr = ADV_QNO_TO_QADDR(next_qp);

                        adv_write_lram_16_multi(adv,
                                                q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
                                                (u_int16_t *)&scsi_sg_q,
                                                sizeof(scsi_sg_q) >> 1);
                        adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
                                                (u_int32_t *)&sg_head->sg_list[sg_index],
                                                sg_list_dwords);
                        sg_entry_cnt -= segs_this_q;
                        sg_index += ADV_SG_LIST_PER_Q;
                }
        }
        adv_put_ready_queue(adv, scsiq, q_no);
}
 1797 
/*
 * Finish initializing a request's head queue in LRAM and mark it
 * ready for the microcode.
 *
 * If the target's current transfer settings differ from its
 * negotiation goal, an SDTR message-out is staged first so the
 * negotiation piggybacks on this command.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                    u_int q_no)
{
        struct          adv_target_transinfo* tinfo;
        u_int           q_addr;
        u_int           tid_no;

        tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
        tinfo = &adv->tinfo[tid_no];
        if ((tinfo->current.period != tinfo->goal.period)
         || (tinfo->current.offset != tinfo->goal.offset)) {

                /* Stage an SDTR message and flag the queue to send it. */
                adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
                scsiq->q1.cntl |= QC_MSG_OUT;
        }
        q_addr = ADV_QNO_TO_QADDR(q_no);

        scsiq->q1.status = QS_FREE;

        adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
                                (u_int16_t *)scsiq->cdbptr,
                                scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
        adv_adj_scsiq_endian(scsiq);
#endif

        /*
         * Copy the q1/q2 data (starting at q1.cntl) into the queue;
         * adv_put_scsiq() drops host-structure padding words.
         */
        adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
                      (u_int16_t *) &scsiq->q1.cntl,
                      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
        adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
                          adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

        adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
        adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

        /*
         * A single 16-bit write sets both the status byte (QS_READY)
         * and the adjacent queue-number byte in one access.
         */
        adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
                          (scsiq->q1.q_no << 8) | QS_READY);
}
 1844 
/*
 * Copy a request header into LRAM, omitting host-structure padding.
 * Source words 2 and 10 are skipped without touching the LRAM data
 * port, so (assuming the LRAM address only advances on data
 * accesses) the LRAM image stays densely packed.
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
              u_int16_t *buffer, int words)
{
        int     i;

        /*
         * XXX This routine makes *gross* assumptions
         * about padding in the data structures.
         * Either the data structures should have explicit
         * padding members added, or they should have padding
         * turned off via compiler attributes depending on
         * which yields better overall performance.  My hunch
         * would be that turning off padding would be the
         * faster approach as an outsw is much faster than
         * this crude loop and accessing un-aligned data
         * members isn't *that* expensive.  The other choice
         * would be to modify the ASC script so that the
         * the adv_scsiq_1 structure can be re-arranged so
         * padding isn't required.
         */
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < words; i++, buffer++) {
                if (i == 2 || i == 10) {
                        continue;
                }
                ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
        }
}
 1874 
 1875 #if BYTE_ORDER == BIG_ENDIAN
/*
 * Byte-swap a queue-done info structure for big-endian hosts.
 * This driver does not support big-endian machines, so this is a
 * panic stub.
 */
void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{

        panic("adv(4) not supported on big-endian machines.\n");
}
 1882 
/*
 * Byte-swap a SCSI request structure for big-endian hosts.
 * This driver does not support big-endian machines, so this is a
 * panic stub.
 */
void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{

        panic("adv(4) not supported on big-endian machines.\n");
}
 1889 #endif
 1890 
/*
 * Process an extended message received from a target while the
 * microcode is halted on the queue at halt_q_addr.
 *
 * SDTR: range-check the target's proposal against our goal and the
 * controller's capabilities, record the resulting sync settings, and
 * either accept (when it matches a negotiation we initiated) or
 * counter-propose.  WDTR: reply requesting 8-bit (narrow) transfers.
 * Any other extended message is rejected.  The updated message-out
 * flag is written back to the halted queue's control byte.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
                     u_int8_t q_cntl, target_bit_vector target_mask,
                     int tid_no)
{
        struct  ext_msg ext_msg;

        adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
                               sizeof(ext_msg) >> 1);
        if ((ext_msg.msg_type == MSG_EXTENDED)
         && (ext_msg.msg_req == MSG_EXT_SDTR)
         && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
                union     ccb *ccb;
                struct    adv_target_transinfo* tinfo;
                u_int32_t cinfo_index;
                u_int    period;
                u_int    offset;
                int      sdtr_accept;
                u_int8_t orig_offset;

                cinfo_index =
                    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
                ccb = adv->ccb_infos[cinfo_index].ccb;
                tinfo = &adv->tinfo[tid_no];
                sdtr_accept = TRUE;

                orig_offset = ext_msg.req_ack_offset;
                /* Target asked for a faster period than our goal allows. */
                if (ext_msg.xfer_period < tinfo->goal.period) {
                        sdtr_accept = FALSE;
                        ext_msg.xfer_period = tinfo->goal.period;
                }

                /* Perform range checking */
                period = ext_msg.xfer_period;
                offset = ext_msg.req_ack_offset;
                adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
                ext_msg.xfer_period = period;
                ext_msg.req_ack_offset = offset;
                
                /* Record our current sync settings */
                adv_set_syncrate(adv, ccb->ccb_h.path,
                                 tid_no, ext_msg.xfer_period,
                                 ext_msg.req_ack_offset,
                                 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

                /* Offset too high or large period forced async */
                if (orig_offset != ext_msg.req_ack_offset)
                        sdtr_accept = FALSE;

                if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
                        /* Valid response to our requested negotiation */
                        q_cntl &= ~QC_MSG_OUT;
                } else {
                        /* Must Respond */
                        q_cntl |= QC_MSG_OUT;
                        adv_msgout_sdtr(adv, ext_msg.xfer_period,
                                        ext_msg.req_ack_offset);
                }

        } else if (ext_msg.msg_type == MSG_EXTENDED
                && ext_msg.msg_req == MSG_EXT_WDTR
                && ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

                /* We only do narrow (8-bit) transfers. */
                ext_msg.wdtr_width = 0;
                adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                        (u_int16_t *)&ext_msg,
                                        sizeof(ext_msg) >> 1);
                q_cntl |= QC_MSG_OUT;
        } else {

                /* Unknown extended message: reject it. */
                ext_msg.msg_type = MSG_MESSAGE_REJECT;
                adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                        (u_int16_t *)&ext_msg,
                                        sizeof(ext_msg) >> 1);
                q_cntl |= QC_MSG_OUT;
        }
        adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
 1969 
 1970 static void
 1971 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
 1972                 u_int8_t sdtr_offset)
 1973 {
 1974         struct   ext_msg sdtr_buf;
 1975 
 1976         sdtr_buf.msg_type = MSG_EXTENDED;
 1977         sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
 1978         sdtr_buf.msg_req = MSG_EXT_SDTR;
 1979         sdtr_buf.xfer_period = sdtr_period;
 1980         sdtr_offset &= ADV_SYN_MAX_OFFSET;
 1981         sdtr_buf.req_ack_offset = sdtr_offset;
 1982         adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
 1983                                 (u_int16_t *) &sdtr_buf,
 1984                                 sizeof(sdtr_buf) / 2);
 1985 }
 1986 
/*
 * Abort controller queues matching the given target/lun.
 *
 * Scans every possible active queue in LRAM.  A queue is aborted
 * when it is ready, not already aborted, not an S/G continuation
 * queue, addressed to the requested target/lun, optionally not yet
 * started on the bus (queued_only), and — if ccb is non-NULL —
 * belongs to that specific CCB.  Matching queues are marked
 * QS_ABORTED in LRAM, the owning CCB gets the supplied status, and
 * its cinfo is flagged so the completion path knows an abort was
 * queued.  Returns the number of queues aborted.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
              u_int32_t status, int queued_only)
{
        u_int16_t q_addr;
        u_int8_t  q_no;
        struct adv_q_done_info scsiq_buf;
        struct adv_q_done_info *scsiq;
        u_int8_t  target_ix;
        int       count;

        scsiq = &scsiq_buf;
        target_ix = ADV_TIDLUN_TO_IX(target, lun);
        count = 0;
        for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
                struct adv_ccb_info *ccb_info;
                q_addr = ADV_QNO_TO_QADDR(q_no);

                /* Snapshot this queue's state from LRAM. */
                adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
                ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
                if (((scsiq->q_status & QS_READY) != 0)
                 && ((scsiq->q_status & QS_ABORTED) == 0)
                 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
                 && (scsiq->d2.target_ix == target_ix)
                 && (queued_only == 0
                  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
                 && (ccb == NULL || (ccb == ccb_info->ccb))) {
                        union ccb *aborted_ccb;
                        struct adv_ccb_info *cinfo;

                        scsiq->q_status |= QS_ABORTED;
                        adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
                                         scsiq->q_status);
                        aborted_ccb = ccb_info->ccb;
                        /* Don't clobber earlier error codes */
                        if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
                          == CAM_REQ_INPROG)
                                aborted_ccb->ccb_h.status |= status;
                        cinfo = (struct adv_ccb_info *)
                            aborted_ccb->ccb_h.ccb_cinfo_ptr;
                        cinfo->state |= ACCB_ABORT_QUEUED;
                        count++;
                }
        }
        return (count);
}
 2033 
/*
 * Reset the SCSI bus (optionally initiating the reset ourselves),
 * reinitialize controller LRAM and per-target sync settings, notify
 * CAM, complete all pending CCBs with CAM_SCSI_BUS_RESET, and
 * restart the chip.  Returns the number of CCBs terminated.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
        int count; 
        int i;
        union ccb *ccb;

        /* Wait up to ~200ms for any in-progress bus reset to clear. */
        i = 200;
        while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
            && i--)
                DELAY(1000);
        adv_reset_chip(adv, initiate_bus_reset);
        adv_reinit_lram(adv);
        /* Sync settings must be renegotiated after a bus reset. */
        for (i = 0; i <= ADV_MAX_TID; i++)
                adv_set_syncrate(adv, NULL, i, /*period*/0,
                                 /*offset*/0, ADV_TRANS_CUR);
        ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

        /* Tell the XPT layer that a bus reset occurred */
        if (adv->path != NULL)
                xpt_async(AC_BUS_RESET, adv->path, NULL);

        /* Fail every pending request back to its owner. */
        count = 0;
        while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
                if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
                        ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
                adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
                count++;
        }

        adv_start_chip(adv);
        return (count);
}
 2067 
/*
 * Load the synchronous transfer register for a particular target id.
 *
 * The bank-1 host SCSI id register selects whose sync register the
 * bank-0 ADV_SYN_OFFSET access targets.  NOTE(review): the id is
 * written as a plain number but compared on readback against a bit
 * mask (0x01 << tid) — inferred register behavior; confirm against
 * chip documentation.  The original id selection is restored before
 * returning.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
        int orig_id;

        adv_set_bank(adv, 1);
        /* Recover the currently selected id from its bit mask. */
        orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
        ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
        if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
                adv_set_bank(adv, 0);
                ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
        }
        adv_set_bank(adv, 1);
        ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
        adv_set_bank(adv, 0);
}

Cache object: b935e92df4d2a81702e044e97c5aa529


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.