1 /*-
2 * Low-level routines for the Advanced Systems Inc. SCSI controller chips
3 *
4 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31 /*-
32 * Ported from:
33 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
34 *
35 * Copyright (c) 1995-1996 Advanced System Products, Inc.
36 * All Rights Reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that redistributions of source
40 * code retain the above copyright notice and this comment without
41 * modification.
42 */
43
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD: releng/9.0/sys/dev/advansys/advlib.c 163896 2006-11-02 00:54:38Z mjacob $");
46
47 #include <sys/param.h>
48 #include <sys/kernel.h>
49 #include <sys/systm.h>
50
51 #include <machine/bus.h>
52 #include <machine/resource.h>
53 #include <sys/bus.h>
54 #include <sys/rman.h>
55
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_sim.h>
59 #include <cam/cam_xpt_sim.h>
60
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 #include <cam/scsi/scsi_da.h>
64 #include <cam/scsi/scsi_cd.h>
65
66 #include <vm/vm.h>
67 #include <vm/vm_param.h>
68 #include <vm/pmap.h>
69
70 #include <dev/advansys/advansys.h>
71 #include <dev/advansys/advmcode.h>
72
73 struct adv_quirk_entry {
74 struct scsi_inquiry_pattern inq_pat;
75 u_int8_t quirks;
76 #define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS 0x01
77 #define ADV_QUIRK_FIX_ASYN_XFER 0x02
78 };
79
80 static struct adv_quirk_entry adv_quirk_table[] =
81 {
82 {
83 { T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
84 ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
85 },
86 {
87 { T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
88 0
89 },
90 {
91 {
92 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
93 "TANDBERG", " TDC 36", "*"
94 },
95 0
96 },
97 {
98 { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
99 0
100 },
101 {
102 {
103 T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
104 "*", "*", "*"
105 },
106 0
107 },
108 {
109 {
110 T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
111 "*", "*", "*"
112 },
113 0
114 },
115 {
116 /* Default quirk entry */
117 {
118 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
119 /*vendor*/"*", /*product*/"*", /*revision*/"*"
120 },
121 ADV_QUIRK_FIX_ASYN_XFER,
122 }
123 };
124
125 /*
126  * Allowable periods, in SDTR transfer period factor units (4 ns per count)
127 */
128 static u_int8_t adv_sdtr_period_tbl[] =
129 {
130 25,
131 30,
132 35,
133 40,
134 50,
135 60,
136 70,
137 85
138 };
139
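/*
 * Allowable periods for Ultra-capable chips, in the same units
 * (selected in adv_lib_init())
 */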
140 static u_int8_t adv_sdtr_period_tbl_ultra[] =
141 {
142 12,
143 19,
144 25,
145 32,
146 38,
147 44,
148 50,
149 57,
150 63,
151 69,
152 75,
153 82,
154 88,
155 94,
156 100,
157 107
158 };
159
160 struct ext_msg {
161 u_int8_t msg_type;
162 u_int8_t msg_len;
163 u_int8_t msg_req;
164 union {
165 struct {
166 u_int8_t sdtr_xfer_period;
167 u_int8_t sdtr_req_ack_offset;
168 } sdtr;
169 struct {
170 u_int8_t wdtr_width;
171 } wdtr;
172 struct {
173 u_int8_t mdp[4];
174 } mdp;
175 } u_ext_msg;
176 u_int8_t res;
177 };
178
179 #define xfer_period u_ext_msg.sdtr.sdtr_xfer_period
180 #define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset
181 #define wdtr_width u_ext_msg.wdtr.wdtr_width
182 #define mdp_b3 u_ext_msg.mdp.mdp[0]
183 #define mdp_b2 u_ext_msg.mdp.mdp[1]
184 #define mdp_b1 u_ext_msg.mdp.mdp[2]
185 #define mdp_b0 u_ext_msg.mdp.mdp[3]
186
187 /*
188  * Some of the early PCI adapters have problems with
189  * async transfers, so we use an offset of 1 instead.
190  */
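/*
 * 0x41 encodes SDTR period table index 4 in the high nibble and a
 * REQ/ACK offset of 1 in the low nibble; see adv_sdtr_to_period_offset()
 * and adv_period_offset_to_sdtr().
 */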
191 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
192
193 /* LRAM routines */
194 static void adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
195 u_int16_t *buffer, int count);
196 static void adv_write_lram_16_multi(struct adv_softc *adv,
197 u_int16_t s_addr, u_int16_t *buffer,
198 int count);
199 static void adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
200 u_int16_t set_value, int count);
201 static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
202 int count);
203
204 static int adv_write_and_verify_lram_16(struct adv_softc *adv,
205 u_int16_t addr, u_int16_t value);
206 static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
207
208
209 static void adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
210 u_int32_t value);
211 static void adv_write_lram_32_multi(struct adv_softc *adv,
212 u_int16_t s_addr, u_int32_t *buffer,
213 int count);
214
215 /* EEPROM routines */
216 static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
217 static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
218 u_int16_t value);
219 static int adv_write_eeprom_cmd_reg(struct adv_softc *adv,
220 u_int8_t cmd_reg);
221 static int adv_set_eeprom_config_once(struct adv_softc *adv,
222 struct adv_eeprom_config *eeconfig);
223
224 /* Initialization */
225 static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
226 u_int16_t *mcode_buf, u_int16_t mcode_size);
227
228 static void adv_reinit_lram(struct adv_softc *adv);
229 static void adv_init_lram(struct adv_softc *adv);
230 static int adv_init_microcode_var(struct adv_softc *adv);
231 static void adv_init_qlink_var(struct adv_softc *adv);
232
233 /* Interrupts */
234 static void adv_disable_interrupt(struct adv_softc *adv);
235 static void adv_enable_interrupt(struct adv_softc *adv);
236 static void adv_toggle_irq_act(struct adv_softc *adv);
237
238 /* Chip Control */
239 static int adv_host_req_chip_halt(struct adv_softc *adv);
240 static void adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
241 #if 0
242 static u_int8_t adv_get_chip_scsi_ctrl(struct adv_softc *adv);
243 #endif
244
245 /* Queue handling and execution */
246 static __inline int
247 adv_sgcount_to_qcount(int sgcount);
248
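/*
 * A request occupies one queue plus enough additional queues to hold its
 * S/G list: the remaining sgcount - 1 elements are packed ADV_SG_LIST_PER_Q
 * per extra queue (the first element travels with the request itself; see
 * adv_put_ready_sg_list_queue(), which starts copying at sg_list[1]).
 */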
249 static __inline int
250 adv_sgcount_to_qcount(int sgcount)
251 {
252 int n_sg_list_qs;
253
254 n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
255 if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
256 n_sg_list_qs++;
257 return (n_sg_list_qs + 1);
258 }
259
260 #if BYTE_ORDER == BIG_ENDIAN
261 static void adv_adj_endian_qdone_info(struct adv_q_done_info *);
262 static void adv_adj_scsiq_endian(struct adv_scsi_q *);
263 #endif
264 static void adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
265 u_int16_t *inbuf, int words);
266 static u_int adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
267 static u_int8_t adv_alloc_free_queues(struct adv_softc *adv,
268 u_int8_t free_q_head, u_int8_t n_free_q);
269 static u_int8_t adv_alloc_free_queue(struct adv_softc *adv,
270 u_int8_t free_q_head);
271 static int adv_send_scsi_queue(struct adv_softc *adv,
272 struct adv_scsi_q *scsiq,
273 u_int8_t n_q_required);
274 static void adv_put_ready_sg_list_queue(struct adv_softc *adv,
275 struct adv_scsi_q *scsiq,
276 u_int q_no);
277 static void adv_put_ready_queue(struct adv_softc *adv,
278 struct adv_scsi_q *scsiq, u_int q_no);
279 static void adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
280 u_int16_t *buffer, int words);
281
282 /* Messages */
283 static void adv_handle_extmsg_in(struct adv_softc *adv,
284 u_int16_t halt_q_addr, u_int8_t q_cntl,
285 target_bit_vector target_id,
286 int tid);
287 static void adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
288 u_int8_t sdtr_offset);
289 static void adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
290 u_int8_t sdtr_data);
291
292
293 /* Exported functions first */
294
295 void
296 advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
297 {
298 struct adv_softc *adv;
299
300 adv = (struct adv_softc *)callback_arg;
301 switch (code) {
302 case AC_FOUND_DEVICE:
303 {
304 struct ccb_getdev *cgd;
305 target_bit_vector target_mask;
306 int num_entries;
307 caddr_t match;
308 struct adv_quirk_entry *entry;
309 struct adv_target_transinfo* tinfo;
310
311 cgd = (struct ccb_getdev *)arg;
312
313 target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
314
315 num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
316 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
317 (caddr_t)adv_quirk_table,
318 num_entries, sizeof(*adv_quirk_table),
319 scsi_inquiry_match);
320
321 if (match == NULL)
322 panic("advasync: device didn't match wildcard entry!!");
323
324 entry = (struct adv_quirk_entry *)match;
325
326 if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
327 if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
328 adv->fix_asyn_xfer_always |= target_mask;
329 else
330 adv->fix_asyn_xfer_always &= ~target_mask;
331 /*
332 * We start out life with all bits set and clear them
333 * after we've determined that the fix isn't necessary.
334 * It may well be that we've already cleared a target
335 * before the full inquiry session completes, so don't
336 * gratuitously set a target bit even if it has this
337 * quirk. But, if the quirk exonerates a device, clear
338 * the bit now.
339 */
340 if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
341 adv->fix_asyn_xfer &= ~target_mask;
342 }
343 /*
344 * Reset our sync settings now that we've determined
345 * what quirks are in effect for the device.
346 */
347 tinfo = &adv->tinfo[cgd->ccb_h.target_id];
348 adv_set_syncrate(adv, cgd->ccb_h.path,
349 cgd->ccb_h.target_id,
350 tinfo->current.period,
351 tinfo->current.offset,
352 ADV_TRANS_CUR);
353 break;
354 }
355 case AC_LOST_DEVICE:
356 {
357 u_int target_mask;
358
359 if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
360 target_mask = 0x01 << xpt_path_target_id(path);
361 adv->fix_asyn_xfer |= target_mask;
362 }
363
364 /*
365 * Revert to async transfers
366 * for the next device.
367 */
368 adv_set_syncrate(adv, /*path*/NULL,
369 xpt_path_target_id(path),
370 /*period*/0,
371 /*offset*/0,
372 ADV_TRANS_GOAL|ADV_TRANS_CUR);
373 }
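	/* FALLTHROUGH */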
374 default:
375 break;
376 }
377 }
378
379 void
380 adv_set_bank(struct adv_softc *adv, u_int8_t bank)
381 {
382 u_int8_t control;
383
384 /*
385 * Start out with the bank reset to 0
386 */
387 control = ADV_INB(adv, ADV_CHIP_CTRL)
388 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
389 | ADV_CC_DIAG | ADV_CC_SCSI_RESET
390 | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
391 if (bank == 1) {
392 control |= ADV_CC_BANK_ONE;
393 } else if (bank == 2) {
394 control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
395 }
396 ADV_OUTB(adv, ADV_CHIP_CTRL, control);
397 }
398
399 u_int8_t
400 adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
401 {
402 u_int8_t byte_data;
403 u_int16_t word_data;
404
405 /*
406 * LRAM is accessed on 16bit boundaries.
407 */
408 ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
409 word_data = ADV_INW(adv, ADV_LRAM_DATA);
410 if (addr & 1) {
411 #if BYTE_ORDER == BIG_ENDIAN
412 byte_data = (u_int8_t)(word_data & 0xFF);
413 #else
414 byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
415 #endif
416 } else {
417 #if BYTE_ORDER == BIG_ENDIAN
418 byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
419 #else
420 byte_data = (u_int8_t)(word_data & 0xFF);
421 #endif
422 }
423 return (byte_data);
424 }
425
426 void
427 adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
428 {
429 u_int16_t word_data;
430
431 word_data = adv_read_lram_16(adv, addr & 0xFFFE);
432 if (addr & 1) {
433 word_data &= 0x00FF;
434 word_data |= (((u_int8_t)value << 8) & 0xFF00);
435 } else {
436 word_data &= 0xFF00;
437 word_data |= ((u_int8_t)value & 0x00FF);
438 }
439 adv_write_lram_16(adv, addr & 0xFFFE, word_data);
440 }
441
442
443 u_int16_t
444 adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
445 {
446 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
447 return (ADV_INW(adv, ADV_LRAM_DATA));
448 }
449
450 void
451 adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
452 {
453 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
454 ADV_OUTW(adv, ADV_LRAM_DATA, value);
455 }
456
457 /*
458  * Determine if there is a board at the I/O location described by the
459  * given bus space tag and handle by looking for the AdvanSys signatures.
460  * Return 1 if a board is found, 0 otherwise.
461 */
462 int
463 adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
464 {
465 u_int16_t signature;
466
467 if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
468 signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
469 if ((signature == ADV_1000_ID0W)
470 || (signature == ADV_1000_ID0W_FIX))
471 return (1);
472 }
473 return (0);
474 }
475
476 void
477 adv_lib_init(struct adv_softc *adv)
478 {
479 if ((adv->type & ADV_ULTRA) != 0) {
480 adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
481 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
482 } else {
483 adv->sdtr_period_tbl = adv_sdtr_period_tbl;
484 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
485 }
486 }
487
488 u_int16_t
489 adv_get_eeprom_config(struct adv_softc *adv,
490 		      struct adv_eeprom_config *eeprom_config)
491 {
492 u_int16_t sum;
493 u_int16_t *wbuf;
494 u_int8_t cfg_beg;
495 u_int8_t cfg_end;
496 u_int8_t s_addr;
497
498 wbuf = (u_int16_t *)eeprom_config;
499 sum = 0;
500
501 for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
502 *wbuf = adv_read_eeprom_16(adv, s_addr);
503 sum += *wbuf;
504 }
505
506 if (adv->type & ADV_VL) {
507 cfg_beg = ADV_EEPROM_CFG_BEG_VL;
508 cfg_end = ADV_EEPROM_MAX_ADDR_VL;
509 } else {
510 cfg_beg = ADV_EEPROM_CFG_BEG;
511 cfg_end = ADV_EEPROM_MAX_ADDR;
512 }
513
514 for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
515 *wbuf = adv_read_eeprom_16(adv, s_addr);
516 sum += *wbuf;
517 #ifdef ADV_DEBUG_EEPROM
518 printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
519 #endif
520 }
521 *wbuf = adv_read_eeprom_16(adv, s_addr);
522 return (sum);
523 }
524
525 int
526 adv_set_eeprom_config(struct adv_softc *adv,
527 struct adv_eeprom_config *eeprom_config)
528 {
529 int retry;
530
531 retry = 0;
532 while (1) {
533 if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
534 break;
535 }
536 if (++retry > ADV_EEPROM_MAX_RETRY) {
537 break;
538 }
539 }
540 return (retry > ADV_EEPROM_MAX_RETRY);
541 }
542
543 int
544 adv_reset_chip(struct adv_softc *adv, int reset_bus)
545 {
546 adv_stop_chip(adv);
547 ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
548 | (reset_bus ? ADV_CC_SCSI_RESET : 0));
549 DELAY(60);
550
551 adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
552 adv_set_chip_ih(adv, ADV_INS_HALT);
553
554 if (reset_bus)
555 ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
556
557 ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
558 if (reset_bus)
559 DELAY(200 * 1000);
560
561 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
562 ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
563 return (adv_is_chip_halted(adv));
564 }
565
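/*
 * Probe for external LRAM by write/verifying a word in the queue block
 * for queue number 241, an address presumably only backed by RAM when an
 * external SRAM bank is present.  The original contents are restored on
 * success.
 */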
566 int
567 adv_test_external_lram(struct adv_softc* adv)
568 {
569 u_int16_t q_addr;
570 u_int16_t saved_value;
571 int success;
572
573 success = 0;
574
575 q_addr = ADV_QNO_TO_QADDR(241);
576 saved_value = adv_read_lram_16(adv, q_addr);
577 if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
578 success = 1;
579 adv_write_lram_16(adv, q_addr, saved_value);
580 }
581 return (success);
582 }
583
584
585 int
586 adv_init_lram_and_mcode(struct adv_softc *adv)
587 {
588 u_int32_t retval;
589
590 adv_disable_interrupt(adv);
591
592 adv_init_lram(adv);
593
594 retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
595 adv_mcode_size);
596 if (retval != adv_mcode_chksum) {
597 printf("adv%d: Microcode download failed checksum!\n",
598 adv->unit);
599 return (1);
600 }
601
602 if (adv_init_microcode_var(adv) != 0)
603 return (1);
604
605 adv_enable_interrupt(adv);
606 return (0);
607 }
608
609 u_int8_t
610 adv_get_chip_irq(struct adv_softc *adv)
611 {
612 u_int16_t cfg_lsw;
613 u_int8_t chip_irq;
614
615 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
616
617 if ((adv->type & ADV_VL) != 0) {
618 chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
619 if ((chip_irq == 0) ||
620 (chip_irq == 4) ||
621 (chip_irq == 7)) {
622 return (0);
623 }
624 return (chip_irq + (ADV_MIN_IRQ_NO - 1));
625 }
626 chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
627 if (chip_irq == 3)
628 chip_irq += 2;
629 return (chip_irq + ADV_MIN_IRQ_NO);
630 }
631
632 u_int8_t
633 adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
634 {
635 u_int16_t cfg_lsw;
636
637 if ((adv->type & ADV_VL) != 0) {
638 if (irq_no != 0) {
639 if ((irq_no < ADV_MIN_IRQ_NO)
640 || (irq_no > ADV_MAX_IRQ_NO)) {
641 irq_no = 0;
642 } else {
643 irq_no -= ADV_MIN_IRQ_NO - 1;
644 }
645 }
646 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
647 cfg_lsw |= 0x0010;
648 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
649 adv_toggle_irq_act(adv);
650
651 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
652 cfg_lsw |= (irq_no & 0x07) << 2;
653 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
654 adv_toggle_irq_act(adv);
655 } else if ((adv->type & ADV_ISA) != 0) {
656 if (irq_no == 15)
657 irq_no -= 2;
658 irq_no -= ADV_MIN_IRQ_NO;
659 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
660 cfg_lsw |= (irq_no & 0x03) << 2;
661 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
662 }
663 return (adv_get_chip_irq(adv));
664 }
665
666 void
667 adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
668 {
669 u_int16_t cfg_lsw;
670
671 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
672 if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
673 return;
674 cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
675 cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
676 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
677 }
678
679 int
680 adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
681 u_int32_t datalen)
682 {
683 struct adv_target_transinfo* tinfo;
684 u_int32_t *p_data_addr;
685 u_int32_t *p_data_bcount;
686 int disable_syn_offset_one_fix;
687 int retval;
688 u_int n_q_required;
689 u_int32_t addr;
690 u_int8_t sg_entry_cnt;
691 u_int8_t target_ix;
692 u_int8_t sg_entry_cnt_minus_one;
693 u_int8_t tid_no;
694
695 scsiq->q1.q_no = 0;
696 retval = 1; /* Default to error case */
697 target_ix = scsiq->q2.target_ix;
698 tid_no = ADV_TIX_TO_TID(target_ix);
699 tinfo = &adv->tinfo[tid_no];
700
701 if (scsiq->cdbptr[0] == REQUEST_SENSE) {
702 /* Renegotiate if appropriate. */
703 adv_set_syncrate(adv, /*struct cam_path */NULL,
704 tid_no, /*period*/0, /*offset*/0,
705 ADV_TRANS_CUR);
706 if (tinfo->current.period != tinfo->goal.period) {
707 adv_msgout_sdtr(adv, tinfo->goal.period,
708 tinfo->goal.offset);
709 scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
710 }
711 }
712
713 if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
714 sg_entry_cnt = scsiq->sg_head->entry_cnt;
715 sg_entry_cnt_minus_one = sg_entry_cnt - 1;
716
717 #ifdef DIAGNOSTIC
718 if (sg_entry_cnt <= 1)
719 panic("adv_execute_scsi_queue: Queue "
720 "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);
721
722 if (sg_entry_cnt > ADV_MAX_SG_LIST)
723 panic("adv_execute_scsi_queue: "
724 "Queue with too many segs.");
725
726 if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
727 int i;
728
729 for (i = 0; i < sg_entry_cnt_minus_one; i++) {
730 addr = scsiq->sg_head->sg_list[i].addr +
731 scsiq->sg_head->sg_list[i].bytes;
732
733 if ((addr & 0x0003) != 0)
734 panic("adv_execute_scsi_queue: SG "
735 "with odd address or byte count");
736 }
737 }
738 #endif
739 p_data_addr =
740 &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
741 p_data_bcount =
742 &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;
743
744 n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
745 scsiq->sg_head->queue_cnt = n_q_required - 1;
746 } else {
747 p_data_addr = &scsiq->q1.data_addr;
748 p_data_bcount = &scsiq->q1.data_cnt;
749 n_q_required = 1;
750 }
751
752 disable_syn_offset_one_fix = FALSE;
753
754 if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
755 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
756
757 if (datalen != 0) {
758 if (datalen < 512) {
759 disable_syn_offset_one_fix = TRUE;
760 } else {
761 if (scsiq->cdbptr[0] == INQUIRY
762 || scsiq->cdbptr[0] == REQUEST_SENSE
763 || scsiq->cdbptr[0] == READ_CAPACITY
764 || scsiq->cdbptr[0] == MODE_SELECT_6
765 || scsiq->cdbptr[0] == MODE_SENSE_6
766 || scsiq->cdbptr[0] == MODE_SENSE_10
767 || scsiq->cdbptr[0] == MODE_SELECT_10
768 || scsiq->cdbptr[0] == READ_TOC) {
769 disable_syn_offset_one_fix = TRUE;
770 }
771 }
772 }
773 }
774
775 if (disable_syn_offset_one_fix) {
776 scsiq->q2.tag_code &=
777 ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
778 scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
779 | ADV_TAG_FLAG_DISABLE_DISCONNECT);
780 }
781
782 if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
783 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
784 u_int8_t extra_bytes;
785
786 addr = *p_data_addr + *p_data_bcount;
787 extra_bytes = addr & 0x0003;
788 if (extra_bytes != 0
789 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
790 || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
791 scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
792 scsiq->q1.extra_bytes = extra_bytes;
793 *p_data_bcount -= extra_bytes;
794 }
795 }
796
797 if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
798 || ((scsiq->q1.cntl & QC_URGENT) != 0))
799 retval = adv_send_scsi_queue(adv, scsiq, n_q_required);
800
801 return (retval);
802 }
803
804
805 u_int8_t
806 adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
807 struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
808 {
809 u_int16_t val;
810 u_int8_t sg_queue_cnt;
811
812 adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
813 (u_int16_t *)scsiq,
814 (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);
815
816 #if BYTE_ORDER == BIG_ENDIAN
817 adv_adj_endian_qdone_info(scsiq);
818 #endif
819
820 val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
821 scsiq->q_status = val & 0xFF;
822 	scsiq->q_no = (val >> 8) & 0xFF;
823
824 val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
825 scsiq->cntl = val & 0xFF;
826 sg_queue_cnt = (val >> 8) & 0xFF;
827
828 val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
829 scsiq->sense_len = val & 0xFF;
830 scsiq->extra_bytes = (val >> 8) & 0xFF;
831
832 /*
833 * Due to a bug in accessing LRAM on the 940UA, the residual
834 * is split into separate high and low 16bit quantities.
835 */
836 scsiq->remain_bytes =
837 adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
838 scsiq->remain_bytes |=
839 adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;
840
841 /*
842 * XXX Is this just a safeguard or will the counter really
843 * have bogus upper bits?
844 */
845 scsiq->remain_bytes &= max_dma_count;
846
847 return (sg_queue_cnt);
848 }
849
850 int
851 adv_start_chip(struct adv_softc *adv)
852 {
853 ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
854 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
855 return (0);
856 return (1);
857 }
858
859 int
860 adv_stop_execution(struct adv_softc *adv)
861 {
862 int count;
863
864 count = 0;
865 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
866 adv_write_lram_8(adv, ADV_STOP_CODE_B,
867 ADV_STOP_REQ_RISC_STOP);
868 do {
869 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
870 ADV_STOP_ACK_RISC_STOP) {
871 return (1);
872 }
873 DELAY(1000);
874 } while (count++ < 20);
875 }
876 return (0);
877 }
878
879 int
880 adv_is_chip_halted(struct adv_softc *adv)
881 {
882 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
883 if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
884 return (1);
885 }
886 }
887 return (0);
888 }
889
890 /*
891 * XXX The numeric constants and the loops in this routine
892 * need to be documented.
893 */
894 void
895 adv_ack_interrupt(struct adv_softc *adv)
896 {
897 u_int8_t host_flag;
898 u_int8_t risc_flag;
899 int loop;
900
901 loop = 0;
902 do {
903 risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
904 if (loop++ > 0x7FFF) {
905 break;
906 }
907 } while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);
908
909 host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
910 adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
911 host_flag | ADV_HOST_FLAG_ACK_INT);
912
913 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
914 loop = 0;
915 while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
916 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
917 if (loop++ > 3) {
918 break;
919 }
920 }
921
922 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
923 }
924
925 /*
926 * Handle all conditions that may halt the chip waiting
927 * for us to intervene.
928 */
929 void
930 adv_isr_chip_halted(struct adv_softc *adv)
931 {
932 u_int16_t int_halt_code;
933 u_int16_t halt_q_addr;
934 target_bit_vector target_mask;
935 target_bit_vector scsi_busy;
936 u_int8_t halt_qp;
937 u_int8_t target_ix;
938 u_int8_t q_cntl;
939 u_int8_t tid_no;
940
941 int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
942 halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
943 halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
944 target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
945 q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
946 tid_no = ADV_TIX_TO_TID(target_ix);
947 target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
948 if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
949 /*
950 * Temporarily disable the async fix by removing
951 * this target from the list of affected targets,
952 * setting our async rate, and then putting us
953 * back into the mask.
954 */
955 adv->fix_asyn_xfer &= ~target_mask;
956 adv_set_syncrate(adv, /*struct cam_path */NULL,
957 tid_no, /*period*/0, /*offset*/0,
958 ADV_TRANS_ACTIVE);
959 adv->fix_asyn_xfer |= target_mask;
960 } else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
961 adv_set_syncrate(adv, /*struct cam_path */NULL,
962 tid_no, /*period*/0, /*offset*/0,
963 ADV_TRANS_ACTIVE);
964 } else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
965 adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
966 target_mask, tid_no);
967 } else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
968 struct adv_target_transinfo* tinfo;
969 union ccb *ccb;
970 u_int32_t cinfo_index;
971 u_int8_t tag_code;
972 u_int8_t q_status;
973
974 tinfo = &adv->tinfo[tid_no];
975 q_cntl |= QC_REQ_SENSE;
976
977 /* Renegotiate if appropriate. */
978 adv_set_syncrate(adv, /*struct cam_path */NULL,
979 tid_no, /*period*/0, /*offset*/0,
980 ADV_TRANS_CUR);
981 if (tinfo->current.period != tinfo->goal.period) {
982 adv_msgout_sdtr(adv, tinfo->goal.period,
983 tinfo->goal.offset);
984 q_cntl |= QC_MSG_OUT;
985 }
986 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
987
988 /* Don't tag request sense commands */
989 tag_code = adv_read_lram_8(adv,
990 halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
991 tag_code &=
992 ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
993
994 if ((adv->fix_asyn_xfer & target_mask) != 0
995 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
996 tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
997 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
998 }
999 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
1000 tag_code);
1001 q_status = adv_read_lram_8(adv,
1002 halt_q_addr + ADV_SCSIQ_B_STATUS);
1003 q_status |= (QS_READY | QS_BUSY);
1004 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
1005 q_status);
1006 /*
1007 * Freeze the devq until we can handle the sense condition.
1008 */
1009 cinfo_index =
1010 adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1011 ccb = adv->ccb_infos[cinfo_index].ccb;
1012 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1013 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1014 adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1015 /*ccb*/NULL, CAM_REQUEUE_REQ,
1016 /*queued_only*/TRUE);
1017 scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1018 scsi_busy &= ~target_mask;
1019 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1020 /*
1021 * Ensure we have enough time to actually
1022 * retrieve the sense.
1023 */
1024 untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
1025 ccb->ccb_h.timeout_ch =
1026 timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
1027 } else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
1028 struct ext_msg out_msg;
1029
1030 adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1031 (u_int16_t *) &out_msg,
1032 sizeof(out_msg)/2);
1033
1034 if ((out_msg.msg_type == MSG_EXTENDED)
1035 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
1036 && (out_msg.msg_req == MSG_EXT_SDTR)) {
1037
1038 /* Revert to Async */
1039 adv_set_syncrate(adv, /*struct cam_path */NULL,
1040 tid_no, /*period*/0, /*offset*/0,
1041 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1042 }
1043 q_cntl &= ~QC_MSG_OUT;
1044 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1045 } else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
1046 u_int8_t scsi_status;
1047 union ccb *ccb;
1048 u_int32_t cinfo_index;
1049
1050 scsi_status = adv_read_lram_8(adv, halt_q_addr
1051 + ADV_SCSIQ_SCSI_STATUS);
1052 cinfo_index =
1053 adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1054 ccb = adv->ccb_infos[cinfo_index].ccb;
1055 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1056 ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
1057 ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
1058 adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1059 /*ccb*/NULL, CAM_REQUEUE_REQ,
1060 /*queued_only*/TRUE);
1061 scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1062 scsi_busy &= ~target_mask;
1063 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1064 } else {
1065 printf("Unhandled Halt Code %x\n", int_halt_code);
1066 }
1067 adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1068 }
1069
1070 void
1071 adv_sdtr_to_period_offset(struct adv_softc *adv,
1072 u_int8_t sync_data, u_int8_t *period,
1073 u_int8_t *offset, int tid)
1074 {
1075 	if ((adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)) != 0
1076 	    && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1077 *period = *offset = 0;
1078 } else {
1079 *period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1080 *offset = sync_data & 0xF;
1081 }
1082 }
1083
1084 void
1085 adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
1086 u_int tid, u_int period, u_int offset, u_int type)
1087 {
1088 struct adv_target_transinfo* tinfo;
1089 u_int old_period;
1090 u_int old_offset;
1091 u_int8_t sdtr_data;
1092
1093 tinfo = &adv->tinfo[tid];
1094
1095 /* Filter our input */
1096 sdtr_data = adv_period_offset_to_sdtr(adv, &period,
1097 &offset, tid);
1098
1099 old_period = tinfo->current.period;
1100 old_offset = tinfo->current.offset;
1101
1102 if ((type & ADV_TRANS_CUR) != 0
1103 && ((old_period != period || old_offset != offset)
1104 || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
1105 int s;
1106 int halted;
1107
1108 s = splcam();
1109 halted = adv_is_chip_halted(adv);
1110 if (halted == 0)
1111 /* Must halt the chip first */
1112 adv_host_req_chip_halt(adv);
1113
1114 /* Update current hardware settings */
1115 adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);
1116
1117 /*
1118 * If a target can run in sync mode, we don't need
1119 * to check it for sync problems.
1120 */
1121 if (offset != 0)
1122 adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);
1123
1124 if (halted == 0)
1125 /* Start the chip again */
1126 adv_start_chip(adv);
1127
1128 splx(s);
1129 tinfo->current.period = period;
1130 tinfo->current.offset = offset;
1131
1132 if (path != NULL) {
1133 /*
1134 * Tell the SCSI layer about the
1135 * new transfer parameters.
1136 */
1137 			struct ccb_trans_settings neg;
1138 			struct ccb_trans_settings_spi *spi;
1139 			memset(&neg, 0, sizeof (neg));
1140 			spi = &neg.xport_specific.spi;
1141
1142 neg.protocol = PROTO_SCSI;
1143 neg.protocol_version = SCSI_REV_2;
1144 neg.transport = XPORT_SPI;
1145 neg.transport_version = 2;
1146
1147 spi->sync_offset = offset;
1148 spi->sync_period = period;
1149 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
1150 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
1151 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1152 xpt_async(AC_TRANSFER_NEG, path, &neg);
1153 }
1154 }
1155
1156 if ((type & ADV_TRANS_GOAL) != 0) {
1157 tinfo->goal.period = period;
1158 tinfo->goal.offset = offset;
1159 }
1160
1161 if ((type & ADV_TRANS_USER) != 0) {
1162 tinfo->user.period = period;
1163 tinfo->user.offset = offset;
1164 }
1165 }
1166
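/*
 * Convert a period/offset pair into the chip's SDTR data byte: the high
 * nibble indexes our period table and the low nibble is the REQ/ACK
 * offset.  The offset is clamped to ADV_SYN_MAX_OFFSET; if no suitable
 * synchronous rate exists, both values are forced to zero (async).
 */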
1167 u_int8_t
1168 adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1169 u_int *offset, int tid)
1170 {
1171 u_int i;
1172 u_int dummy_offset;
1173 u_int dummy_period;
1174
1175 if (offset == NULL) {
1176 dummy_offset = 0;
1177 offset = &dummy_offset;
1178 }
1179
1180 if (period == NULL) {
1181 dummy_period = 0;
1182 period = &dummy_period;
1183 }
1184
1185 *offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1186 if (*period != 0 && *offset != 0) {
1187 for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1188 if (*period <= adv->sdtr_period_tbl[i]) {
1189 /*
1190 * When responding to a target that requests
1191 * sync, the requested rate may fall between
1192 * two rates that we can output, but still be
1193 * a rate that we can receive. Because of this,
1194 * we want to respond to the target with
1195 * the same rate that it sent to us even
1196 * if the period we use to send data to it
1197 * is lower. Only lower the response period
1198 * if we must.
1199 */
1200 if (i == 0 /* Our maximum rate */)
1201 *period = adv->sdtr_period_tbl[0];
1202 return ((i << 4) | *offset);
1203 }
1204 }
1205 }
1206
1207 /* Must go async */
1208 *period = 0;
1209 *offset = 0;
1210 if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1211 return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1212 return (0);
1213 }
1214
1215 /* Internal Routines */
1216
1217 static void
1218 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1219 u_int16_t *buffer, int count)
1220 {
1221 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1222 ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
1223 }
1224
1225 static void
1226 adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1227 u_int16_t *buffer, int count)
1228 {
1229 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1230 ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
1231 }
1232
1233 static void
1234 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
1235 u_int16_t set_value, int count)
1236 {
1237 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1238 bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
1239 set_value, count);
1240 }
1241
1242 static u_int32_t
1243 adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1244 {
1245 u_int32_t sum;
1246 int i;
1247
1248 sum = 0;
1249 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1250 for (i = 0; i < count; i++)
1251 sum += ADV_INW(adv, ADV_LRAM_DATA);
1252 return (sum);
1253 }
1254
1255 static int
1256 adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1257 u_int16_t value)
1258 {
1259 int retval;
1260
1261 retval = 0;
1262 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1263 ADV_OUTW(adv, ADV_LRAM_DATA, value);
1264 DELAY(10000);
1265 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1266 if (value != ADV_INW(adv, ADV_LRAM_DATA))
1267 retval = 1;
1268 return (retval);
1269 }
1270
1271 static u_int32_t
1272 adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
1273 {
1274 u_int16_t val_low, val_high;
1275
1276 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1277
1278 #if BYTE_ORDER == BIG_ENDIAN
1279 val_high = ADV_INW(adv, ADV_LRAM_DATA);
1280 val_low = ADV_INW(adv, ADV_LRAM_DATA);
1281 #else
1282 val_low = ADV_INW(adv, ADV_LRAM_DATA);
1283 val_high = ADV_INW(adv, ADV_LRAM_DATA);
1284 #endif
1285
1286 return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
1287 }
1288
1289 static void
1290 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
1291 {
1292 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1293
1294 #if BYTE_ORDER == BIG_ENDIAN
1295 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1296 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1297 #else
1298 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1299 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1300 #endif
1301 }
1302
1303 static void
1304 adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
1305 u_int32_t *buffer, int count)
1306 {
1307 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1308 ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
1309 }
1310
1311 static u_int16_t
1312 adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
1313 {
1314 u_int16_t read_wval;
1315 u_int8_t cmd_reg;
1316
1317 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1318 DELAY(1000);
1319 cmd_reg = addr | ADV_EEPROM_CMD_READ;
1320 adv_write_eeprom_cmd_reg(adv, cmd_reg);
1321 DELAY(1000);
1322 read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
1323 DELAY(1000);
1324 return (read_wval);
1325 }
1326
1327 static u_int16_t
1328 adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
1329 {
1330 u_int16_t read_value;
1331
1332 read_value = adv_read_eeprom_16(adv, addr);
1333 if (read_value != value) {
1334 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
1335 DELAY(1000);
1336
1337 ADV_OUTW(adv, ADV_EEPROM_DATA, value);
1338 DELAY(1000);
1339
1340 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
1341 DELAY(20 * 1000);
1342
1343 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1344 DELAY(1000);
1345 read_value = adv_read_eeprom_16(adv, addr);
1346 }
1347 return (read_value);
1348 }
1349
1350 static int
1351 adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1352 {
1353 u_int8_t read_back;
1354 int retry;
1355
1356 retry = 0;
1357 while (1) {
1358 ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1359 DELAY(1000);
1360 read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1361 if (read_back == cmd_reg) {
1362 return (1);
1363 }
1364 if (retry++ > ADV_EEPROM_MAX_RETRY) {
1365 return (0);
1366 }
1367 }
1368 }
1369
1370 static int
1371 adv_set_eeprom_config_once(struct adv_softc *adv,
1372 struct adv_eeprom_config *eeprom_config)
1373 {
1374 int n_error;
1375 u_int16_t *wbuf;
1376 u_int16_t sum;
1377 u_int8_t s_addr;
1378 u_int8_t cfg_beg;
1379 u_int8_t cfg_end;
1380
1381 wbuf = (u_int16_t *)eeprom_config;
1382 n_error = 0;
1383 sum = 0;
1384 for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
1385 sum += *wbuf;
1386 if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
1387 n_error++;
1388 }
1389 }
1390 if (adv->type & ADV_VL) {
1391 cfg_beg = ADV_EEPROM_CFG_BEG_VL;
1392 cfg_end = ADV_EEPROM_MAX_ADDR_VL;
1393 } else {
1394 cfg_beg = ADV_EEPROM_CFG_BEG;
1395 cfg_end = ADV_EEPROM_MAX_ADDR;
1396 }
1397
1398 for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
1399 sum += *wbuf;
1400 if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
1401 n_error++;
1402 }
1403 }
1404 *wbuf = sum;
1405 if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
1406 n_error++;
1407 }
1408 wbuf = (u_int16_t *)eeprom_config;
1409 for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
1410 if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
1411 n_error++;
1412 }
1413 }
1414 for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
1415 if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
1416 n_error++;
1417 }
1418 }
1419 return (n_error);
1420 }
1421
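/*
 * Copy the microcode image into LRAM.  Returns our own checksum of the
 * loaded image (compared against adv_mcode_chksum by the caller) and
 * stores a separate checksum of the code section, along with the image
 * size, in LRAM, presumably for the microcode's own use.
 */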
1422 static u_int32_t
1423 adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
1424 u_int16_t *mcode_buf, u_int16_t mcode_size)
1425 {
1426 u_int32_t chksum;
1427 u_int16_t mcode_lram_size;
1428 u_int16_t mcode_chksum;
1429
1430 mcode_lram_size = mcode_size >> 1;
1431 /* XXX Why zero the memory just before you write the whole thing?? */
1432 adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
1433 adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);
1434
1435 chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
1436 mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
1437 ((mcode_size - s_addr
1438 - ADV_CODE_SEC_BEG) >> 1));
1439 adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
1440 adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
1441 return (chksum);
1442 }
1443
1444 static void
1445 adv_reinit_lram(struct adv_softc *adv) {
1446 adv_init_lram(adv);
1447 adv_init_qlink_var(adv);
1448 }
1449
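/*
 * Build the queue blocks in LRAM: zero the region, then link queues 1
 * through max_openings together via their forward/backward pointers (the
 * last forward link is ADV_QLINK_END).  The few blocks past max_openings
 * point only at themselves; adv_init_qlink_var() uses them as the busy
 * and disconnect queue heads.
 */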
1450 static void
1451 adv_init_lram(struct adv_softc *adv)
1452 {
1453 u_int8_t i;
1454 u_int16_t s_addr;
1455
1456 adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
1457 (((adv->max_openings + 2 + 1) * 64) >> 1));
1458
1459 i = ADV_MIN_ACTIVE_QNO;
1460 s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;
1461
1462 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1463 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
1464 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1465 i++;
1466 s_addr += ADV_QBLK_SIZE;
1467 for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
1468 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1469 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
1470 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1471 }
1472
1473 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
1474 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
1475 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
1476 i++;
1477 s_addr += ADV_QBLK_SIZE;
1478
1479 for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
1480 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
1481 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
1482 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1483 }
1484 }
1485
1486 static int
1487 adv_init_microcode_var(struct adv_softc *adv)
1488 {
1489 int i;
1490
1491 for (i = 0; i <= ADV_MAX_TID; i++) {
1492
1493 /* Start out async all around */
1494 adv_set_syncrate(adv, /*path*/NULL,
1495 i, 0, 0,
1496 ADV_TRANS_GOAL|ADV_TRANS_CUR);
1497 }
1498
1499 adv_init_qlink_var(adv);
1500
1501 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
1502 adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);
1503
1504 adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);
1505
1506 adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);
1507
1508 ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1509 if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
1510 printf("adv%d: Unable to set program counter. Aborting.\n",
1511 adv->unit);
1512 return (1);
1513 }
1514 return (0);
1515 }
1516
1517 static void
1518 adv_init_qlink_var(struct adv_softc *adv)
1519 {
1520 int i;
1521 u_int16_t lram_addr;
1522
1523 adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
1524 adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);
1525
1526 adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
1527 adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);
1528
1529 adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
1530 (u_int8_t)((int) adv->max_openings + 1));
1531 adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
1532 (u_int8_t)((int) adv->max_openings + 2));
1533
1534 adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);
1535
1536 adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
1537 adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1538 adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
1539 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
1540 adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
1541 adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);
1542
1543 lram_addr = ADV_QADR_BEG;
1544 for (i = 0; i < 32; i++, lram_addr += 2)
1545 adv_write_lram_16(adv, lram_addr, 0);
1546 }
1547
1548 static void
1549 adv_disable_interrupt(struct adv_softc *adv)
1550 {
1551 u_int16_t cfg;
1552
1553 cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1554 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1555 }
1556
1557 static void
1558 adv_enable_interrupt(struct adv_softc *adv)
1559 {
1560 u_int16_t cfg;
1561
1562 cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1563 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1564 }
1565
1566 static void
1567 adv_toggle_irq_act(struct adv_softc *adv)
1568 {
1569 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
1570 ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
1571 }
1572
1573 void
1574 adv_start_execution(struct adv_softc *adv)
1575 {
1576 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
1577 adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
1578 }
1579 }
1580
1581 int
1582 adv_stop_chip(struct adv_softc *adv)
1583 {
1584 u_int8_t cc_val;
1585
1586 cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1587 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1588 ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1589 adv_set_chip_ih(adv, ADV_INS_HALT);
1590 adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1591 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1592 return (0);
1593 }
1594 return (1);
1595 }
1596
1597 static int
1598 adv_host_req_chip_halt(struct adv_softc *adv)
1599 {
1600 int count;
1601 u_int8_t saved_stop_code;
1602
1603 if (adv_is_chip_halted(adv))
1604 return (1);
1605
1606 count = 0;
1607 saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
1608 adv_write_lram_8(adv, ADVV_STOP_CODE_B,
1609 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
1610 while (adv_is_chip_halted(adv) == 0
1611 && count++ < 2000)
1612 ;
1613
1614 adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
1615 return (count < 2000);
1616 }
1617
1618 static void
1619 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
1620 {
1621 adv_set_bank(adv, 1);
1622 ADV_OUTW(adv, ADV_REG_IH, ins_code);
1623 adv_set_bank(adv, 0);
1624 }
1625
1626 #if 0
1627 static u_int8_t
1628 adv_get_chip_scsi_ctrl(struct adv_softc *adv)
1629 {
1630 u_int8_t scsi_ctrl;
1631
1632 adv_set_bank(adv, 1);
1633 scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
1634 adv_set_bank(adv, 0);
1635 return (scsi_ctrl);
1636 }
1637 #endif
1638
1639 /*
1640 * XXX Looks like more padding issues in this routine as well.
1641 * There has to be a way to turn this into an insw.
1642 */
1643 static void
1644 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1645 u_int16_t *inbuf, int words)
1646 {
1647 int i;
1648
1649 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1650 for (i = 0; i < words; i++, inbuf++) {
1651 if (i == 5) {
1652 continue;
1653 }
1654 *inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1655 }
1656 }
1657
1658 static u_int
1659 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1660 {
1661 u_int cur_used_qs;
1662 u_int cur_free_qs;
1663
1664 cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1665
1666 if ((cur_used_qs + n_qs) <= adv->max_openings) {
1667 cur_free_qs = adv->max_openings - cur_used_qs;
1668 return (cur_free_qs);
1669 }
1670 adv->openings_needed = n_qs;
1671 return (0);
1672 }
1673
1674 static u_int8_t
1675 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1676 u_int8_t n_free_q)
1677 {
1678 int i;
1679
1680 for (i = 0; i < n_free_q; i++) {
1681 free_q_head = adv_alloc_free_queue(adv, free_q_head);
1682 if (free_q_head == ADV_QLINK_END)
1683 break;
1684 }
1685 return (free_q_head);
1686 }
1687
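/*
 * Return the forward link of the given free queue, or ADV_QLINK_END if
 * the queue is not actually free (its QS_READY bit is set).
 */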
1688 static u_int8_t
1689 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1690 {
1691 u_int16_t q_addr;
1692 u_int8_t next_qp;
1693 u_int8_t q_status;
1694
1695 next_qp = ADV_QLINK_END;
1696 q_addr = ADV_QNO_TO_QADDR(free_q_head);
1697 q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);
1698
1699 if ((q_status & QS_READY) == 0)
1700 next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1701
1702 return (next_qp);
1703 }
1704
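/*
 * Claim n_q_required queues starting at the free list head, point the
 * request's sense buffer at the slot reserved for that queue number, and
 * hand the request to the chip.  Returns 0 on success, 1 if the free
 * list ran out.
 */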
1705 static int
1706 adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1707 u_int8_t n_q_required)
1708 {
1709 u_int8_t free_q_head;
1710 u_int8_t next_qp;
1711 u_int8_t tid_no;
1712 u_int8_t target_ix;
1713 int retval;
1714
1715 retval = 1;
1716 target_ix = scsiq->q2.target_ix;
1717 tid_no = ADV_TIX_TO_TID(target_ix);
1718 free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1719 if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1720 != ADV_QLINK_END) {
1721 scsiq->q1.q_no = free_q_head;
1722
1723 /*
1724 		 * Now that we know our queue number, point our sense
1725 		 * buffer pointer at the bus dma mapped area reserved for
1726 		 * this queue, so sense data can be DMAed directly into it.
1727 */
1728 scsiq->q1.sense_addr = adv->sense_physbase
1729 + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
1730 adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1731 adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1732 adv->cur_active += n_q_required;
1733 retval = 0;
1734 }
1735 return (retval);
1736 }
1737
1738
1739 static void
1740 adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1741 u_int q_no)
1742 {
1743 u_int8_t sg_list_dwords;
1744 u_int8_t sg_index, i;
1745 u_int8_t sg_entry_cnt;
1746 u_int8_t next_qp;
1747 u_int16_t q_addr;
1748 struct adv_sg_head *sg_head;
1749 struct adv_sg_list_q scsi_sg_q;
1750
1751 sg_head = scsiq->sg_head;
1752
1753 if (sg_head) {
1754 sg_entry_cnt = sg_head->entry_cnt - 1;
1755 #ifdef DIAGNOSTIC
1756 if (sg_entry_cnt == 0)
1757 panic("adv_put_ready_sg_list_queue: ScsiQ with "
1758 "a SG list but only one element");
1759 if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
1760 panic("adv_put_ready_sg_list_queue: ScsiQ with "
1761 "a SG list but QC_SG_HEAD not set");
1762 #endif
1763 q_addr = ADV_QNO_TO_QADDR(q_no);
1764 sg_index = 1;
1765 scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
1766 scsi_sg_q.sg_head_qp = q_no;
1767 scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
1768 for (i = 0; i < sg_head->queue_cnt; i++) {
1769 u_int8_t segs_this_q;
1770
1771 if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
1772 segs_this_q = ADV_SG_LIST_PER_Q;
1773 else {
1774 /* This will be the last segment then */
1775 segs_this_q = sg_entry_cnt;
1776 scsi_sg_q.cntl |= QCSG_SG_XFER_END;
1777 }
1778 scsi_sg_q.seq_no = i + 1;
1779 sg_list_dwords = segs_this_q << 1;
1780 if (i == 0) {
1781 scsi_sg_q.sg_list_cnt = segs_this_q;
1782 scsi_sg_q.sg_cur_list_cnt = segs_this_q;
1783 } else {
1784 scsi_sg_q.sg_list_cnt = segs_this_q - 1;
1785 scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
1786 }
1787 next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1788 scsi_sg_q.q_no = next_qp;
1789 q_addr = ADV_QNO_TO_QADDR(next_qp);
1790
1791 adv_write_lram_16_multi(adv,
1792 q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
1793 (u_int16_t *)&scsi_sg_q,
1794 sizeof(scsi_sg_q) >> 1);
1795 adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
1796 (u_int32_t *)&sg_head->sg_list[sg_index],
1797 sg_list_dwords);
1798 sg_entry_cnt -= segs_this_q;
1799 sg_index += ADV_SG_LIST_PER_Q;
1800 }
1801 }
1802 adv_put_ready_queue(adv, scsiq, q_no);
1803 }
1804
1805 static void
1806 adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1807 u_int q_no)
1808 {
1809 struct adv_target_transinfo* tinfo;
1810 u_int q_addr;
1811 u_int tid_no;
1812
1813 tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
1814 tinfo = &adv->tinfo[tid_no];
1815 if ((tinfo->current.period != tinfo->goal.period)
1816 || (tinfo->current.offset != tinfo->goal.offset)) {
1817
1818 adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
1819 scsiq->q1.cntl |= QC_MSG_OUT;
1820 }
1821 q_addr = ADV_QNO_TO_QADDR(q_no);
1822
1823 scsiq->q1.status = QS_FREE;
1824
1825 adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
1826 (u_int16_t *)scsiq->cdbptr,
1827 scsiq->q2.cdb_len >> 1);
1828
1829 #if BYTE_ORDER == BIG_ENDIAN
1830 adv_adj_scsiq_endian(scsiq);
1831 #endif
1832
1833 adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
1834 (u_int16_t *) &scsiq->q1.cntl,
1835 ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);
1836
1837 #ifdef CC_WRITE_IO_COUNT
1838 adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
1839 adv->req_count);
1840 #endif
1841
1842 #ifdef CC_CLEAR_DMA_REMAIN
1843
1844 adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
1845 adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
1846 #endif
1847
1848 adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
1849 (scsiq->q1.q_no << 8) | QS_READY);
1850 }
1851
1852 static void
1853 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
1854 u_int16_t *buffer, int words)
1855 {
1856 int i;
1857
1858 /*
1859 * XXX This routine makes *gross* assumptions
1860 * about padding in the data structures.
1861 * Either the data structures should have explicit
1862 * padding members added, or they should have padding
1863 * turned off via compiler attributes depending on
1864 * which yields better overall performance. My hunch
1865 * would be that turning off padding would be the
1866 * faster approach as an outsw is much faster than
1867 * this crude loop and accessing un-aligned data
1868 * members isn't *that* expensive. The other choice
1869 * would be to modify the ASC script so that the
1870 	 * adv_scsiq_1 structure can be re-arranged so
1871 * padding isn't required.
1872 */
1873 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1874 for (i = 0; i < words; i++, buffer++) {
1875 if (i == 2 || i == 10) {
1876 continue;
1877 }
1878 ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
1879 }
1880 }
1881
1882 #if BYTE_ORDER == BIG_ENDIAN
1883 void
1884 adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
1885 {
1886
1887 panic("adv(4) not supported on big-endian machines.\n");
1888 }
1889
1890 void
1891 adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
1892 {
1893
1894 panic("adv(4) not supported on big-endian machines.\n");
1895 }
1896 #endif
1897
1898 static void
1899 adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
1900 u_int8_t q_cntl, target_bit_vector target_mask,
1901 int tid_no)
1902 {
1903 struct ext_msg ext_msg;
1904
1905 adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
1906 sizeof(ext_msg) >> 1);
1907 if ((ext_msg.msg_type == MSG_EXTENDED)
1908 && (ext_msg.msg_req == MSG_EXT_SDTR)
1909 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
1910 union ccb *ccb;
1911 struct adv_target_transinfo* tinfo;
1912 u_int32_t cinfo_index;
1913 u_int period;
1914 u_int offset;
1915 int sdtr_accept;
1916 u_int8_t orig_offset;
1917
1918 cinfo_index =
1919 adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1920 ccb = adv->ccb_infos[cinfo_index].ccb;
1921 tinfo = &adv->tinfo[tid_no];
1922 sdtr_accept = TRUE;
1923
1924 orig_offset = ext_msg.req_ack_offset;
1925 if (ext_msg.xfer_period < tinfo->goal.period) {
1926 sdtr_accept = FALSE;
1927 ext_msg.xfer_period = tinfo->goal.period;
1928 }
1929
1930 /* Perform range checking */
1931 period = ext_msg.xfer_period;
1932 offset = ext_msg.req_ack_offset;
1933 adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
1934 ext_msg.xfer_period = period;
1935 ext_msg.req_ack_offset = offset;
1936
1937 /* Record our current sync settings */
1938 adv_set_syncrate(adv, ccb->ccb_h.path,
1939 tid_no, ext_msg.xfer_period,
1940 ext_msg.req_ack_offset,
1941 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1942
1943 /* Offset too high or large period forced async */
1944 if (orig_offset != ext_msg.req_ack_offset)
1945 sdtr_accept = FALSE;
1946
1947 if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
1948 /* Valid response to our requested negotiation */
1949 q_cntl &= ~QC_MSG_OUT;
1950 } else {
1951 /* Must Respond */
1952 q_cntl |= QC_MSG_OUT;
1953 adv_msgout_sdtr(adv, ext_msg.xfer_period,
1954 ext_msg.req_ack_offset);
1955 }
1956
1957 } else if (ext_msg.msg_type == MSG_EXTENDED
1958 && ext_msg.msg_req == MSG_EXT_WDTR
1959 && ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
1960
1961 ext_msg.wdtr_width = 0;
1962 adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1963 (u_int16_t *)&ext_msg,
1964 sizeof(ext_msg) >> 1);
1965 q_cntl |= QC_MSG_OUT;
1966 } else {
1967
1968 ext_msg.msg_type = MSG_MESSAGE_REJECT;
1969 adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1970 (u_int16_t *)&ext_msg,
1971 sizeof(ext_msg) >> 1);
1972 q_cntl |= QC_MSG_OUT;
1973 }
1974 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1975 }
1976
1977 static void
1978 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1979 u_int8_t sdtr_offset)
1980 {
1981 struct ext_msg sdtr_buf;
1982
1983 sdtr_buf.msg_type = MSG_EXTENDED;
1984 sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1985 sdtr_buf.msg_req = MSG_EXT_SDTR;
1986 sdtr_buf.xfer_period = sdtr_period;
1987 sdtr_offset &= ADV_SYN_MAX_OFFSET;
1988 sdtr_buf.req_ack_offset = sdtr_offset;
1989 adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1990 (u_int16_t *) &sdtr_buf,
1991 sizeof(sdtr_buf) / 2);
1992 }
1993
1994 int
1995 adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
1996 u_int32_t status, int queued_only)
1997 {
1998 u_int16_t q_addr;
1999 u_int8_t q_no;
2000 struct adv_q_done_info scsiq_buf;
2001 struct adv_q_done_info *scsiq;
2002 u_int8_t target_ix;
2003 int count;
2004
2005 scsiq = &scsiq_buf;
2006 target_ix = ADV_TIDLUN_TO_IX(target, lun);
2007 count = 0;
2008 for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
2009 struct adv_ccb_info *ccb_info;
2010 q_addr = ADV_QNO_TO_QADDR(q_no);
2011
2012 adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
2013 ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
2014 if (((scsiq->q_status & QS_READY) != 0)
2015 && ((scsiq->q_status & QS_ABORTED) == 0)
2016 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
2017 && (scsiq->d2.target_ix == target_ix)
2018 && (queued_only == 0
2019 || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
2020 && (ccb == NULL || (ccb == ccb_info->ccb))) {
2021 union ccb *aborted_ccb;
2022 struct adv_ccb_info *cinfo;
2023
2024 scsiq->q_status |= QS_ABORTED;
2025 adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
2026 scsiq->q_status);
2027 aborted_ccb = ccb_info->ccb;
2028 /* Don't clobber earlier error codes */
2029 if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
2030 == CAM_REQ_INPROG)
2031 aborted_ccb->ccb_h.status |= status;
2032 cinfo = (struct adv_ccb_info *)
2033 aborted_ccb->ccb_h.ccb_cinfo_ptr;
2034 cinfo->state |= ACCB_ABORT_QUEUED;
2035 count++;
2036 }
2037 }
2038 return (count);
2039 }
2040
2041 int
2042 adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
2043 {
2044 int count;
2045 int i;
2046 union ccb *ccb;
2047
2048 i = 200;
2049 while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
2050 && i--)
2051 DELAY(1000);
2052 adv_reset_chip(adv, initiate_bus_reset);
2053 adv_reinit_lram(adv);
2054 for (i = 0; i <= ADV_MAX_TID; i++)
2055 adv_set_syncrate(adv, NULL, i, /*period*/0,
2056 /*offset*/0, ADV_TRANS_CUR);
2057 ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
2058
2059 	/* Tell the XPT layer that a bus reset occurred */
2060 if (adv->path != NULL)
2061 xpt_async(AC_BUS_RESET, adv->path, NULL);
2062
2063 count = 0;
2064 while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
2065 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
2066 ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2067 adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
2068 count++;
2069 }
2070
2071 adv_start_chip(adv);
2072 return (count);
2073 }
2074
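/*
 * Write the SDTR data byte for a particular target id: temporarily point
 * the bank 1 HOST_SCSIID register at the target, write the sync data via
 * the bank 0 SYN_OFFSET register, then restore the original selection.
 */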
2075 static void
2076 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
2077 {
2078 int orig_id;
2079
2080 adv_set_bank(adv, 1);
2081 orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
2082 ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
2083 if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
2084 adv_set_bank(adv, 0);
2085 ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
2086 }
2087 adv_set_bank(adv, 1);
2088 ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
2089 adv_set_bank(adv, 0);
2090 }