/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

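/*
 * Quirks are looked up by matching a device's INQUIRY data against
 * this table (see the AC_FOUND_DEVICE case in advasync() below).  The
 * first matching entry wins, and the trailing wildcard entry matches
 * any device, so a lookup can never fail.
 */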
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
			"TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		{
			T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};

struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define wdtr_width	u_ext_msg.wdtr.wdtr_width
/* The MDP argument is transmitted most significant byte first. */
#define mdp_b3		u_ext_msg.mdp.mdp[0]
#define mdp_b2		u_ext_msg.mdp.mdp[1]
#define mdp_b1		u_ext_msg.mdp.mdp[2]
#define mdp_b0		u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
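/*
 * SDTR data bytes in this driver encode the period table index in the
 * high nibble and the REQ/ACK offset in the low nibble (see
 * adv_period_offset_to_sdtr() below), so 0x41 selects the fifth table
 * entry with an offset of 1.
 */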

/* LRAM routines */
static void	adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
				       u_int16_t *buffer, int count);
static void	adv_write_lram_16_multi(struct adv_softc *adv,
					u_int16_t s_addr, u_int16_t *buffer,
					int count);
static void	adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				 u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	adv_write_and_verify_lram_16(struct adv_softc *adv,
					     u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				  u_int32_t value);
static void	adv_write_lram_32_multi(struct adv_softc *adv,
					u_int16_t s_addr, u_int32_t *buffer,
					int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					 u_int8_t cmd_reg);
static int	adv_set_eeprom_config_once(struct adv_softc *adv,
					   struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	adv_reinit_lram(struct adv_softc *adv);
static void	adv_init_lram(struct adv_softc *adv);
static int	adv_init_microcode_var(struct adv_softc *adv);
static void	adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	adv_disable_interrupt(struct adv_softc *adv);
static void	adv_enable_interrupt(struct adv_softc *adv);
static void	adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	adv_stop_chip(struct adv_softc *adv);
static int	adv_host_req_chip_halt(struct adv_softc *adv);
static void	adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t	adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int	adv_sgcount_to_qcount(int sgcount);

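/*
 * The first S/G element rides in the request's head queue; the
 * remaining sgcount - 1 elements are packed ADV_SG_LIST_PER_Q per
 * continuation queue.  The total is therefore a ceiling division
 * plus one for the head queue.
 */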
static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}

static void	adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *inbuf, int words);
static u_int	adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t	adv_alloc_free_queues(struct adv_softc *adv,
				      u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t	adv_alloc_free_queue(struct adv_softc *adv,
				     u_int8_t free_q_head);
static int	adv_send_scsi_queue(struct adv_softc *adv,
				    struct adv_scsi_q *scsiq,
				    u_int8_t n_q_required);
static void	adv_put_ready_sg_list_queue(struct adv_softc *adv,
					    struct adv_scsi_q *scsiq,
					    u_int q_no);
static void	adv_put_ready_queue(struct adv_softc *adv,
				    struct adv_scsi_q *scsiq, u_int q_no);
static void	adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			      u_int16_t *buffer, int words);

/* Messages */
static void	adv_handle_extmsg_in(struct adv_softc *adv,
				     u_int16_t halt_q_addr, u_int8_t q_cntl,
				     target_bit_vector target_id,
				     int tid);
static void	adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				u_int8_t sdtr_offset);
static void	adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
				       u_int8_t sdtr_data);

/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS) != 0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
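	/* FALLTHROUGH */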
	default:
		break;
	}
}

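/*
 * Select one of the chip's register banks.  Bank 1 is selected by
 * setting ADV_CC_BANK_ONE; bank 2 requires ADV_CC_DIAG as well.  Any
 * other argument selects bank 0.
 */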
void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
		     | ADV_CC_DIAG | ADV_CC_SCSI_RESET
		     | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t byte_data;
	u_int16_t word_data;

	/*
	 * LRAM is accessed on 16-bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

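/*
 * Read the EEPROM configuration into *eeprom_config and return the
 * 16-bit sum of the configuration words.  The stored checksum word is
 * read into the final word of the buffer but is not folded into the
 * sum, so the caller can compare the two to validate the contents.
 */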
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t sum;
	u_int16_t *wbuf;
	u_int8_t cfg_beg;
	u_int8_t cfg_end;
	u_int8_t s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip_and_scsi_bus(struct adv_softc *adv)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL,
		 ADV_CC_CHIP_RESET | ADV_CC_SCSI_RESET | ADV_CC_HALT);
	DELAY(200 * 1000);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	DELAY(200 * 1000);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t q_addr;
	u_int16_t saved_value;
	int success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}

int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t cfg_lsw;
	u_int8_t chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)((cfg_lsw >> 2) & 0x07);
		if ((chip_irq == 0)
		 || (chip_irq == 4)
		 || (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)((cfg_lsw >> 2) & 0x03);
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

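/*
 * Prepare a request and, if the microcode has enough free queues (or
 * the request is urgent), hand it to the chip.  Returns 0 if the
 * request was queued and 1, the default error case, otherwise.
 */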
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct adv_target_transinfo* tinfo;
	u_int32_t *p_data_addr;
	u_int32_t *p_data_bcount;
	int disable_syn_offset_one_fix;
	int retval;
	u_int n_q_required;
	u_int32_t addr;
	u_int8_t sg_entry_cnt;
	u_int8_t target_ix;
	u_int8_t sg_entry_cnt_minus_one;
	u_int8_t tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if (adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				       | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}

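/*
 * Copy the completion information for a finished request out of LRAM.
 * Returns the number of S/G continuation queues that were attached to
 * the request so that the caller can account for them as well.
 */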
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * XXX
	 * Due to a bug in accessing LRAM on the 940UA, we only pull
	 * the low 16 bits of residual information.  In the future,
	 * we'll want to allow transfers larger than 64K, but hopefully
	 * we'll get a new firmware revision from AdvanSys that
	 * addresses this problem before we up the transfer size.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
			    ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t host_flag;
	u_int8_t risc_flag;
	int loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t int_halt_code;
	u_int16_t halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t halt_qp;
	u_int8_t target_ix;
	u_int8_t q_cntl;
	u_int8_t tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct adv_target_transinfo* tinfo;
		union ccb *ccb;
		u_int8_t tag_code;
		u_int8_t q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				     | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
						     + ADV_SCSIQ_D_CCBPTR);
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
						     + ADV_SCSIQ_D_CCBPTR);
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

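/*
 * Decode a hardware SDTR data byte (period table index in the high
 * nibble, REQ/ACK offset in the low nibble) into a period/offset pair.
 * The special "async fix" value decodes as fully asynchronous.
 */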
void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

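/*
 * Update the transfer negotiation state for a target.  "type" selects
 * which of the current, goal, and user settings are affected.  When
 * the current (hardware) setting changes, the chip is briefly halted
 * so the new SDTR data can be written, and the XPT layer is notified
 * if a path was supplied.
 */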
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

#define MIN(a,b) (((a) < (b)) ? (a) : (b))

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of
				 * this, we want to respond to the target with
				 * the same rate that it sent to us even if
				 * the period we use to send data to it is
				 * lower.  Only lower the response period if
				 * we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t sum;
	int i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int n_error;
	u_int16_t *wbuf;
	u_int16_t sum;
	u_int8_t s_addr;
	u_int8_t cfg_beg;
	u_int8_t cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

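/*
 * Initialize the microcode queue blocks in LRAM as a doubly linked
 * free list: queues 1 through max_openings are chained together,
 * while the blocks past max_openings (used as the busy and disconnect
 * queue heads) are linked to themselves.
 */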
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int i;

	for (i = 0; i <= ADV_MAX_TID; i++) {
		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

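/*
 * Reset the microcode variables that track the free, busy, and done
 * queue lists, then clear the scratch words at the start of the queue
 * address region.
 */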
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int)adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int)adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

static int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 * There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int cur_used_qs;
	u_int cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t q_addr;
	u_int8_t next_qp;
	u_int8_t q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

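/*
 * Claim the free queues needed for a request, copy the request into
 * LRAM, and advance the microcode's free list head.  Returns 0 on
 * success and 1 if the free list was exhausted before n_q_required
 * queues could be allocated.
 */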
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t free_q_head;
	u_int8_t next_qp;
	u_int8_t tid_no;
	u_int8_t target_ix;
	int retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}

static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t sg_list_dwords;
	u_int8_t sg_index, i;
	u_int8_t sg_entry_cnt;
	u_int8_t next_qp;
	u_int16_t q_addr;
	struct adv_sg_head *sg_head;
	struct adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv,
						  q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv,
					q_addr + ADV_SGQ_LIST_BEG,
					(u_int32_t *)&sg_head->sg_list[sg_index],
					sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct adv_target_transinfo* tinfo;
	u_int q_addr;
	u_int tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union ccb *ccb;
		struct adv_target_transinfo* tinfo;
		u_int period;
		u_int offset;
		int sdtr_accept;
		u_int8_t orig_offset;

		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
						     + ADV_SCSIQ_D_CCBPTR);
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}

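/*
 * Walk the microcode queues looking for requests that match the given
 * target/lun (and CCB, if one is supplied) and mark them aborted with
 * the given status.  Returns the number of requests aborted.
 */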
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t target_ix;
	int count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == (union ccb *)scsiq->d2.ccb_ptr))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = (union ccb *)scsiq->d2.ccb_ptr;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			    == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
				aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

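/*
 * Reset the chip and the SCSI bus, reinitialize LRAM and the
 * microcode variables, and terminate any pending requests.  Returns
 * the number of requests that were completed back to CAM.
 */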
int
adv_reset_bus(struct adv_softc *adv)
{
	int count;
	int i;
	union ccb *ccb;

	adv_reset_chip_and_scsi_bus(adv);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->fix_asyn_xfer & (0x01 << i))
			adv_set_sdtr_reg_at_id(adv, i,
					       ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}