1 /*
2 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3 *
4 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD: releng/5.0/sys/dev/advansys/advlib.c 67164 2000-10-15 14:19:01Z phk $
32 */
33 /*
34 * Ported from:
35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36 *
37 * Copyright (c) 1995-1996 Advanced System Products, Inc.
38 * All Rights Reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that redistributions of source
42 * code retain the above copyright notice and this comment without
43 * modification.
44 */
45
46 #include <sys/param.h>
47 #include <sys/kernel.h>
48 #include <sys/systm.h>
49
50 #include <machine/bus_pio.h>
51 #include <machine/bus.h>
52 #include <machine/resource.h>
53 #include <sys/bus.h>
54 #include <sys/rman.h>
55
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_sim.h>
59 #include <cam/cam_xpt_sim.h>
60
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 #include <cam/scsi/scsi_da.h>
64 #include <cam/scsi/scsi_cd.h>
65
66 #include <vm/vm.h>
67 #include <vm/vm_param.h>
68 #include <vm/pmap.h>
69
70 #include <dev/advansys/advansys.h>
71 #include <dev/advansys/advmcode.h>
72
/*
 * Pairs a SCSI inquiry match pattern with the quirk flags to apply
 * to devices that match it.  Consulted by advasync() on AC_FOUND_DEVICE.
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* device match criteria */
	u_int8_t quirks;			/* ADV_QUIRK_* flags below */
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01	/* always apply async fix */
#define ADV_QUIRK_FIX_ASYN_XFER		0x02	/* apply async fix if enabled */
};
79
/*
 * Quirk table scanned first-match by advasync().  The final wildcard
 * entry matches every device, so cam_quirkmatch() can never return NULL
 * (advasync() panics if it does).
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		/* HP CD-ROMs always need the async transfer fix. */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		/* Devices below are exonerated from the async fix. */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
			"TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		{
			T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};
124
125 /*
126 * Allowable periods in ns
127 */
128 static u_int8_t adv_sdtr_period_tbl[] =
129 {
130 25,
131 30,
132 35,
133 40,
134 50,
135 60,
136 70,
137 85
138 };
139
/*
 * Allowable periods in ns for Ultra-capable controllers.  Same indexing
 * scheme as adv_sdtr_period_tbl; selected in adv_lib_init() based on
 * the ADV_ULTRA type flag.
 */
static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
159
/*
 * In-core layout of a SCSI extended message (MSG_EXTENDED) as exchanged
 * with the microcode through LRAM.  The union covers the payloads for
 * SDTR, WDTR, and modify-data-pointer (MDP) messages.
 */
struct ext_msg {
	u_int8_t msg_type;	/* MSG_EXTENDED */
	u_int8_t msg_len;	/* length of payload that follows */
	u_int8_t msg_req;	/* extended message code (e.g. MSG_EXT_SDTR) */
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;		/* reserved/pad byte */
};

#define xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define wdtr_width	u_ext_msg.wdtr.wdtr_width
/*
 * The MDP argument travels most-significant byte first, so mdp_b3 (MSB)
 * is the first byte of the mdp[] array.  These macros previously
 * referenced nonexistent fields (u_ext_msg.mdp_bN) and could never have
 * compiled if used; map them onto the mdp[] array instead.
 */
#define mdp_b3		u_ext_msg.mdp.mdp[0]
#define mdp_b2		u_ext_msg.mdp.mdp[1]
#define mdp_b1		u_ext_msg.mdp.mdp[2]
#define mdp_b0		u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with
 * async transfers. Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
192
193 /* LRAM routines */
194 static void adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
195 u_int16_t *buffer, int count);
196 static void adv_write_lram_16_multi(struct adv_softc *adv,
197 u_int16_t s_addr, u_int16_t *buffer,
198 int count);
199 static void adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
200 u_int16_t set_value, int count);
201 static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
202 int count);
203
204 static int adv_write_and_verify_lram_16(struct adv_softc *adv,
205 u_int16_t addr, u_int16_t value);
206 static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
207
208
209 static void adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
210 u_int32_t value);
211 static void adv_write_lram_32_multi(struct adv_softc *adv,
212 u_int16_t s_addr, u_int32_t *buffer,
213 int count);
214
215 /* EEPROM routines */
216 static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
217 static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
218 u_int16_t value);
219 static int adv_write_eeprom_cmd_reg(struct adv_softc *adv,
220 u_int8_t cmd_reg);
221 static int adv_set_eeprom_config_once(struct adv_softc *adv,
222 struct adv_eeprom_config *eeconfig);
223
224 /* Initialization */
225 static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
226 u_int16_t *mcode_buf, u_int16_t mcode_size);
227
228 static void adv_reinit_lram(struct adv_softc *adv);
229 static void adv_init_lram(struct adv_softc *adv);
230 static int adv_init_microcode_var(struct adv_softc *adv);
231 static void adv_init_qlink_var(struct adv_softc *adv);
232
233 /* Interrupts */
234 static void adv_disable_interrupt(struct adv_softc *adv);
235 static void adv_enable_interrupt(struct adv_softc *adv);
236 static void adv_toggle_irq_act(struct adv_softc *adv);
237
238 /* Chip Control */
239 static int adv_host_req_chip_halt(struct adv_softc *adv);
240 static void adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
241 #if UNUSED
242 static u_int8_t adv_get_chip_scsi_ctrl(struct adv_softc *adv);
243 #endif
244
245 /* Queue handling and execution */
246 static __inline int
247 adv_sgcount_to_qcount(int sgcount);
248
249 static __inline int
250 adv_sgcount_to_qcount(int sgcount)
251 {
252 int n_sg_list_qs;
253
254 n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
255 if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
256 n_sg_list_qs++;
257 return (n_sg_list_qs + 1);
258 }
259
260 static void adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
261 u_int16_t *inbuf, int words);
262 static u_int adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
263 static u_int8_t adv_alloc_free_queues(struct adv_softc *adv,
264 u_int8_t free_q_head, u_int8_t n_free_q);
265 static u_int8_t adv_alloc_free_queue(struct adv_softc *adv,
266 u_int8_t free_q_head);
267 static int adv_send_scsi_queue(struct adv_softc *adv,
268 struct adv_scsi_q *scsiq,
269 u_int8_t n_q_required);
270 static void adv_put_ready_sg_list_queue(struct adv_softc *adv,
271 struct adv_scsi_q *scsiq,
272 u_int q_no);
273 static void adv_put_ready_queue(struct adv_softc *adv,
274 struct adv_scsi_q *scsiq, u_int q_no);
275 static void adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
276 u_int16_t *buffer, int words);
277
278 /* Messages */
279 static void adv_handle_extmsg_in(struct adv_softc *adv,
280 u_int16_t halt_q_addr, u_int8_t q_cntl,
281 target_bit_vector target_id,
282 int tid);
283 static void adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
284 u_int8_t sdtr_offset);
285 static void adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
286 u_int8_t sdtr_data);
287
288
289 /* Exported functions first */
290
/*
 * CAM asynchronous event callback.
 *
 * On AC_FOUND_DEVICE, look the device up in the quirk table and adjust
 * the per-target "async transfer fix" masks accordingly, then re-apply
 * the current sync settings.  On AC_LOST_DEVICE, mark the target as
 * needing the fix again (pessimistic default) and revert it to async.
 */
void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		/*
		 * The table ends in a full wildcard entry, so a miss can
		 * only mean the table was corrupted.
		 */
		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			/* Re-arm the fix until a new device proves otherwise */
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	/* FALLTHROUGH - default case only breaks, so this is harmless */
	default:
		break;
	}
}
374
375 void
376 adv_set_bank(struct adv_softc *adv, u_int8_t bank)
377 {
378 u_int8_t control;
379
380 /*
381 * Start out with the bank reset to 0
382 */
383 control = ADV_INB(adv, ADV_CHIP_CTRL)
384 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
385 | ADV_CC_DIAG | ADV_CC_SCSI_RESET
386 | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
387 if (bank == 1) {
388 control |= ADV_CC_BANK_ONE;
389 } else if (bank == 2) {
390 control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
391 }
392 ADV_OUTB(adv, ADV_CHIP_CTRL, control);
393 }
394
/*
 * Read a single byte from LRAM at "addr".
 *
 * LRAM is only word addressable, so read the containing 16 bit word
 * and extract the requested byte.  Which half of the word holds the
 * odd/even byte depends on host byte order.
 */
u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t byte_data;
	u_int16_t word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
		/* Odd address: byte lives in the high half (LE hosts). */
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
		/* Even address: byte lives in the low half (LE hosts). */
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}
421
422 void
423 adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
424 {
425 u_int16_t word_data;
426
427 word_data = adv_read_lram_16(adv, addr & 0xFFFE);
428 if (addr & 1) {
429 word_data &= 0x00FF;
430 word_data |= (((u_int8_t)value << 8) & 0xFF00);
431 } else {
432 word_data &= 0xFF00;
433 word_data |= ((u_int8_t)value & 0x00FF);
434 }
435 adv_write_lram_16(adv, addr & 0xFFFE, word_data);
436 }
437
438
/*
 * Read a 16 bit word from LRAM: latch the address, then read the data
 * register.
 */
u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}
445
/*
 * Write a 16 bit word to LRAM: latch the address, then write the data
 * register.
 */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
452
453 /*
454 * Determine if there is a board at "iobase" by looking
455 * for the AdvanSys signatures. Return 1 if a board is
456 * found, 0 otherwise.
457 */
458 int
459 adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
460 {
461 u_int16_t signature;
462
463 if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
464 signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
465 if ((signature == ADV_1000_ID0W)
466 || (signature == ADV_1000_ID0W_FIX))
467 return (1);
468 }
469 return (0);
470 }
471
472 void
473 adv_lib_init(struct adv_softc *adv)
474 {
475 if ((adv->type & ADV_ULTRA) != 0) {
476 adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
477 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
478 } else {
479 adv->sdtr_period_tbl = adv_sdtr_period_tbl;
480 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
481 }
482 }
483
/*
 * Read the controller's EEPROM configuration into *eeprom_config.
 *
 * Returns the 16 bit sum of all configuration words read, excluding
 * the final word (the stored checksum), which is read into the last
 * buffer slot but not added to the sum.  The caller can compare the
 * returned sum against that stored checksum.
 */
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	/* The first two words always live at addresses 0 and 1. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	/* The remaining config region's location depends on the bus type. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	/* Final word is the stored checksum; not folded into the sum. */
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
520
521 int
522 adv_set_eeprom_config(struct adv_softc *adv,
523 struct adv_eeprom_config *eeprom_config)
524 {
525 int retry;
526
527 retry = 0;
528 while (1) {
529 if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
530 break;
531 }
532 if (++retry > ADV_EEPROM_MAX_RETRY) {
533 break;
534 }
535 }
536 return (retry > ADV_EEPROM_MAX_RETRY);
537 }
538
/*
 * Reset the controller chip, optionally also resetting the SCSI bus.
 * The DELAY() values and register ordering follow the vendor-specified
 * reset sequence; do not reorder.  Returns non-zero if the chip ends
 * up halted (the expected post-reset state).
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		/* Allow devices time to recover from the bus reset. */
		DELAY(200 * 1000);

	/* Clear any latched SCSI reset interrupt. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
561
562 int
563 adv_test_external_lram(struct adv_softc* adv)
564 {
565 u_int16_t q_addr;
566 u_int16_t saved_value;
567 int success;
568
569 success = 0;
570
571 q_addr = ADV_QNO_TO_QADDR(241);
572 saved_value = adv_read_lram_16(adv, q_addr);
573 if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
574 success = 1;
575 adv_write_lram_16(adv, q_addr, saved_value);
576 }
577 return (success);
578 }
579
580
581 int
582 adv_init_lram_and_mcode(struct adv_softc *adv)
583 {
584 u_int32_t retval;
585
586 adv_disable_interrupt(adv);
587
588 adv_init_lram(adv);
589
590 retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
591 adv_mcode_size);
592 if (retval != adv_mcode_chksum) {
593 printf("adv%d: Microcode download failed checksum!\n",
594 adv->unit);
595 return (1);
596 }
597
598 if (adv_init_microcode_var(adv) != 0)
599 return (1);
600
601 adv_enable_interrupt(adv);
602 return (0);
603 }
604
/*
 * Decode the IRQ number from the chip's config register.
 *
 * VL boards encode a 3 bit value (0, 4, and 7 are invalid and return
 * IRQ 0); other boards encode a 2 bit value with code 3 mapping two
 * IRQs higher.  The decoded code is biased by ADV_MIN_IRQ_NO to give
 * the actual IRQ line.
 */
u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			/* Invalid encodings on VL boards. */
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		/* Code 3 skips two IRQ lines in the non-VL encoding. */
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}
627
/*
 * Program the chip's IRQ selection bits (inverse of adv_get_chip_irq).
 *
 * VL boards require toggling the IRQ-act line around each config write;
 * out-of-range requests are mapped to 0 (IRQ disabled).  ISA boards use
 * the 2 bit encoding, with IRQ 15 stored as code 3.  Returns the IRQ
 * actually decoded back from the hardware.
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				/* Convert to the chip's 3 bit encoding. */
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			/* IRQ 15 is stored as encoding 3. */
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
661
662 void
663 adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
664 {
665 u_int16_t cfg_lsw;
666
667 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
668 if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
669 return;
670 cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
671 cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
672 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
673 }
674
/*
 * Prepare and submit a SCSI request to the microcode.
 *
 * Handles sync renegotiation for REQUEST SENSE, computes how many
 * microcode queues the request needs (one per ADV_SG_LIST_PER_Q SG
 * entries plus one for the command), and applies two hardware bug
 * workarounds: the "async transfer" fix for small transfers to
 * affected targets, and the trailing-odd-byte fix for READ commands
 * on non-PCI boards.  Returns 0 if the request was queued, 1 if there
 * were not enough free queues (and the request was not urgent).
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1) 
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			/* Non-PCI boards require word-aligned SG segments. */
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/*
		 * Point at the final SG segment so the trailing-byte
		 * workaround below can trim it if needed.
		 */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		/*
		 * The async fix must be disabled for small transfers
		 * and for commands whose transfer length is not known
		 * precisely in advance.
		 */
		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6 
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10 
				 || scsiq->cdbptr[0] == MODE_SELECT_10 
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Tagged queuing cannot be used with the fix disabled. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		/*
		 * Trim any trailing bytes beyond the last 32-bit word
		 * boundary from the transfer; the microcode handles them
		 * separately via ADV_TAG_FLAG_EXTRA_BYTES.
		 */
		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Urgent requests are submitted even when queues look scarce. */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
799
800
/*
 * Copy the completion information for a finished request out of the
 * microcode queue at "q_addr" into *scsiq.  Returns the number of SG
 * queues that were chained to the request so the caller can free them.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* Each LRAM word packs two logically separate byte fields. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
845
846 int
847 adv_start_chip(struct adv_softc *adv)
848 {
849 ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
850 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
851 return (0);
852 return (1);
853 }
854
/*
 * Ask the microcode (RISC) to stop by posting a stop request in LRAM,
 * then poll for acknowledgement for up to ~20ms (20 x 1ms).  Returns 1
 * if the RISC acknowledged the stop, 0 on timeout or if a stop request
 * was already pending.
 */
int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	/* Only post a request if no stop code is already outstanding. */
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}
874
875 int
876 adv_is_chip_halted(struct adv_softc *adv)
877 {
878 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
879 if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
880 return (1);
881 }
882 }
883 return (0);
884 }
885
886 /*
887 * XXX The numeric constants and the loops in this routine
888 * need to be documented.
889 */
890 void
891 adv_ack_interrupt(struct adv_softc *adv)
892 {
893 u_int8_t host_flag;
894 u_int8_t risc_flag;
895 int loop;
896
897 loop = 0;
898 do {
899 risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
900 if (loop++ > 0x7FFF) {
901 break;
902 }
903 } while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);
904
905 host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
906 adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
907 host_flag | ADV_HOST_FLAG_ACK_INT);
908
909 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
910 loop = 0;
911 while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
912 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
913 if (loop++ > 3) {
914 break;
915 }
916 }
917
918 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
919 }
920
921 /*
922 * Handle all conditions that may halt the chip waiting
923 * for us to intervene.
924 */
925 void
926 adv_isr_chip_halted(struct adv_softc *adv)
927 {
928 u_int16_t int_halt_code;
929 u_int16_t halt_q_addr;
930 target_bit_vector target_mask;
931 target_bit_vector scsi_busy;
932 u_int8_t halt_qp;
933 u_int8_t target_ix;
934 u_int8_t q_cntl;
935 u_int8_t tid_no;
936
937 int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
938 halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
939 halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
940 target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
941 q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
942 tid_no = ADV_TIX_TO_TID(target_ix);
943 target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
944 if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
945 /*
946 * Temporarily disable the async fix by removing
947 * this target from the list of affected targets,
948 * setting our async rate, and then putting us
949 * back into the mask.
950 */
951 adv->fix_asyn_xfer &= ~target_mask;
952 adv_set_syncrate(adv, /*struct cam_path */NULL,
953 tid_no, /*period*/0, /*offset*/0,
954 ADV_TRANS_ACTIVE);
955 adv->fix_asyn_xfer |= target_mask;
956 } else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
957 adv_set_syncrate(adv, /*struct cam_path */NULL,
958 tid_no, /*period*/0, /*offset*/0,
959 ADV_TRANS_ACTIVE);
960 } else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
961 adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
962 target_mask, tid_no);
963 } else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
964 struct adv_target_transinfo* tinfo;
965 union ccb *ccb;
966 u_int32_t cinfo_index;
967 u_int8_t tag_code;
968 u_int8_t q_status;
969
970 tinfo = &adv->tinfo[tid_no];
971 q_cntl |= QC_REQ_SENSE;
972
973 /* Renegotiate if appropriate. */
974 adv_set_syncrate(adv, /*struct cam_path */NULL,
975 tid_no, /*period*/0, /*offset*/0,
976 ADV_TRANS_CUR);
977 if (tinfo->current.period != tinfo->goal.period) {
978 adv_msgout_sdtr(adv, tinfo->goal.period,
979 tinfo->goal.offset);
980 q_cntl |= QC_MSG_OUT;
981 }
982 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
983
984 /* Don't tag request sense commands */
985 tag_code = adv_read_lram_8(adv,
986 halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
987 tag_code &=
988 ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
989
990 if ((adv->fix_asyn_xfer & target_mask) != 0
991 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
992 tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
993 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
994 }
995 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
996 tag_code);
997 q_status = adv_read_lram_8(adv,
998 halt_q_addr + ADV_SCSIQ_B_STATUS);
999 q_status |= (QS_READY | QS_BUSY);
1000 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
1001 q_status);
1002 /*
1003 * Freeze the devq until we can handle the sense condition.
1004 */
1005 cinfo_index =
1006 adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1007 ccb = adv->ccb_infos[cinfo_index].ccb;
1008 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1009 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1010 adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1011 /*ccb*/NULL, CAM_REQUEUE_REQ,
1012 /*queued_only*/TRUE);
1013 scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1014 scsi_busy &= ~target_mask;
1015 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1016 /*
1017 * Ensure we have enough time to actually
1018 * retrieve the sense.
1019 */
1020 untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
1021 ccb->ccb_h.timeout_ch =
1022 timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
1023 } else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
1024 struct ext_msg out_msg;
1025
1026 adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1027 (u_int16_t *) &out_msg,
1028 sizeof(out_msg)/2);
1029
1030 if ((out_msg.msg_type == MSG_EXTENDED)
1031 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
1032 && (out_msg.msg_req == MSG_EXT_SDTR)) {
1033
1034 /* Revert to Async */
1035 adv_set_syncrate(adv, /*struct cam_path */NULL,
1036 tid_no, /*period*/0, /*offset*/0,
1037 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1038 }
1039 q_cntl &= ~QC_MSG_OUT;
1040 adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1041 } else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
1042 u_int8_t scsi_status;
1043 union ccb *ccb;
1044 u_int32_t cinfo_index;
1045
1046 scsi_status = adv_read_lram_8(adv, halt_q_addr
1047 + ADV_SCSIQ_SCSI_STATUS);
1048 cinfo_index =
1049 adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1050 ccb = adv->ccb_infos[cinfo_index].ccb;
1051 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1052 ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
1053 ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
1054 adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1055 /*ccb*/NULL, CAM_REQUEUE_REQ,
1056 /*queued_only*/TRUE);
1057 scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1058 scsi_busy &= ~target_mask;
1059 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1060 } else {
1061 printf("Unhandled Halt Code %x\n", int_halt_code);
1062 }
1063 adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1064 }
1065
1066 void
1067 adv_sdtr_to_period_offset(struct adv_softc *adv,
1068 u_int8_t sync_data, u_int8_t *period,
1069 u_int8_t *offset, int tid)
1070 {
1071 if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1072 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1073 *period = *offset = 0;
1074 } else {
1075 *period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1076 *offset = sync_data & 0xF;
1077 }
1078 }
1079
/*
 * Update the synchronous transfer settings for a target.
 *
 * "type" is a mask of ADV_TRANS_CUR/GOAL/USER selecting which settings
 * to update.  Updating the current settings requires halting the chip
 * (under splcam) to program the hardware SDTR register, after which the
 * new parameters are reported to CAM via AC_TRANSFER_NEG when a path is
 * supplied.  period/offset of 0 request async transfers.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1154
1155 u_int8_t
1156 adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1157 u_int *offset, int tid)
1158 {
1159 u_int i;
1160 u_int dummy_offset;
1161 u_int dummy_period;
1162
1163 if (offset == NULL) {
1164 dummy_offset = 0;
1165 offset = &dummy_offset;
1166 }
1167
1168 if (period == NULL) {
1169 dummy_period = 0;
1170 period = &dummy_period;
1171 }
1172
1173 #define MIN(a,b) (((a) < (b)) ? (a) : (b))
1174
1175 *offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1176 if (*period != 0 && *offset != 0) {
1177 for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1178 if (*period <= adv->sdtr_period_tbl[i]) {
1179 /*
1180 * When responding to a target that requests
1181 * sync, the requested rate may fall between
1182 * two rates that we can output, but still be
1183 * a rate that we can receive. Because of this,
1184 * we want to respond to the target with
1185 * the same rate that it sent to us even
1186 * if the period we use to send data to it
1187 * is lower. Only lower the response period
1188 * if we must.
1189 */
1190 if (i == 0 /* Our maximum rate */)
1191 *period = adv->sdtr_period_tbl[0];
1192 return ((i << 4) | *offset);
1193 }
1194 }
1195 }
1196
1197 /* Must go async */
1198 *period = 0;
1199 *offset = 0;
1200 if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1201 return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1202 return (0);
1203 }
1204
1205 /* Internal Routines */
1206
/*
 * Read "count" 16-bit words of chip local RAM starting at s_addr into
 * buffer.  A single address setup suffices: the LRAM data port advances
 * the internal address on each access (see adv_msum_lram_16).
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1214
/*
 * Write "count" 16-bit words from buffer into chip local RAM starting
 * at s_addr, using the auto-advancing LRAM data port.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1222
/*
 * Fill "count" consecutive 16-bit words of chip local RAM, starting at
 * s_addr, with the single value set_value (memset-like, word-wide).
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1231
/*
 * Sum "count" consecutive 16-bit words of chip local RAM starting at
 * s_addr.  Used as a simple checksum over downloaded microcode.  Each
 * read of the data port advances the internal LRAM address.
 */
static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t sum;
	int i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}
1244
1245 static int
1246 adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1247 u_int16_t value)
1248 {
1249 int retval;
1250
1251 retval = 0;
1252 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1253 ADV_OUTW(adv, ADV_LRAM_DATA, value);
1254 DELAY(10000);
1255 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1256 if (value != ADV_INW(adv, ADV_LRAM_DATA))
1257 retval = 1;
1258 return (retval);
1259 }
1260
/*
 * Read a 32-bit value from chip local RAM at "addr" as two consecutive
 * 16-bit data-port accesses.  The chip always delivers the words in the
 * same bus order, so the host's endianness decides which read is the
 * high half and which is the low.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1278
/*
 * Write a 32-bit value to chip local RAM at "addr" as two consecutive
 * 16-bit data-port accesses, ordered to match adv_read_lram_32().
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1292
/*
 * Write "count" 32-bit values from buffer into chip local RAM starting
 * at s_addr.  The buffer is streamed out as 2*count 16-bit words in
 * host memory order (no endian swap -- assumes little-endian layout
 * matches the chip's expectation).
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1300
/*
 * Read the 16-bit EEPROM word at "addr".  Write access is disabled
 * first, then a READ command for the address is issued and the data
 * register sampled.  The 1ms delays pace the slow serial EEPROM.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1316
/*
 * Write "value" to EEPROM word "addr", skipping the (slow, wear-prone)
 * program cycle when the cell already holds the value.  Returns the
 * word read back after the operation, so the caller can compare it
 * against "value" to detect a failed write.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		/* Program cycle needs substantially longer than a read. */
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1339
1340 static int
1341 adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1342 {
1343 u_int8_t read_back;
1344 int retry;
1345
1346 retry = 0;
1347 while (1) {
1348 ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1349 DELAY(1000);
1350 read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1351 if (read_back == cmd_reg) {
1352 return (1);
1353 }
1354 if (retry++ > ADV_EEPROM_MAX_RETRY) {
1355 return (0);
1356 }
1357 }
1358 }
1359
/*
 * Burn an entire EEPROM configuration image in one pass and verify it.
 *
 * The image is written as: the first two words (header), then the
 * config region (cfg_beg .. cfg_end-1), then a running checksum of all
 * words written, stored at cfg_end.  "wbuf" walks the in-memory
 * eeprom_config image in the same order the EEPROM addresses are
 * visited, so wbuf and s_addr advance in lock-step.  A second pass
 * re-reads every word (including the checksum) and compares.  Returns
 * the number of words that failed either the write or the verify.
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int n_error;
	u_int16_t *wbuf;
	u_int16_t sum;
	u_int8_t s_addr;
	u_int8_t cfg_beg;
	u_int8_t cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* Header: the first two words of the image live at addresses 0-1. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* VL-bus parts use a different config window than ISA/PCI parts. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	/* Config body; the last address (cfg_end) is reserved for the sum. */
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Record the checksum in the image and burn it at cfg_end. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Verify pass: walk the image and the EEPROM again in parallel. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	/* "<= cfg_end" also re-checks the checksum word written above. */
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1411
/*
 * Download the microcode image into chip local RAM at s_addr and
 * record its size and checksum in the microcode's well-known LRAM
 * variables.  mcode_size is in bytes; LRAM is addressed in 16-bit
 * words.  Returns the 32-bit sum of the words actually landed in LRAM
 * so the caller can validate the download.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	/*
	 * The 16-bit checksum the microcode itself consults covers only
	 * the code section starting at ADV_CODE_SEC_BEG.
	 */
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1433
/*
 * Re-initialize chip local RAM after a reset: rebuild the queue block
 * topology and reset the microcode's queue-link variables.
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1439
/*
 * Lay out the queue blocks in chip local RAM and thread them into the
 * doubly-linked list the microcode walks.  Queue numbers
 * ADV_MIN_ACTIVE_QNO .. max_openings form the circular active list;
 * the blocks beyond max_openings (up to max_openings + 3) are
 * terminators that link to themselves.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t i;
	u_int16_t s_addr;

	/* Zero the whole queue area: (max_openings + 3) 64-byte blocks. */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First active queue: backward link wraps to the last queue. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle queues link to their immediate neighbors. */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last active queue: forward link is the end-of-list sentinel. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Trailing blocks are self-linked placeholders. */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1475
/*
 * Initialize the microcode's LRAM variables after a download: force all
 * targets async, set up queue-link state, publish the host's settings
 * (disconnect enables, SCSI ID, overrun buffer), and point the chip's
 * program counter at the microcode entry point.  Returns 0 on success,
 * 1 if the program counter could not be set.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	/* SCSI ID is published as a bit mask, not a number. */
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	/* Bus address of the data-overrun catch buffer. */
	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1506
/*
 * Reset the microcode's queue-management variables to their power-on
 * state: free list starts at queue 1, done list tail at the last
 * queue, busy/disconnect heads just past the active range, and all
 * error/halt/busy flags cleared.  Finally zero the first 32 words of
 * the queue address area.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	/* The two queues past the active range head the busy/disc lists. */
	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	/* Clear sticky status/flag bytes from any previous run. */
	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1537
1538 static void
1539 adv_disable_interrupt(struct adv_softc *adv)
1540 {
1541 u_int16_t cfg;
1542
1543 cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1544 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1545 }
1546
1547 static void
1548 adv_enable_interrupt(struct adv_softc *adv)
1549 {
1550 u_int16_t cfg;
1551
1552 cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1553 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1554 }
1555
/*
 * Pulse the IRQ-active bit in the chip status register (write it set,
 * then clear) to acknowledge/clear the interrupt line.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1562
/*
 * Release the microcode if it is stopped by clearing a non-zero stop
 * code.
 *
 * NOTE(review): this uses ADV_STOP_CODE_B while every sibling routine
 * (adv_init_qlink_var, adv_host_req_chip_halt) manipulates the stop
 * code through ADVV_STOP_CODE_B -- confirm against advlib.h whether
 * these are the same LRAM address or this is a wrong constant.
 */
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
1570
1571 int
1572 adv_stop_chip(struct adv_softc *adv)
1573 {
1574 u_int8_t cc_val;
1575
1576 cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1577 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1578 ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1579 adv_set_chip_ih(adv, ADV_INS_HALT);
1580 adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1581 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1582 return (0);
1583 }
1584 return (1);
1585 }
1586
/*
 * Politely ask the running microcode to halt by setting the stop-code
 * LRAM variable, then busy-wait (bounded at 2000 polls) for the chip
 * to report halted.  The previous stop code is restored afterwards.
 * Returns non-zero if the chip ended up halted, 0 on timeout.
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}
1607
/*
 * Write an instruction code into the chip's IH register, which lives
 * in register bank 1.  Bank 0 is restored before returning, as the
 * rest of the driver assumes bank 0 is selected.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1615
#if UNUSED
/*
 * Read the SCSI control register (bank 1), restoring bank 0 before
 * returning.  Currently compiled out.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1628
1629 /*
1630 * XXX Looks like more padding issues in this routine as well.
1631 * There has to be a way to turn this into an insw.
1632 */
/*
 * Copy queue information words from LRAM at s_addr into inbuf.  Word
 * index 5 of the destination is skipped (left unmodified, and no LRAM
 * word is consumed for it) -- presumably it corresponds to structure
 * padding with no counterpart in LRAM; see the XXX above.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}
1647
1648 static u_int
1649 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1650 {
1651 u_int cur_used_qs;
1652 u_int cur_free_qs;
1653
1654 cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1655
1656 if ((cur_used_qs + n_qs) <= adv->max_openings) {
1657 cur_free_qs = adv->max_openings - cur_used_qs;
1658 return (cur_free_qs);
1659 }
1660 adv->openings_needed = n_qs;
1661 return (0);
1662 }
1663
1664 static u_int8_t
1665 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1666 u_int8_t n_free_q)
1667 {
1668 int i;
1669
1670 for (i = 0; i < n_free_q; i++) {
1671 free_q_head = adv_alloc_free_queue(adv, free_q_head);
1672 if (free_q_head == ADV_QLINK_END)
1673 break;
1674 }
1675 return (free_q_head);
1676 }
1677
1678 static u_int8_t
1679 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1680 {
1681 u_int16_t q_addr;
1682 u_int8_t next_qp;
1683 u_int8_t q_status;
1684
1685 next_qp = ADV_QLINK_END;
1686 q_addr = ADV_QNO_TO_QADDR(free_q_head);
1687 q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);
1688
1689 if ((q_status & QS_READY) == 0)
1690 next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1691
1692 return (next_qp);
1693 }
1694
1695 static int
1696 adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1697 u_int8_t n_q_required)
1698 {
1699 u_int8_t free_q_head;
1700 u_int8_t next_qp;
1701 u_int8_t tid_no;
1702 u_int8_t target_ix;
1703 int retval;
1704
1705 retval = 1;
1706 target_ix = scsiq->q2.target_ix;
1707 tid_no = ADV_TIX_TO_TID(target_ix);
1708 free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1709 if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1710 != ADV_QLINK_END) {
1711 scsiq->q1.q_no = free_q_head;
1712
1713 /*
1714 * Now that we know our Q number, point our sense
1715 * buffer pointer to a bus dma mapped area where
1716 * we can dma the data to.
1717 */
1718 scsiq->q1.sense_addr = adv->sense_physbase
1719 + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
1720 adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1721 adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1722 adv->cur_active += n_q_required;
1723 retval = 0;
1724 }
1725 return (retval);
1726 }
1727
1728
/*
 * Write the scatter/gather continuation queues for "scsiq" into LRAM.
 *
 * The first SG element travels with the primary queue block; the
 * remaining sg_head->entry_cnt - 1 elements are spread across
 * sg_head->queue_cnt continuation queue blocks (up to
 * ADV_SG_LIST_PER_Q elements each), chained through the primary
 * block's forward links.  Each continuation block receives an
 * adv_sg_list_q header followed by its slice of the SG element array.
 * Finally the primary queue itself is made ready via
 * adv_put_ready_queue().
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t sg_list_dwords;
	u_int8_t sg_index, i;
	u_int8_t sg_entry_cnt;
	u_int8_t next_qp;
	u_int16_t q_addr;
	struct adv_sg_head *sg_head;
	struct adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* Element 0 rides in the primary queue block. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each SG element is two 32-bit words. */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the chain to the next claimed queue block. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1794
/*
 * Finalize queue block q_no for "scsiq" and mark it READY so the
 * microcode will execute it.  If the target's current sync settings
 * differ from its negotiation goal, an SDTR message-out is staged and
 * QC_MSG_OUT set so negotiation happens on this command.  The READY
 * status byte is written last, after CDB and queue state, so the
 * microcode never sees a half-built queue.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct adv_target_transinfo* tinfo;
	u_int q_addr;
	u_int tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		/* Renegotiate: queue an SDTR message with this command. */
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/* Publish queue number and READY in one 16-bit store. */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1841
/*
 * Copy "words" 16-bit words from buffer into LRAM at s_addr, skipping
 * source word indices 2 and 10 -- compiler-inserted padding in the
 * adv_scsiq_1/adv_scsiq_2 pair that has no counterpart in the LRAM
 * queue layout.  The LRAM address does not advance for skipped words
 * (only actual data-port writes advance it).
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1871
/*
 * Handle an extended SCSI message received from target tid_no while the
 * microcode is halted at halt_q_addr.
 *
 * SDTR: range-check the requested period/offset against our limits,
 * program the (possibly downgraded) rate, and either accept the
 * response to our own negotiation (clear QC_MSG_OUT) or queue a
 * counter-proposal SDTR (set QC_MSG_OUT).  WDTR: respond with a
 * narrow (width 0) WDTR.  Anything else: respond MESSAGE REJECT.
 * The updated q_cntl is written back to the halted queue.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	 ccb *ccb;
		struct	 adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Target asked for a faster rate than our goal; slow it. */
		if (ext_msg.xfer_period < tinfo->goal.period) {
                	sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* We only do narrow transfers; answer with width 0. */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        } else {

		/* Unknown extended message: reject it. */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        }
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1950
1951 static void
1952 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1953 u_int8_t sdtr_offset)
1954 {
1955 struct ext_msg sdtr_buf;
1956
1957 sdtr_buf.msg_type = MSG_EXTENDED;
1958 sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1959 sdtr_buf.msg_req = MSG_EXT_SDTR;
1960 sdtr_buf.xfer_period = sdtr_period;
1961 sdtr_offset &= ADV_SYN_MAX_OFFSET;
1962 sdtr_buf.req_ack_offset = sdtr_offset;
1963 adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1964 (u_int16_t *) &sdtr_buf,
1965 sizeof(sdtr_buf) / 2);
1966 }
1967
/*
 * Mark matching queued commands as aborted.
 *
 * Scans every active queue block for entries that are READY, not
 * already aborted, not SG continuation blocks, addressed to the given
 * target/lun, optionally still waiting in the queue (queued_only), and
 * (if ccb != NULL) belonging to that specific CCB.  Matches get
 * QS_ABORTED set in LRAM, "status" merged into their CCB (unless an
 * earlier error is already recorded), and ACCB_ABORT_QUEUED noted in
 * their ccb_info.  Returns the number of commands so marked.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
2014
/*
 * Reset the SCSI bus and/or chip and recover driver state: wait (up
 * to ~200ms) for any in-progress bus reset to clear, reset the chip,
 * rebuild LRAM, force all targets async, notify CAM of the bus reset,
 * and complete every pending CCB as aborted-by-host.  Returns the
 * number of CCBs flushed.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		/* Preserve any error already recorded on the CCB. */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}
2048
/*
 * Program the per-target SDTR byte into the chip's sync-offset
 * register.  The chip selects which target's register is written via
 * the bank-1 HOST_SCSIID register: the target id is written there and
 * read back (as a bit mask) to confirm the selection took before the
 * bank-0 SYN_OFFSET write.  The original id selection and bank 0 are
 * restored before returning.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

    	adv_set_bank(adv, 1);
	/* HOST_SCSIID reads back as a bit mask; recover the id with ffs. */
    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
    	adv_set_bank(adv, 1);
    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
Cache object: a06333fdbfe50963b995b827a9fd4ead
|