1 /*
2 * CAM SCSI interface for the Advanced Systems Inc.
3 * Second Generation SCSI controllers.
4 *
5 * Product specific probe and attach routines can be found in:
6 *
7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W
8 *
9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * $FreeBSD$
34 */
35 /*
36 * Ported from:
37 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
38 *
39 * Copyright (c) 1995-1998 Advanced System Products, Inc.
40 * All Rights Reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that redistributions of source
44 * code retain the above copyright notice and this comment without
45 * modification.
46 */
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/malloc.h>
52 #include <sys/bus.h>
53
54 #include <machine/bus_pio.h>
55 #include <machine/bus_memio.h>
56 #include <machine/bus.h>
57 #include <machine/clock.h>
58 #include <machine/resource.h>
59
60 #include <sys/rman.h>
61
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_sim.h>
65 #include <cam/cam_xpt_sim.h>
66 #include <cam/cam_debug.h>
67
68 #include <cam/scsi/scsi_message.h>
69
70 #include <dev/advansys/adwvar.h>
71
72 /* Definitions for our use of the SIM private CCB area */
73 #define ccb_acb_ptr spriv_ptr0
74 #define ccb_adw_ptr spriv_ptr1
75
76 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
77
78 u_long adw_unit;
79
80 static __inline cam_status adwccbstatus(union ccb*);
81 static __inline struct acb* adwgetacb(struct adw_softc *adw);
82 static __inline void adwfreeacb(struct adw_softc *adw,
83 struct acb *acb);
84
85 static void adwmapmem(void *arg, bus_dma_segment_t *segs,
86 int nseg, int error);
87 static struct sg_map_node*
88 adwallocsgmap(struct adw_softc *adw);
89 static int adwallocacbs(struct adw_softc *adw);
90
91 static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
92 int nseg, int error);
93 static void adw_action(struct cam_sim *sim, union ccb *ccb);
94 static void adw_poll(struct cam_sim *sim);
95 static void adw_async(void *callback_arg, u_int32_t code,
96 struct cam_path *path, void *arg);
97 static void adwprocesserror(struct adw_softc *adw, struct acb *acb);
98 static void adwtimeout(void *arg);
99 static void adw_handle_device_reset(struct adw_softc *adw,
100 u_int target);
101 static void adw_handle_bus_reset(struct adw_softc *adw,
102 int initiated);
103
104 static __inline cam_status
105 adwccbstatus(union ccb* ccb)
106 {
107 return (ccb->ccb_h.status & CAM_STATUS_MASK);
108 }
109
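/*
 * Pull an ACB from the free list.  If the list is empty and we are still
 * below max_acbs, try to grow the pool by another batch of ACBs.  Returns
 * NULL if no ACB can be provided; the caller then freezes the SIM queue
 * and requeues the request.
 */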
110 static __inline struct acb*
111 adwgetacb(struct adw_softc *adw)
112 {
113 struct acb* acb;
114 int s;
115
116 s = splcam();
117 if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
118 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
119 } else if (adw->num_acbs < adw->max_acbs) {
120 adwallocacbs(adw);
121 acb = SLIST_FIRST(&adw->free_acb_list);
122 if (acb == NULL)
123 printf("%s: Can't malloc ACB\n", adw_name(adw));
124 else {
125 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
126 }
127 }
128 splx(s);
129
130 return (acb);
131 }
132
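/*
 * Return an ACB to the free list.  If we had previously frozen the SIM
 * queue due to a resource shortage, ask CAM to release it by setting
 * CAM_RELEASE_SIMQ in the completing CCB's status.
 */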
133 static __inline void
134 adwfreeacb(struct adw_softc *adw, struct acb *acb)
135 {
136 int s;
137
138 s = splcam();
139 if ((acb->state & ACB_ACTIVE) != 0)
140 LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
141 if ((acb->state & ACB_RELEASE_SIMQ) != 0)
142 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
143 else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
144 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
145 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
146 adw->state &= ~ADW_RESOURCE_SHORTAGE;
147 }
148 acb->state = ACB_FREE;
149 SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
150 splx(s);
151 }
152
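/*
 * Callback for bus_dmamap_load() on our single-segment, permanently
 * mapped control structures; it simply records the bus address of the
 * first (and only) segment.
 */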
153 static void
154 adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
155 {
156 bus_addr_t *busaddrp;
157
158 busaddrp = (bus_addr_t *)arg;
159 *busaddrp = segs->ds_addr;
160 }
161
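/*
 * Allocate and DMA map a page sized chunk of scatter/gather blocks.
 * The chunk is tracked on the sg_maps list so that it can be released
 * in adw_free().
 */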
162 static struct sg_map_node *
163 adwallocsgmap(struct adw_softc *adw)
164 {
165 struct sg_map_node *sg_map;
166
167 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
168
169 if (sg_map == NULL)
170 return (NULL);
171
172 	/* Allocate S/G space for the next batch of ACBs */
173 if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
174 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
175 free(sg_map, M_DEVBUF);
176 return (NULL);
177 }
178
179 SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);
180
181 bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
182 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);
183
184 bzero(sg_map->sg_vaddr, PAGE_SIZE);
185 return (sg_map);
186 }
187
188 /*
189 * Allocate another chunk of ACBs. Return the count of entries added.
190 * Assumed to be called at splcam().
191 */
192 static int
193 adwallocacbs(struct adw_softc *adw)
194 {
195 struct acb *next_acb;
196 struct sg_map_node *sg_map;
197 bus_addr_t busaddr;
198 struct adw_sg_block *blocks;
199 int newcount;
200 int i;
201
202 next_acb = &adw->acbs[adw->num_acbs];
203 sg_map = adwallocsgmap(adw);
204
205 if (sg_map == NULL)
206 return (0);
207
208 blocks = sg_map->sg_vaddr;
209 busaddr = sg_map->sg_physaddr;
210
211 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
212 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
213 int error;
214
215 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
216 &next_acb->dmamap);
217 if (error != 0)
218 break;
219 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
220 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
221 next_acb->queue.sense_baddr =
222 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
223 next_acb->sg_blocks = blocks;
224 next_acb->sg_busaddr = busaddr;
225 next_acb->state = ACB_FREE;
226 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
227 blocks += ADW_SG_BLOCKCNT;
228 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
229 next_acb++;
230 adw->num_acbs++;
231 }
232 return (i);
233 }
234
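/*
 * bus_dmamap_load() callback (also called directly for requests with
 * pre-built segment lists or with no data phase).  Record the data
 * segments in the ACB, chaining additional S/G blocks if more than one
 * segment is present, sync the data map, and hand the ACB to the
 * controller unless the CCB was aborted while we waited for the mapping.
 */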
235 static void
236 adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
237 {
238 struct acb *acb;
239 union ccb *ccb;
240 struct adw_softc *adw;
241 int s;
242
243 acb = (struct acb *)arg;
244 ccb = acb->ccb;
245 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
246
247 if (error != 0) {
248 if (error != EFBIG)
249 			printf("%s: Unexpected error 0x%x returned from "
250 			    "bus_dmamap_load\n", adw_name(adw), error);
251 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
252 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
253 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
254 }
255 adwfreeacb(adw, acb);
256 xpt_done(ccb);
257 return;
258 }
259
260 if (nseg != 0) {
261 bus_dmasync_op_t op;
262
263 acb->queue.data_addr = dm_segs[0].ds_addr;
264 acb->queue.data_cnt = ccb->csio.dxfer_len;
265 if (nseg > 1) {
266 struct adw_sg_block *sg_block;
267 struct adw_sg_elm *sg;
268 bus_addr_t sg_busaddr;
269 u_int sg_index;
270 bus_dma_segment_t *end_seg;
271
272 end_seg = dm_segs + nseg;
273
274 sg_busaddr = acb->sg_busaddr;
275 sg_index = 0;
276 /* Copy the segments into our SG list */
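			/*
			 * Each adw_sg_block holds up to ADW_NO_OF_SG_PER_BLOCK
			 * elements.  Blocks are chained through the
			 * sg_busaddr_next field using bus addresses; the final
			 * block terminates the chain with a link of 0.
			 */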
277 for (sg_block = acb->sg_blocks;; sg_block++) {
278 u_int i;
279
280 sg = sg_block->sg_list;
281 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
282 if (dm_segs >= end_seg)
283 break;
284
285 sg->sg_addr = dm_segs->ds_addr;
286 sg->sg_count = dm_segs->ds_len;
287 sg++;
288 dm_segs++;
289 }
290 sg_block->sg_cnt = i;
291 sg_index += i;
292 if (dm_segs == end_seg) {
293 sg_block->sg_busaddr_next = 0;
294 break;
295 } else {
296 sg_busaddr +=
297 sizeof(struct adw_sg_block);
298 sg_block->sg_busaddr_next = sg_busaddr;
299 }
300 }
301 acb->queue.sg_real_addr = acb->sg_busaddr;
302 } else {
303 acb->queue.sg_real_addr = 0;
304 }
305
306 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
307 op = BUS_DMASYNC_PREREAD;
308 else
309 op = BUS_DMASYNC_PREWRITE;
310
311 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
312
313 } else {
314 acb->queue.data_addr = 0;
315 acb->queue.data_cnt = 0;
316 acb->queue.sg_real_addr = 0;
317 }
318
319 s = splcam();
320
321 /*
322 	 * Last chance to check whether this CCB
323 	 * needs to be aborted.
324 */
325 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
326 if (nseg != 0)
327 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
328 adwfreeacb(adw, acb);
329 xpt_done(ccb);
330 splx(s);
331 return;
332 }
333
334 acb->state |= ACB_ACTIVE;
335 ccb->ccb_h.status |= CAM_SIM_QUEUED;
336 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
337 ccb->ccb_h.timeout_ch =
338 timeout(adwtimeout, (caddr_t)acb,
339 (ccb->ccb_h.timeout * hz) / 1000);
340
341 adw_send_acb(adw, acb, acbvtob(adw, acb));
342
343 splx(s);
344 }
345
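/*
 * CAM SIM action routine.  Dispatch on the CCB function code; the common
 * XPT_SCSI_IO case sets up an ACB and maps any data buffer, while the
 * remaining cases handle resets, transfer settings, path inquiry, and
 * geometry calculation.
 */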
346 static void
347 adw_action(struct cam_sim *sim, union ccb *ccb)
348 {
349 struct adw_softc *adw;
350
351 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));
352
353 adw = (struct adw_softc *)cam_sim_softc(sim);
354
355 switch (ccb->ccb_h.func_code) {
356 /* Common cases first */
357 case XPT_SCSI_IO: /* Execute the requested I/O operation */
358 {
359 struct ccb_scsiio *csio;
360 struct ccb_hdr *ccbh;
361 struct acb *acb;
362
363 csio = &ccb->csio;
364 ccbh = &ccb->ccb_h;
365
366 /* Max supported CDB length is 12 bytes */
367 if (csio->cdb_len > 12) {
368 ccb->ccb_h.status = CAM_REQ_INVALID;
369 xpt_done(ccb);
370 return;
371 }
372
373 if ((acb = adwgetacb(adw)) == NULL) {
374 int s;
375
376 s = splcam();
377 adw->state |= ADW_RESOURCE_SHORTAGE;
378 splx(s);
379 xpt_freeze_simq(sim, /*count*/1);
380 ccb->ccb_h.status = CAM_REQUEUE_REQ;
381 xpt_done(ccb);
382 return;
383 }
384
385 /* Link acb and ccb so we can find one from the other */
386 acb->ccb = ccb;
387 ccb->ccb_h.ccb_acb_ptr = acb;
388 ccb->ccb_h.ccb_adw_ptr = adw;
389
390 acb->queue.cntl = 0;
391 acb->queue.target_cmd = 0;
392 acb->queue.target_id = ccb->ccb_h.target_id;
393 acb->queue.target_lun = ccb->ccb_h.target_lun;
394
395 acb->queue.mflag = 0;
396 acb->queue.sense_len =
397 MIN(csio->sense_len, sizeof(acb->sense_data));
398 acb->queue.cdb_len = csio->cdb_len;
399 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
400 switch (csio->tag_action) {
401 case MSG_SIMPLE_Q_TAG:
402 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
403 break;
404 case MSG_HEAD_OF_Q_TAG:
405 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
406 break;
407 case MSG_ORDERED_Q_TAG:
408 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
409 break;
410 default:
411 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
412 break;
413 }
414 } else
415 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
416
417 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
418 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;
419
420 acb->queue.done_status = 0;
421 acb->queue.scsi_status = 0;
422 acb->queue.host_status = 0;
423 acb->queue.sg_wk_ix = 0;
424 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
425 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
426 bcopy(csio->cdb_io.cdb_ptr,
427 acb->queue.cdb, csio->cdb_len);
428 } else {
429 /* I guess I could map it in... */
430 ccb->ccb_h.status = CAM_REQ_INVALID;
431 adwfreeacb(adw, acb);
432 xpt_done(ccb);
433 return;
434 }
435 } else {
436 bcopy(csio->cdb_io.cdb_bytes,
437 acb->queue.cdb, csio->cdb_len);
438 }
439
440 /*
441 * If we have any data to send with this command,
442 * map it into bus space.
443 */
444 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
445 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
446 /*
447 * We've been given a pointer
448 * to a single buffer.
449 */
450 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
451 int s;
452 int error;
453
454 s = splsoftvm();
455 error =
456 bus_dmamap_load(adw->buffer_dmat,
457 acb->dmamap,
458 csio->data_ptr,
459 csio->dxfer_len,
460 adwexecuteacb,
461 acb, /*flags*/0);
462 if (error == EINPROGRESS) {
463 /*
464 * So as to maintain ordering,
465 * freeze the controller queue
466 * until our mapping is
467 * returned.
468 */
469 xpt_freeze_simq(sim, 1);
470 					acb->state |= ACB_RELEASE_SIMQ;
471 }
472 splx(s);
473 } else {
474 struct bus_dma_segment seg;
475
476 /* Pointer to physical buffer */
477 seg.ds_addr =
478 (bus_addr_t)csio->data_ptr;
479 seg.ds_len = csio->dxfer_len;
480 adwexecuteacb(acb, &seg, 1, 0);
481 }
482 } else {
483 struct bus_dma_segment *segs;
484
485 if ((ccbh->flags & CAM_DATA_PHYS) != 0)
486 panic("adw_action - Physical "
487 "segment pointers "
488 "unsupported");
489
490 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
491 panic("adw_action - Virtual "
492 "segment addresses "
493 "unsupported");
494
495 /* Just use the segments provided */
496 segs = (struct bus_dma_segment *)csio->data_ptr;
497 adwexecuteacb(acb, segs, csio->sglist_cnt,
498 (csio->sglist_cnt < ADW_SGSIZE)
499 ? 0 : EFBIG);
500 }
501 } else {
502 adwexecuteacb(acb, NULL, 0, 0);
503 }
504 break;
505 }
506 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
507 {
508 adw_idle_cmd_status_t status;
509
510 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
511 ccb->ccb_h.target_id);
512 if (status == ADW_IDLE_CMD_SUCCESS) {
513 ccb->ccb_h.status = CAM_REQ_CMP;
514 if (bootverbose) {
515 xpt_print_path(ccb->ccb_h.path);
516 printf("BDR Delivered\n");
517 }
518 } else
519 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
520 xpt_done(ccb);
521 break;
522 }
523 case XPT_ABORT: /* Abort the specified CCB */
524 /* XXX Implement */
525 ccb->ccb_h.status = CAM_REQ_INVALID;
526 xpt_done(ccb);
527 break;
528 case XPT_SET_TRAN_SETTINGS:
529 {
530 struct ccb_trans_settings *cts;
531 u_int target_mask;
532 int s;
533
534 cts = &ccb->cts;
535 target_mask = 0x01 << ccb->ccb_h.target_id;
536
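		/*
		 * Negotiation state is kept in the controller's local RAM as
		 * per-target bitmask words (disconnect enable, WDTR/SDTR able
		 * and done), so each update below is a read-modify-write of
		 * the relevant word.  Clearing a target's "done" bit should
		 * cause the microcode to renegotiate with the new parameters.
		 */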
537 s = splcam();
538 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
539 u_int sdtrdone;
540
541 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
542 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
543 u_int discenb;
544
545 discenb =
546 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);
547
548 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
549 discenb |= target_mask;
550 else
551 discenb &= ~target_mask;
552
553 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
554 discenb);
555 }
556
557 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
558
559 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
560 adw->tagenb |= target_mask;
561 else
562 adw->tagenb &= ~target_mask;
563 }
564
565 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
566 u_int wdtrenb_orig;
567 u_int wdtrenb;
568 u_int wdtrdone;
569
570 wdtrenb_orig =
571 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
572 wdtrenb = wdtrenb_orig;
573 wdtrdone = adw_lram_read_16(adw,
574 ADW_MC_WDTR_DONE);
575 switch (cts->bus_width) {
576 case MSG_EXT_WDTR_BUS_32_BIT:
577 case MSG_EXT_WDTR_BUS_16_BIT:
578 wdtrenb |= target_mask;
579 break;
580 case MSG_EXT_WDTR_BUS_8_BIT:
581 default:
582 wdtrenb &= ~target_mask;
583 break;
584 }
585 if (wdtrenb != wdtrenb_orig) {
586 adw_lram_write_16(adw,
587 ADW_MC_WDTR_ABLE,
588 wdtrenb);
589 wdtrdone &= ~target_mask;
590 adw_lram_write_16(adw,
591 ADW_MC_WDTR_DONE,
592 wdtrdone);
593 /* Wide negotiation forces async */
594 sdtrdone &= ~target_mask;
595 adw_lram_write_16(adw,
596 ADW_MC_SDTR_DONE,
597 sdtrdone);
598 }
599 }
600
601 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
602 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
603 u_int sdtr_orig;
604 u_int sdtr;
605 u_int sdtrable_orig;
606 u_int sdtrable;
607
608 sdtr = adw_get_chip_sdtr(adw,
609 ccb->ccb_h.target_id);
610 sdtr_orig = sdtr;
611 sdtrable = adw_lram_read_16(adw,
612 ADW_MC_SDTR_ABLE);
613 sdtrable_orig = sdtrable;
614
615 if ((cts->valid
616 & CCB_TRANS_SYNC_RATE_VALID) != 0) {
617
618 sdtr =
619 adw_find_sdtr(adw,
620 cts->sync_period);
621 }
622
623 if ((cts->valid
624 & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
625 if (cts->sync_offset == 0)
626 sdtr = ADW_MC_SDTR_ASYNC;
627 }
628
629 if (sdtr == ADW_MC_SDTR_ASYNC)
630 sdtrable &= ~target_mask;
631 else
632 sdtrable |= target_mask;
633 if (sdtr != sdtr_orig
634 || sdtrable != sdtrable_orig) {
635 adw_set_chip_sdtr(adw,
636 ccb->ccb_h.target_id,
637 sdtr);
638 sdtrdone &= ~target_mask;
639 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
640 sdtrable);
641 adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
642 sdtrdone);
643
644 }
645 }
646 }
647 splx(s);
648 ccb->ccb_h.status = CAM_REQ_CMP;
649 xpt_done(ccb);
650 break;
651 }
652 case XPT_GET_TRAN_SETTINGS:
653 /* Get default/user set transfer settings for the target */
654 {
655 struct ccb_trans_settings *cts;
656 u_int target_mask;
657
658 cts = &ccb->cts;
659 target_mask = 0x01 << ccb->ccb_h.target_id;
660 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
661 u_int mc_sdtr;
662
663 cts->flags = 0;
664 if ((adw->user_discenb & target_mask) != 0)
665 cts->flags |= CCB_TRANS_DISC_ENB;
666
667 if ((adw->user_tagenb & target_mask) != 0)
668 cts->flags |= CCB_TRANS_TAG_ENB;
669
670 if ((adw->user_wdtr & target_mask) != 0)
671 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
672 else
673 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
674
675 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
676 cts->sync_period = adw_find_period(adw, mc_sdtr);
677 if (cts->sync_period != 0)
678 cts->sync_offset = 15; /* XXX ??? */
679 else
680 cts->sync_offset = 0;
681
682 cts->valid = CCB_TRANS_SYNC_RATE_VALID
683 | CCB_TRANS_SYNC_OFFSET_VALID
684 | CCB_TRANS_BUS_WIDTH_VALID
685 | CCB_TRANS_DISC_VALID
686 | CCB_TRANS_TQ_VALID;
687 ccb->ccb_h.status = CAM_REQ_CMP;
688 } else {
689 u_int targ_tinfo;
690
691 cts->flags = 0;
692 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
693 & target_mask) != 0)
694 cts->flags |= CCB_TRANS_DISC_ENB;
695
696 if ((adw->tagenb & target_mask) != 0)
697 cts->flags |= CCB_TRANS_TAG_ENB;
698
699 targ_tinfo =
700 adw_lram_read_16(adw,
701 ADW_MC_DEVICE_HSHK_CFG_TABLE
702 + (2 * ccb->ccb_h.target_id));
703
704 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
705 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
706 else
707 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
708
709 cts->sync_period =
710 adw_hshk_cfg_period_factor(targ_tinfo);
711
712 cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
713 if (cts->sync_period == 0)
714 cts->sync_offset = 0;
715
716 if (cts->sync_offset == 0)
717 cts->sync_period = 0;
718 }
719 cts->valid = CCB_TRANS_SYNC_RATE_VALID
720 | CCB_TRANS_SYNC_OFFSET_VALID
721 | CCB_TRANS_BUS_WIDTH_VALID
722 | CCB_TRANS_DISC_VALID
723 | CCB_TRANS_TQ_VALID;
724 ccb->ccb_h.status = CAM_REQ_CMP;
725 xpt_done(ccb);
726 break;
727 }
728 case XPT_CALC_GEOMETRY:
729 {
730 struct ccb_calc_geometry *ccg;
731 u_int32_t size_mb;
732 u_int32_t secs_per_cylinder;
733 int extended;
734
735 /*
736 * XXX Use Adaptec translation until I find out how to
737 * get this information from the card.
738 */
739 ccg = &ccb->ccg;
740 size_mb = ccg->volume_size
741 / ((1024L * 1024L) / ccg->block_size);
742 extended = 1;
743
744 if (size_mb > 1024 && extended) {
745 ccg->heads = 255;
746 ccg->secs_per_track = 63;
747 } else {
748 ccg->heads = 64;
749 ccg->secs_per_track = 32;
750 }
751 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
752 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
753 ccb->ccb_h.status = CAM_REQ_CMP;
754 xpt_done(ccb);
755 break;
756 }
757 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
758 {
759 int failure;
760
761 failure = adw_reset_bus(adw);
762 if (failure != 0) {
763 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
764 } else {
765 if (bootverbose) {
766 xpt_print_path(adw->path);
767 printf("Bus Reset Delivered\n");
768 }
769 ccb->ccb_h.status = CAM_REQ_CMP;
770 }
771 xpt_done(ccb);
772 break;
773 }
774 case XPT_TERM_IO: /* Terminate the I/O process */
775 /* XXX Implement */
776 ccb->ccb_h.status = CAM_REQ_INVALID;
777 xpt_done(ccb);
778 break;
779 case XPT_PATH_INQ: /* Path routing inquiry */
780 {
781 struct ccb_pathinq *cpi = &ccb->cpi;
782
783 cpi->version_num = 1;
784 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
785 cpi->target_sprt = 0;
786 cpi->hba_misc = 0;
787 cpi->hba_eng_cnt = 0;
788 cpi->max_target = ADW_MAX_TID;
789 cpi->max_lun = ADW_MAX_LUN;
790 cpi->initiator_id = adw->initiator_id;
791 cpi->bus_id = cam_sim_bus(sim);
792 cpi->base_transfer_speed = 3300;
793 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
794 strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
795 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
796 cpi->unit_number = cam_sim_unit(sim);
797 cpi->ccb_h.status = CAM_REQ_CMP;
798 xpt_done(ccb);
799 break;
800 }
801 default:
802 ccb->ccb_h.status = CAM_REQ_INVALID;
803 xpt_done(ccb);
804 break;
805 }
806 }
807
808 static void
809 adw_poll(struct cam_sim *sim)
810 {
811 adw_intr(cam_sim_softc(sim));
812 }
813
814 static void
815 adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
816 {
817 }
818
819 struct adw_softc *
820 adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
821 {
822 struct adw_softc *adw;
823 int i;
824
825 /*
826 * Allocate a storage area for us
827 */
828 adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
829 if (adw == NULL) {
830 printf("adw%d: cannot malloc!\n", device_get_unit(dev));
831 return NULL;
832 }
833 LIST_INIT(&adw->pending_ccbs);
834 SLIST_INIT(&adw->sg_maps);
835 adw->device = dev;
836 adw->unit = device_get_unit(dev);
837 adw->regs_res_type = regs_type;
838 adw->regs_res_id = regs_id;
839 adw->regs = regs;
840 adw->tag = rman_get_bustag(regs);
841 adw->bsh = rman_get_bushandle(regs);
842 i = adw->unit / 10;
843 adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
844 if (adw->name == NULL) {
845 printf("adw%d: cannot malloc name!\n", adw->unit);
846 free(adw, M_DEVBUF);
847 return NULL;
848 }
849 sprintf(adw->name, "adw%d", adw->unit);
850 return(adw);
851 }
852
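/*
 * Release all resources acquired in adw_alloc()/adw_init().  The switch
 * cases intentionally fall through so that teardown proceeds in the
 * reverse order of init_level.
 */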
853 void
854 adw_free(struct adw_softc *adw)
855 {
856 switch (adw->init_level) {
857 case 9:
858 {
859 struct sg_map_node *sg_map;
860
861 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
862 SLIST_REMOVE_HEAD(&adw->sg_maps, links);
863 bus_dmamap_unload(adw->sg_dmat,
864 sg_map->sg_dmamap);
865 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
866 sg_map->sg_dmamap);
867 free(sg_map, M_DEVBUF);
868 }
869 bus_dma_tag_destroy(adw->sg_dmat);
870 }
871 case 8:
872 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
873 case 7:
874 bus_dmamem_free(adw->acb_dmat, adw->acbs,
875 adw->acb_dmamap);
876 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
877 case 6:
878 bus_dma_tag_destroy(adw->acb_dmat);
879 case 5:
880 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
881 case 4:
882 bus_dmamem_free(adw->carrier_dmat, adw->carriers,
883 adw->carrier_dmamap);
884 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
885 case 3:
886 bus_dma_tag_destroy(adw->carrier_dmat);
887 case 2:
888 bus_dma_tag_destroy(adw->buffer_dmat);
889 case 1:
890 bus_dma_tag_destroy(adw->parent_dmat);
891 case 0:
892 break;
893 }
894
895 if (adw->regs != NULL)
896 bus_release_resource(adw->device,
897 adw->regs_res_type,
898 adw->regs_res_id,
899 adw->regs);
900
901 if (adw->irq != NULL)
902 bus_release_resource(adw->device,
903 adw->irq_res_type,
904 0, adw->irq);
905
906 if (adw->sim != NULL) {
907 if (adw->path != NULL) {
908 xpt_async(AC_LOST_DEVICE, adw->path, NULL);
909 xpt_free_path(adw->path);
910 }
911 xpt_bus_deregister(cam_sim_path(adw->sim));
912 cam_sim_free(adw->sim, /*free_devq*/TRUE);
913 }
914 free(adw->name, M_DEVBUF);
915 free(adw, M_DEVBUF);
916 }
917
918 int
919 adw_init(struct adw_softc *adw)
920 {
921 struct adw_eeprom eep_config;
922 u_int tid;
923 u_int i;
924 u_int16_t checksum;
925 u_int16_t scsicfg1;
926
927 checksum = adw_eeprom_read(adw, &eep_config);
928 bcopy(eep_config.serial_number, adw->serial_number,
929 sizeof(adw->serial_number));
930 if (checksum != eep_config.checksum) {
931 u_int16_t serial_number[3];
932
933 adw->flags |= ADW_EEPROM_FAILED;
934 printf("%s: EEPROM checksum failed. Restoring Defaults\n",
935 adw_name(adw));
936
937 /*
938 * Restore the default EEPROM settings.
939 * Assume the 6 byte board serial number that was read
940 * from EEPROM is correct even if the EEPROM checksum
941 * failed.
942 */
943 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
944 bcopy(adw->serial_number, eep_config.serial_number,
945 sizeof(serial_number));
946 adw_eeprom_write(adw, &eep_config);
947 }
948
949 /* Pull eeprom information into our softc. */
950 adw->bios_ctrl = eep_config.bios_ctrl;
951 adw->user_wdtr = eep_config.wdtr_able;
952 for (tid = 0; tid < ADW_MAX_TID; tid++) {
953 u_int mc_sdtr;
954 u_int16_t tid_mask;
955
956 tid_mask = 0x1 << tid;
957 if ((adw->features & ADW_ULTRA) != 0) {
958 /*
959 * Ultra chips store sdtr and ultraenb
960 * bits in their seeprom, so we must
961 			 * construct the equivalent mc_sdtr
962 			 * entries indirectly.
963 */
964 if (eep_config.sync1.sync_enable & tid_mask) {
965 if (eep_config.sync2.ultra_enable & tid_mask)
966 mc_sdtr = ADW_MC_SDTR_20;
967 else
968 mc_sdtr = ADW_MC_SDTR_10;
969 } else
970 mc_sdtr = ADW_MC_SDTR_ASYNC;
971 } else {
972 switch (ADW_TARGET_GROUP(tid)) {
973 case 3:
974 mc_sdtr = eep_config.sync4.sdtr4;
975 break;
976 case 2:
977 mc_sdtr = eep_config.sync3.sdtr3;
978 break;
979 case 1:
980 mc_sdtr = eep_config.sync2.sdtr2;
981 break;
982 default: /* Shut up compiler */
983 case 0:
984 mc_sdtr = eep_config.sync1.sdtr1;
985 break;
986 }
987 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
988 mc_sdtr &= 0xFF;
989 }
990 adw_set_user_sdtr(adw, tid, mc_sdtr);
991 }
992 adw->user_tagenb = eep_config.tagqng_able;
993 adw->user_discenb = eep_config.disc_enable;
994 adw->max_acbs = eep_config.max_host_qng;
995 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);
996
997 /*
998 * Sanity check the number of host openings.
999 */
1000 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
1001 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
1002 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
1003 /* If the value is zero, assume it is uninitialized. */
1004 if (adw->max_acbs == 0)
1005 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
1006 else
1007 adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
1008 }
1009
1010 scsicfg1 = 0;
1011 if ((adw->features & ADW_ULTRA2) != 0) {
1012 switch (eep_config.termination_lvd) {
1013 default:
1014 printf("%s: Invalid EEPROM LVD Termination Settings.\n",
1015 adw_name(adw));
1016 printf("%s: Reverting to Automatic LVD Termination\n",
1017 adw_name(adw));
1018 /* FALLTHROUGH */
1019 case ADW_EEPROM_TERM_AUTO:
1020 break;
1021 case ADW_EEPROM_TERM_BOTH_ON:
1022 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
1023 /* FALLTHROUGH */
1024 case ADW_EEPROM_TERM_HIGH_ON:
1025 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
1026 /* FALLTHROUGH */
1027 case ADW_EEPROM_TERM_OFF:
1028 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
1029 break;
1030 }
1031 }
1032
1033 switch (eep_config.termination_se) {
1034 default:
1035 printf("%s: Invalid SE EEPROM Termination Settings.\n",
1036 adw_name(adw));
1037 printf("%s: Reverting to Automatic SE Termination\n",
1038 adw_name(adw));
1039 /* FALLTHROUGH */
1040 case ADW_EEPROM_TERM_AUTO:
1041 break;
1042 case ADW_EEPROM_TERM_BOTH_ON:
1043 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
1044 /* FALLTHROUGH */
1045 case ADW_EEPROM_TERM_HIGH_ON:
1046 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
1047 /* FALLTHROUGH */
1048 case ADW_EEPROM_TERM_OFF:
1049 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
1050 break;
1051 }
1052 printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);
1053
1054 /* DMA tag for mapping buffers into device visible space. */
1055 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
1056 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
1057 /*highaddr*/BUS_SPACE_MAXADDR,
1058 /*filter*/NULL, /*filterarg*/NULL,
1059 /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
1060 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1061 /*flags*/BUS_DMA_ALLOCNOW,
1062 &adw->buffer_dmat) != 0) {
1063 return (ENOMEM);
1064 }
1065
1066 adw->init_level++;
1067
1068 /* DMA tag for our ccb carrier structures */
1069 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
1070 /*boundary*/0,
1071 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
1072 /*highaddr*/BUS_SPACE_MAXADDR,
1073 /*filter*/NULL, /*filterarg*/NULL,
1074 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1075 * sizeof(struct adw_carrier),
1076 /*nsegments*/1,
1077 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1078 /*flags*/0, &adw->carrier_dmat) != 0) {
1079 return (ENOMEM);
1080 }
1081
1082 adw->init_level++;
1083
1084 /* Allocation for our ccb carrier structures */
1085 if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
1086 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
1087 return (ENOMEM);
1088 }
1089
1090 adw->init_level++;
1091
1092 /* And permanently map them */
1093 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
1094 adw->carriers,
1095 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1096 * sizeof(struct adw_carrier),
1097 adwmapmem, &adw->carrier_busbase, /*flags*/0);
1098
1099 /* Clear them out. */
1100 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1101 * sizeof(struct adw_carrier));
1102
1103 /* Setup our free carrier list */
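	/*
	 * Carriers are linked by bus-space offsets (carr_offset/next_ba)
	 * rather than kernel pointers since the list is shared with the
	 * RISC firmware.  The final carrier, set up after the loop, is a
	 * sentinel: its next_ba of ~0 terminates the free list and it
	 * never leaves the list.
	 */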
1104 adw->free_carriers = adw->carriers;
1105 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
1106 adw->carriers[i].carr_offset =
1107 carriervtobo(adw, &adw->carriers[i]);
1108 adw->carriers[i].carr_ba =
1109 carriervtob(adw, &adw->carriers[i]);
1110 adw->carriers[i].areq_ba = 0;
1111 adw->carriers[i].next_ba =
1112 carriervtobo(adw, &adw->carriers[i+1]);
1113 }
1114 /* Terminal carrier. Never leaves the freelist */
1115 adw->carriers[i].carr_offset =
1116 carriervtobo(adw, &adw->carriers[i]);
1117 adw->carriers[i].carr_ba =
1118 carriervtob(adw, &adw->carriers[i]);
1119 adw->carriers[i].areq_ba = 0;
1120 adw->carriers[i].next_ba = ~0;
1121
1122 adw->init_level++;
1123
1124 /* DMA tag for our acb structures */
1125 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
1126 /*lowaddr*/BUS_SPACE_MAXADDR,
1127 /*highaddr*/BUS_SPACE_MAXADDR,
1128 /*filter*/NULL, /*filterarg*/NULL,
1129 adw->max_acbs * sizeof(struct acb),
1130 /*nsegments*/1,
1131 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1132 /*flags*/0, &adw->acb_dmat) != 0) {
1133 return (ENOMEM);
1134 }
1135
1136 adw->init_level++;
1137
1138 /* Allocation for our ccbs */
1139 if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
1140 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
1141 return (ENOMEM);
1142
1143 adw->init_level++;
1144
1145 /* And permanently map them */
1146 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
1147 adw->acbs,
1148 adw->max_acbs * sizeof(struct acb),
1149 adwmapmem, &adw->acb_busbase, /*flags*/0);
1150
1151 /* Clear them out. */
1152 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));
1153
1154 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1155 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
1156 /*lowaddr*/BUS_SPACE_MAXADDR,
1157 /*highaddr*/BUS_SPACE_MAXADDR,
1158 /*filter*/NULL, /*filterarg*/NULL,
1159 PAGE_SIZE, /*nsegments*/1,
1160 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1161 /*flags*/0, &adw->sg_dmat) != 0) {
1162 return (ENOMEM);
1163 }
1164
1165 adw->init_level++;
1166
1167 /* Allocate our first batch of ccbs */
1168 if (adwallocacbs(adw) == 0)
1169 return (ENOMEM);
1170
1171 if (adw_init_chip(adw, scsicfg1) != 0)
1172 return (ENXIO);
1173
1174 printf("Queue Depth %d\n", adw->max_acbs);
1175
1176 return (0);
1177 }
1178
1179 /*
1180 * Attach all the sub-devices we can find
1181 */
1182 int
1183 adw_attach(struct adw_softc *adw)
1184 {
1185 struct ccb_setasync csa;
1186 struct cam_devq *devq;
1187 int s;
1188 int error;
1189
1190 error = 0;
1191 s = splcam();
1192 /* Hook up our interrupt handler */
1193 if ((error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM,
1194 adw_intr, adw, &adw->ih)) != 0) {
1195 device_printf(adw->device, "bus_setup_intr() failed: %d\n",
1196 error);
1197 goto fail;
1198 }
1199
1200 /* Start the Risc processor now that we are fully configured. */
1201 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);
1202
1203 /*
1204 * Create the device queue for our SIM.
1205 */
1206 devq = cam_simq_alloc(adw->max_acbs);
1207 	if (devq == NULL) {
1208 		error = ENOMEM;
 		goto fail;
 	}
1209
1210 /*
1211 * Construct our SIM entry.
1212 */
1213 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
1214 1, adw->max_acbs, devq);
1215 if (adw->sim == NULL) {
1216 error = ENOMEM;
1217 goto fail;
1218 }
1219
1220 /*
1221 * Register the bus.
1222 */
1223 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
1224 cam_sim_free(adw->sim, /*free devq*/TRUE);
1225 error = ENOMEM;
1226 goto fail;
1227 }
1228
1229 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
1230 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
1231 == CAM_REQ_CMP) {
1232 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
1233 csa.ccb_h.func_code = XPT_SASYNC_CB;
1234 csa.event_enable = AC_LOST_DEVICE;
1235 csa.callback = adw_async;
1236 csa.callback_arg = adw;
1237 xpt_action((union ccb *)&csa);
1238 }
1239
1240 fail:
1241 splx(s);
1242 return (error);
1243 }
1244
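/*
 * Interrupt handler.  Bail out if the chip is not asserting an interrupt,
 * acknowledge it by reading the interrupt status register, service any
 * asynchronous microcode event, and then walk the response carrier queue,
 * completing each finished ACB back to CAM.
 */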
1245 void
1246 adw_intr(void *arg)
1247 {
1248 struct adw_softc *adw;
1249 u_int int_stat;
1250
1251 adw = (struct adw_softc *)arg;
1252 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
1253 return;
1254
1255 /* Reading the register clears the interrupt. */
1256 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);
1257
1258 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
1259 u_int intrb_code;
1260
1261 /* Async Microcode Event */
1262 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
1263 switch (intrb_code) {
1264 case ADW_ASYNC_CARRIER_READY_FAILURE:
1265 /*
1266 * The RISC missed our update of
1267 * the commandq.
1268 */
1269 if (LIST_FIRST(&adw->pending_ccbs) != NULL)
1270 adw_tickle_risc(adw, ADW_TICKLE_A);
1271 break;
1272 case ADW_ASYNC_SCSI_BUS_RESET_DET:
1273 /*
1274 * The firmware detected a SCSI Bus reset.
1275 */
1276 printf("Someone Reset the Bus\n");
1277 adw_handle_bus_reset(adw, /*initiated*/FALSE);
1278 break;
1279 case ADW_ASYNC_RDMA_FAILURE:
1280 /*
1281 * Handle RDMA failure by resetting the
1282 * SCSI Bus and chip.
1283 */
1284 #if XXX
1285 AdvResetChipAndSB(adv_dvc_varp);
1286 #endif
1287 break;
1288
1289 case ADW_ASYNC_HOST_SCSI_BUS_RESET:
1290 /*
1291 * Host generated SCSI bus reset occurred.
1292 */
1293 adw_handle_bus_reset(adw, /*initiated*/TRUE);
1294 break;
1295 default:
1296 printf("adw_intr: unknown async code 0x%x\n",
1297 intrb_code);
1298 break;
1299 }
1300 }
1301
1302 /*
1303 	 * Run down the response queue, completing finished requests.
1304 */
1305 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
1306 struct adw_carrier *free_carrier;
1307 struct acb *acb;
1308 union ccb *ccb;
1309
1310 #if 0
1311 printf("0x%x, 0x%x, 0x%x, 0x%x\n",
1312 adw->responseq->carr_offset,
1313 adw->responseq->carr_ba,
1314 adw->responseq->areq_ba,
1315 adw->responseq->next_ba);
1316 #endif
1317 /*
1318 * The firmware copies the adw_scsi_req_q.acb_baddr
1319 * field into the areq_ba field of the carrier.
1320 */
1321 acb = acbbotov(adw, adw->responseq->areq_ba);
1322
1323 /*
1324 * The least significant four bits of the next_ba
1325 * field are used as flags. Mask them out and then
1326 * advance through the list.
1327 */
1328 free_carrier = adw->responseq;
1329 adw->responseq =
1330 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
1331 free_carrier->next_ba = adw->free_carriers->carr_offset;
1332 adw->free_carriers = free_carrier;
1333
1334 /* Process CCB */
1335 ccb = acb->ccb;
1336 untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
1337 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1338 bus_dmasync_op_t op;
1339
1340 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1341 op = BUS_DMASYNC_POSTREAD;
1342 else
1343 op = BUS_DMASYNC_POSTWRITE;
1344 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
1345 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
1346 ccb->csio.resid = acb->queue.data_cnt;
1347 } else
1348 ccb->csio.resid = 0;
1349
1350 /* Common Cases inline... */
1351 if (acb->queue.host_status == QHSTA_NO_ERROR
1352 && (acb->queue.done_status == QD_NO_ERROR
1353 || acb->queue.done_status == QD_WITH_ERROR)) {
1354 ccb->csio.scsi_status = acb->queue.scsi_status;
1355 ccb->ccb_h.status = 0;
1356 switch (ccb->csio.scsi_status) {
1357 case SCSI_STATUS_OK:
1358 ccb->ccb_h.status |= CAM_REQ_CMP;
1359 break;
1360 case SCSI_STATUS_CHECK_COND:
1361 case SCSI_STATUS_CMD_TERMINATED:
1362 bcopy(&acb->sense_data, &ccb->csio.sense_data,
1363 ccb->csio.sense_len);
1364 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1365 ccb->csio.sense_resid = acb->queue.sense_len;
1366 /* FALLTHROUGH */
1367 default:
1368 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
1369 | CAM_DEV_QFRZN;
1370 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1371 break;
1372 }
1373 adwfreeacb(adw, acb);
1374 xpt_done(ccb);
1375 } else {
1376 adwprocesserror(adw, acb);
1377 }
1378 }
1379 }
1380
1381 static void
1382 adwprocesserror(struct adw_softc *adw, struct acb *acb)
1383 {
1384 union ccb *ccb;
1385
1386 ccb = acb->ccb;
1387 if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
1388 ccb->ccb_h.status = CAM_REQ_ABORTED;
1389 } else {
1390
1391 switch (acb->queue.host_status) {
1392 case QHSTA_M_SEL_TIMEOUT:
1393 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1394 break;
1395 case QHSTA_M_SXFR_OFF_UFLW:
1396 case QHSTA_M_SXFR_OFF_OFLW:
1397 case QHSTA_M_DATA_OVER_RUN:
1398 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1399 break;
1400 case QHSTA_M_SXFR_DESELECTED:
1401 case QHSTA_M_UNEXPECTED_BUS_FREE:
1402 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1403 break;
1404 case QHSTA_M_SCSI_BUS_RESET:
1405 case QHSTA_M_SCSI_BUS_RESET_UNSOL:
1406 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1407 break;
1408 case QHSTA_M_BUS_DEVICE_RESET:
1409 ccb->ccb_h.status = CAM_BDR_SENT;
1410 break;
1411 case QHSTA_M_QUEUE_ABORTED:
1412 /* BDR or Bus Reset */
1413 printf("Saw Queue Aborted\n");
1414 ccb->ccb_h.status = adw->last_reset;
1415 break;
1416 case QHSTA_M_SXFR_SDMA_ERR:
1417 case QHSTA_M_SXFR_SXFR_PERR:
1418 case QHSTA_M_RDMA_PERR:
1419 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1420 break;
1421 case QHSTA_M_WTM_TIMEOUT:
1422 case QHSTA_M_SXFR_WD_TMO:
1423 {
1424 /* The SCSI bus hung in a phase */
1425 xpt_print_path(adw->path);
1426 printf("Watch Dog timer expired. Reseting bus\n");
1427 adw_reset_bus(adw);
1428 break;
1429 }
1430 case QHSTA_M_SXFR_XFR_PH_ERR:
1431 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1432 break;
1433 case QHSTA_M_SXFR_UNKNOWN_ERROR:
1434 break;
1435 case QHSTA_M_BAD_CMPL_STATUS_IN:
1436 /* No command complete after a status message */
1437 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1438 break;
1439 case QHSTA_M_AUTO_REQ_SENSE_FAIL:
1440 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1441 break;
1442 case QHSTA_M_INVALID_DEVICE:
1443 ccb->ccb_h.status = CAM_PATH_INVALID;
1444 break;
1445 case QHSTA_M_NO_AUTO_REQ_SENSE:
1446 /*
1447 * User didn't request sense, but we got a
1448 * check condition.
1449 */
1450 ccb->csio.scsi_status = acb->queue.scsi_status;
1451 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1452 break;
1453 default:
1454 panic("%s: Unhandled Host status error %x",
1455 adw_name(adw), acb->queue.host_status);
1456 /* NOTREACHED */
1457 }
1458 }
1459 if ((acb->state & ACB_RECOVERY_ACB) != 0) {
1460 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
1461 || ccb->ccb_h.status == CAM_BDR_SENT)
1462 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1463 }
1464 if (ccb->ccb_h.status != CAM_REQ_CMP) {
1465 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1466 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1467 }
1468 adwfreeacb(adw, acb);
1469 xpt_done(ccb);
1470 }
1471
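/*
 * Command timeout handler.  Mark the ACB as a recovery command and first
 * attempt a Bus Device Reset via the controller's idle command interface;
 * if that fails, fall back to resetting the entire bus.
 */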
1472 static void
1473 adwtimeout(void *arg)
1474 {
1475 struct acb *acb;
1476 union ccb *ccb;
1477 struct adw_softc *adw;
1478 adw_idle_cmd_status_t status;
1479 int target_id;
1480 int s;
1481
1482 acb = (struct acb *)arg;
1483 ccb = acb->ccb;
1484 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
1485 xpt_print_path(ccb->ccb_h.path);
1486 printf("ACB %p - timed out\n", (void *)acb);
1487
1488 s = splcam();
1489
1490 if ((acb->state & ACB_ACTIVE) == 0) {
1491 xpt_print_path(ccb->ccb_h.path);
1492 printf("ACB %p - timed out CCB already completed\n",
1493 (void *)acb);
1494 splx(s);
1495 return;
1496 }
1497
1498 acb->state |= ACB_RECOVERY_ACB;
1499 target_id = ccb->ccb_h.target_id;
1500
1501 /* Attempt a BDR first */
1502 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
1503 ccb->ccb_h.target_id);
1504 splx(s);
1505 if (status == ADW_IDLE_CMD_SUCCESS) {
1506 printf("%s: BDR Delivered. No longer in timeout\n",
1507 adw_name(adw));
1508 adw_handle_device_reset(adw, target_id);
1509 } else {
1510 adw_reset_bus(adw);
1511 xpt_print_path(adw->path);
1512 printf("Bus Reset Delivered. No longer in timeout\n");
1513 }
1514 }
1515
1516 static void
1517 adw_handle_device_reset(struct adw_softc *adw, u_int target)
1518 {
1519 struct cam_path *path;
1520 cam_status error;
1521
1522 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
1523 target, CAM_LUN_WILDCARD);
1524
1525 if (error == CAM_REQ_CMP) {
1526 xpt_async(AC_SENT_BDR, path, NULL);
1527 xpt_free_path(path);
1528 }
1529 adw->last_reset = CAM_BDR_SENT;
1530 }
1531
1532 static void
1533 adw_handle_bus_reset(struct adw_softc *adw, int initiated)
1534 {
1535 if (initiated) {
1536 /*
1537 * The microcode currently sets the SCSI Bus Reset signal
1538 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
1539 * command above. But the SCSI Bus Reset Hold Time in the
1540 * microcode is not deterministic (it may in fact be for less
1541 * than the SCSI Spec. minimum of 25 us). Therefore on return
1542 * the Adv Library sets the SCSI Bus Reset signal for
1543 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
1544 * than 25 us.
1545 */
1546 u_int scsi_ctrl;
1547
1548 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
1549 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
1550 DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
1551 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);
1552
1553 /*
1554 * We will perform the async notification when the
1555 * SCSI Reset interrupt occurs.
1556 */
1557 } else
1558 xpt_async(AC_BUS_RESET, adw->path, NULL);
1559 adw->last_reset = CAM_SCSI_BUS_RESET;
1560 }