1 /*-
2 * CAM SCSI interface for the Advanced Systems Inc.
3 * Second Generation SCSI controllers.
4 *
5 * Product specific probe and attach routines can be found in:
6 *
7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W
8 *
9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33 /*
34 * Ported from:
35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36 *
37 * Copyright (c) 1995-1998 Advanced System Products, Inc.
38 * All Rights Reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that redistributions of source
42 * code retain the above copyright notice and this comment without
43 * modification.
44 */
45
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD: releng/6.1/sys/dev/advansys/adwcam.c 146734 2005-05-29 04:42:30Z nyan $");
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/bus.h>
56
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59
60 #include <sys/rman.h>
61
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_sim.h>
65 #include <cam/cam_xpt_sim.h>
66 #include <cam/cam_debug.h>
67
68 #include <cam/scsi/scsi_message.h>
69
70 #include <dev/advansys/adwvar.h>
71
72 /* Definitions for our use of the SIM private CCB area */
73 #define ccb_acb_ptr spriv_ptr0
74 #define ccb_adw_ptr spriv_ptr1
75
76 u_long adw_unit;
77
78 static __inline cam_status adwccbstatus(union ccb*);
79 static __inline struct acb* adwgetacb(struct adw_softc *adw);
80 static __inline void adwfreeacb(struct adw_softc *adw,
81 struct acb *acb);
82
83 static void adwmapmem(void *arg, bus_dma_segment_t *segs,
84 int nseg, int error);
85 static struct sg_map_node*
86 adwallocsgmap(struct adw_softc *adw);
87 static int adwallocacbs(struct adw_softc *adw);
88
89 static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
90 int nseg, int error);
91 static void adw_action(struct cam_sim *sim, union ccb *ccb);
92 static void adw_poll(struct cam_sim *sim);
93 static void adw_async(void *callback_arg, u_int32_t code,
94 struct cam_path *path, void *arg);
95 static void adwprocesserror(struct adw_softc *adw, struct acb *acb);
96 static void adwtimeout(void *arg);
97 static void adw_handle_device_reset(struct adw_softc *adw,
98 u_int target);
99 static void adw_handle_bus_reset(struct adw_softc *adw,
100 int initiated);
101
102 static __inline cam_status
103 adwccbstatus(union ccb* ccb)
104 {
105 return (ccb->ccb_h.status & CAM_STATUS_MASK);
106 }
107
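/*
 * Fetch an ACB from the free list, growing the pool on demand (up to
 * max_acbs) when the list is empty.  Returns NULL if no ACB can be
 * obtained, in which case the caller freezes the SIM queue.
 */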
108 static __inline struct acb*
109 adwgetacb(struct adw_softc *adw)
110 {
111 struct acb* acb;
112 int s;
113
114 s = splcam();
115 if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
116 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
117 } else if (adw->num_acbs < adw->max_acbs) {
118 adwallocacbs(adw);
119 acb = SLIST_FIRST(&adw->free_acb_list);
120 if (acb == NULL)
121 printf("%s: Can't malloc ACB\n", adw_name(adw));
122 else {
123 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
124 }
125 }
126 splx(s);
127
128 return (acb);
129 }
130
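/*
 * Return an ACB to the free list.  If the SIM queue was frozen because
 * of a resource shortage, arrange for the completing CCB to carry
 * CAM_RELEASE_SIMQ so CAM unfreezes the queue.
 */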
131 static __inline void
132 adwfreeacb(struct adw_softc *adw, struct acb *acb)
133 {
134 int s;
135
136 s = splcam();
137 if ((acb->state & ACB_ACTIVE) != 0)
138 LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
139 if ((acb->state & ACB_RELEASE_SIMQ) != 0)
140 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
141 else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
142 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
143 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
144 adw->state &= ~ADW_RESOURCE_SHORTAGE;
145 }
146 acb->state = ACB_FREE;
147 SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
148 splx(s);
149 }
150
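/*
 * busdma callback used for the single-segment loads of the carrier,
 * ACB, and S/G areas; it simply records the segment's bus address.
 */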
151 static void
152 adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
153 {
154 bus_addr_t *busaddrp;
155
156 busaddrp = (bus_addr_t *)arg;
157 *busaddrp = segs->ds_addr;
158 }
159
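/*
 * Allocate and map one page of DMA-safe memory to hold the S/G blocks
 * for the next batch of ACBs.  The page is linked onto sg_maps so it
 * can be released later in adw_free().
 */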
160 static struct sg_map_node *
161 adwallocsgmap(struct adw_softc *adw)
162 {
163 struct sg_map_node *sg_map;
164
165 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
166
167 if (sg_map == NULL)
168 return (NULL);
169
170 /* Allocate S/G space for the next batch of ACBs */
171 if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
172 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
173 free(sg_map, M_DEVBUF);
174 return (NULL);
175 }
176
177 SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);
178
179 bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
180 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);
181
182 bzero(sg_map->sg_vaddr, PAGE_SIZE);
183 return (sg_map);
184 }
185
186 /*
187 * Allocate another chunk of ACBs. Return the count of entries added.
188 * Assumed to be called at splcam().
189 */
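/*
 * Each ACB consumes ADW_SG_BLOCKCNT S/G blocks, so a single page of
 * S/G space yields PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(struct
 * adw_sg_block)) new ACBs per call (the newcount computation below).
 */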
190 static int
191 adwallocacbs(struct adw_softc *adw)
192 {
193 struct acb *next_acb;
194 struct sg_map_node *sg_map;
195 bus_addr_t busaddr;
196 struct adw_sg_block *blocks;
197 int newcount;
198 int i;
199
200 next_acb = &adw->acbs[adw->num_acbs];
201 sg_map = adwallocsgmap(adw);
202
203 if (sg_map == NULL)
204 return (0);
205
206 blocks = sg_map->sg_vaddr;
207 busaddr = sg_map->sg_physaddr;
208
209 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
210 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
211 int error;
212
213 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
214 &next_acb->dmamap);
215 if (error != 0)
216 break;
217 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
218 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
219 next_acb->queue.sense_baddr =
220 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
221 next_acb->sg_blocks = blocks;
222 next_acb->sg_busaddr = busaddr;
223 next_acb->state = ACB_FREE;
224 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
225 blocks += ADW_SG_BLOCKCNT;
226 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
227 next_acb++;
228 adw->num_acbs++;
229 }
230 return (i);
231 }
232
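/*
 * Callback for bus_dmamap_load().  Copies the returned DMA segments
 * into the ACB's S/G block chain, syncs the data buffer, performs a
 * last-minute abort check, starts the timeout, and hands the ACB to
 * the RISC via adw_send_acb().
 */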
233 static void
234 adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
235 {
236 struct acb *acb;
237 union ccb *ccb;
238 struct adw_softc *adw;
239 int s;
240
241 acb = (struct acb *)arg;
242 ccb = acb->ccb;
243 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
244
245 if (error != 0) {
246 if (error != EFBIG)
247 printf("%s: Unexpected error 0x%x returned from "
248 "bus_dmamap_load\n", adw_name(adw), error);
249 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
250 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
251 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
252 }
253 adwfreeacb(adw, acb);
254 xpt_done(ccb);
255 return;
256 }
257
258 if (nseg != 0) {
259 bus_dmasync_op_t op;
260
261 acb->queue.data_addr = dm_segs[0].ds_addr;
262 acb->queue.data_cnt = ccb->csio.dxfer_len;
263 if (nseg > 1) {
264 struct adw_sg_block *sg_block;
265 struct adw_sg_elm *sg;
266 bus_addr_t sg_busaddr;
267 u_int sg_index;
268 bus_dma_segment_t *end_seg;
269
270 end_seg = dm_segs + nseg;
271
272 sg_busaddr = acb->sg_busaddr;
273 sg_index = 0;
274 /* Copy the segments into our SG list */
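/*
 * Each adw_sg_block holds up to ADW_NO_OF_SG_PER_BLOCK entries plus
 * the bus address of the next block; the last block in the chain is
 * terminated with a zero sg_busaddr_next.
 */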
275 for (sg_block = acb->sg_blocks;; sg_block++) {
276 u_int i;
277
278 sg = sg_block->sg_list;
279 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
280 if (dm_segs >= end_seg)
281 break;
282
283 sg->sg_addr = dm_segs->ds_addr;
284 sg->sg_count = dm_segs->ds_len;
285 sg++;
286 dm_segs++;
287 }
288 sg_block->sg_cnt = i;
289 sg_index += i;
290 if (dm_segs == end_seg) {
291 sg_block->sg_busaddr_next = 0;
292 break;
293 } else {
294 sg_busaddr +=
295 sizeof(struct adw_sg_block);
296 sg_block->sg_busaddr_next = sg_busaddr;
297 }
298 }
299 acb->queue.sg_real_addr = acb->sg_busaddr;
300 } else {
301 acb->queue.sg_real_addr = 0;
302 }
303
304 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
305 op = BUS_DMASYNC_PREREAD;
306 else
307 op = BUS_DMASYNC_PREWRITE;
308
309 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
310
311 } else {
312 acb->queue.data_addr = 0;
313 acb->queue.data_cnt = 0;
314 acb->queue.sg_real_addr = 0;
315 }
316
317 s = splcam();
318
319 /*
320 * This is our last chance to check whether this CCB
321 * needs to be aborted.
322 */
323 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
324 if (nseg != 0)
325 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
326 adwfreeacb(adw, acb);
327 xpt_done(ccb);
328 splx(s);
329 return;
330 }
331
332 acb->state |= ACB_ACTIVE;
333 ccb->ccb_h.status |= CAM_SIM_QUEUED;
334 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
335 ccb->ccb_h.timeout_ch =
336 timeout(adwtimeout, (caddr_t)acb,
337 (ccb->ccb_h.timeout * hz) / 1000);
338
339 adw_send_acb(adw, acb, acbvtob(adw, acb));
340
341 splx(s);
342 }
343
344 static void
345 adw_action(struct cam_sim *sim, union ccb *ccb)
346 {
347 struct adw_softc *adw;
348
349 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));
350
351 adw = (struct adw_softc *)cam_sim_softc(sim);
352
353 switch (ccb->ccb_h.func_code) {
354 /* Common cases first */
355 case XPT_SCSI_IO: /* Execute the requested I/O operation */
356 {
357 struct ccb_scsiio *csio;
358 struct ccb_hdr *ccbh;
359 struct acb *acb;
360
361 csio = &ccb->csio;
362 ccbh = &ccb->ccb_h;
363
364 /* Max supported CDB length is 12 bytes */
365 if (csio->cdb_len > 12) {
366 ccb->ccb_h.status = CAM_REQ_INVALID;
367 xpt_done(ccb);
368 return;
369 }
370
371 if ((acb = adwgetacb(adw)) == NULL) {
372 int s;
373
374 s = splcam();
375 adw->state |= ADW_RESOURCE_SHORTAGE;
376 splx(s);
377 xpt_freeze_simq(sim, /*count*/1);
378 ccb->ccb_h.status = CAM_REQUEUE_REQ;
379 xpt_done(ccb);
380 return;
381 }
382
383 /* Link acb and ccb so we can find one from the other */
384 acb->ccb = ccb;
385 ccb->ccb_h.ccb_acb_ptr = acb;
386 ccb->ccb_h.ccb_adw_ptr = adw;
387
388 acb->queue.cntl = 0;
389 acb->queue.target_cmd = 0;
390 acb->queue.target_id = ccb->ccb_h.target_id;
391 acb->queue.target_lun = ccb->ccb_h.target_lun;
392
393 acb->queue.mflag = 0;
394 acb->queue.sense_len =
395 MIN(csio->sense_len, sizeof(acb->sense_data));
396 acb->queue.cdb_len = csio->cdb_len;
397 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
398 switch (csio->tag_action) {
399 case MSG_SIMPLE_Q_TAG:
400 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
401 break;
402 case MSG_HEAD_OF_Q_TAG:
403 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
404 break;
405 case MSG_ORDERED_Q_TAG:
406 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
407 break;
408 default:
409 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
410 break;
411 }
412 } else
413 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
414
415 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
416 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;
417
418 acb->queue.done_status = 0;
419 acb->queue.scsi_status = 0;
420 acb->queue.host_status = 0;
421 acb->queue.sg_wk_ix = 0;
422 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
423 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
424 bcopy(csio->cdb_io.cdb_ptr,
425 acb->queue.cdb, csio->cdb_len);
426 } else {
427 /* I guess I could map it in... */
428 ccb->ccb_h.status = CAM_REQ_INVALID;
429 adwfreeacb(adw, acb);
430 xpt_done(ccb);
431 return;
432 }
433 } else {
434 bcopy(csio->cdb_io.cdb_bytes,
435 acb->queue.cdb, csio->cdb_len);
436 }
437
438 /*
439 * If we have any data to send with this command,
440 * map it into bus space.
441 */
442 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
443 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
444 /*
445 * We've been given a pointer
446 * to a single buffer.
447 */
448 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
449 int s;
450 int error;
451
452 s = splsoftvm();
453 error =
454 bus_dmamap_load(adw->buffer_dmat,
455 acb->dmamap,
456 csio->data_ptr,
457 csio->dxfer_len,
458 adwexecuteacb,
459 acb, /*flags*/0);
460 if (error == EINPROGRESS) {
461 /*
462 * So as to maintain ordering,
463 * freeze the controller queue
464 * until our mapping is
465 * returned.
466 */
467 xpt_freeze_simq(sim, 1);
468 acb->state |= ACB_RELEASE_SIMQ;
469 }
470 splx(s);
471 } else {
472 struct bus_dma_segment seg;
473
474 /* Pointer to physical buffer */
475 seg.ds_addr =
476 (bus_addr_t)csio->data_ptr;
477 seg.ds_len = csio->dxfer_len;
478 adwexecuteacb(acb, &seg, 1, 0);
479 }
480 } else {
481 struct bus_dma_segment *segs;
482
483 if ((ccbh->flags & CAM_DATA_PHYS) != 0)
484 panic("adw_action - Physical "
485 "segment pointers "
486 "unsupported");
487
488 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
489 panic("adw_action - Virtual "
490 "segment addresses "
491 "unsupported");
492
493 /* Just use the segments provided */
494 segs = (struct bus_dma_segment *)csio->data_ptr;
495 adwexecuteacb(acb, segs, csio->sglist_cnt,
496 (csio->sglist_cnt < ADW_SGSIZE)
497 ? 0 : EFBIG);
498 }
499 } else {
500 adwexecuteacb(acb, NULL, 0, 0);
501 }
502 break;
503 }
504 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
505 {
506 adw_idle_cmd_status_t status;
507
508 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
509 ccb->ccb_h.target_id);
510 if (status == ADW_IDLE_CMD_SUCCESS) {
511 ccb->ccb_h.status = CAM_REQ_CMP;
512 if (bootverbose) {
513 xpt_print_path(ccb->ccb_h.path);
514 printf("BDR Delivered\n");
515 }
516 } else
517 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
518 xpt_done(ccb);
519 break;
520 }
521 case XPT_ABORT: /* Abort the specified CCB */
522 /* XXX Implement */
523 ccb->ccb_h.status = CAM_REQ_INVALID;
524 xpt_done(ccb);
525 break;
526 case XPT_SET_TRAN_SETTINGS:
527 {
528 struct ccb_trans_settings *cts;
529 u_int target_mask;
530 int s;
531
532 cts = &ccb->cts;
533 target_mask = 0x01 << ccb->ccb_h.target_id;
534
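/*
 * Current settings are applied by updating the microcode's LRAM
 * variables (disconnect enable, WDTR/SDTR "able" masks) and clearing
 * the corresponding "done" bits, presumably so the firmware
 * renegotiates with the target on its next command.
 */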
535 s = splcam();
536 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
537 u_int sdtrdone;
538
539 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
540 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
541 u_int discenb;
542
543 discenb =
544 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);
545
546 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
547 discenb |= target_mask;
548 else
549 discenb &= ~target_mask;
550
551 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
552 discenb);
553 }
554
555 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
556
557 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
558 adw->tagenb |= target_mask;
559 else
560 adw->tagenb &= ~target_mask;
561 }
562
563 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
564 u_int wdtrenb_orig;
565 u_int wdtrenb;
566 u_int wdtrdone;
567
568 wdtrenb_orig =
569 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
570 wdtrenb = wdtrenb_orig;
571 wdtrdone = adw_lram_read_16(adw,
572 ADW_MC_WDTR_DONE);
573 switch (cts->bus_width) {
574 case MSG_EXT_WDTR_BUS_32_BIT:
575 case MSG_EXT_WDTR_BUS_16_BIT:
576 wdtrenb |= target_mask;
577 break;
578 case MSG_EXT_WDTR_BUS_8_BIT:
579 default:
580 wdtrenb &= ~target_mask;
581 break;
582 }
583 if (wdtrenb != wdtrenb_orig) {
584 adw_lram_write_16(adw,
585 ADW_MC_WDTR_ABLE,
586 wdtrenb);
587 wdtrdone &= ~target_mask;
588 adw_lram_write_16(adw,
589 ADW_MC_WDTR_DONE,
590 wdtrdone);
591 /* Wide negotiation forces async */
592 sdtrdone &= ~target_mask;
593 adw_lram_write_16(adw,
594 ADW_MC_SDTR_DONE,
595 sdtrdone);
596 }
597 }
598
599 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
600 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
601 u_int sdtr_orig;
602 u_int sdtr;
603 u_int sdtrable_orig;
604 u_int sdtrable;
605
606 sdtr = adw_get_chip_sdtr(adw,
607 ccb->ccb_h.target_id);
608 sdtr_orig = sdtr;
609 sdtrable = adw_lram_read_16(adw,
610 ADW_MC_SDTR_ABLE);
611 sdtrable_orig = sdtrable;
612
613 if ((cts->valid
614 & CCB_TRANS_SYNC_RATE_VALID) != 0) {
615
616 sdtr =
617 adw_find_sdtr(adw,
618 cts->sync_period);
619 }
620
621 if ((cts->valid
622 & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
623 if (cts->sync_offset == 0)
624 sdtr = ADW_MC_SDTR_ASYNC;
625 }
626
627 if (sdtr == ADW_MC_SDTR_ASYNC)
628 sdtrable &= ~target_mask;
629 else
630 sdtrable |= target_mask;
631 if (sdtr != sdtr_orig
632 || sdtrable != sdtrable_orig) {
633 adw_set_chip_sdtr(adw,
634 ccb->ccb_h.target_id,
635 sdtr);
636 sdtrdone &= ~target_mask;
637 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
638 sdtrable);
639 adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
640 sdtrdone);
641
642 }
643 }
644 }
645 splx(s);
646 ccb->ccb_h.status = CAM_REQ_CMP;
647 xpt_done(ccb);
648 break;
649 }
650 case XPT_GET_TRAN_SETTINGS:
651 /* Get default/user set transfer settings for the target */
652 {
653 struct ccb_trans_settings *cts;
654 u_int target_mask;
655
656 cts = &ccb->cts;
657 target_mask = 0x01 << ccb->ccb_h.target_id;
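/*
 * User settings are reported from the EEPROM-derived values cached in
 * the softc; current settings are read back from the microcode's
 * per-target handshake configuration table.
 */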
658 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
659 u_int mc_sdtr;
660
661 cts->flags = 0;
662 if ((adw->user_discenb & target_mask) != 0)
663 cts->flags |= CCB_TRANS_DISC_ENB;
664
665 if ((adw->user_tagenb & target_mask) != 0)
666 cts->flags |= CCB_TRANS_TAG_ENB;
667
668 if ((adw->user_wdtr & target_mask) != 0)
669 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
670 else
671 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
672
673 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
674 cts->sync_period = adw_find_period(adw, mc_sdtr);
675 if (cts->sync_period != 0)
676 cts->sync_offset = 15; /* XXX ??? */
677 else
678 cts->sync_offset = 0;
679
680 cts->valid = CCB_TRANS_SYNC_RATE_VALID
681 | CCB_TRANS_SYNC_OFFSET_VALID
682 | CCB_TRANS_BUS_WIDTH_VALID
683 | CCB_TRANS_DISC_VALID
684 | CCB_TRANS_TQ_VALID;
685 ccb->ccb_h.status = CAM_REQ_CMP;
686 } else {
687 u_int targ_tinfo;
688
689 cts->flags = 0;
690 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
691 & target_mask) != 0)
692 cts->flags |= CCB_TRANS_DISC_ENB;
693
694 if ((adw->tagenb & target_mask) != 0)
695 cts->flags |= CCB_TRANS_TAG_ENB;
696
697 targ_tinfo =
698 adw_lram_read_16(adw,
699 ADW_MC_DEVICE_HSHK_CFG_TABLE
700 + (2 * ccb->ccb_h.target_id));
701
702 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
703 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
704 else
705 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
706
707 cts->sync_period =
708 adw_hshk_cfg_period_factor(targ_tinfo);
709
710 cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
711 if (cts->sync_period == 0)
712 cts->sync_offset = 0;
713
714 if (cts->sync_offset == 0)
715 cts->sync_period = 0;
716 }
717 cts->valid = CCB_TRANS_SYNC_RATE_VALID
718 | CCB_TRANS_SYNC_OFFSET_VALID
719 | CCB_TRANS_BUS_WIDTH_VALID
720 | CCB_TRANS_DISC_VALID
721 | CCB_TRANS_TQ_VALID;
722 ccb->ccb_h.status = CAM_REQ_CMP;
723 xpt_done(ccb);
724 break;
725 }
726 case XPT_CALC_GEOMETRY:
727 {
728 /*
729 * XXX Use Adaptec translation until I find out how to
730 * get this information from the card.
731 */
732 cam_calc_geometry(&ccb->ccg, /*extended*/1);
733 xpt_done(ccb);
734 break;
735 }
736 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
737 {
738 int failure;
739
740 failure = adw_reset_bus(adw);
741 if (failure != 0) {
742 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
743 } else {
744 if (bootverbose) {
745 xpt_print_path(adw->path);
746 printf("Bus Reset Delivered\n");
747 }
748 ccb->ccb_h.status = CAM_REQ_CMP;
749 }
750 xpt_done(ccb);
751 break;
752 }
753 case XPT_TERM_IO: /* Terminate the I/O process */
754 /* XXX Implement */
755 ccb->ccb_h.status = CAM_REQ_INVALID;
756 xpt_done(ccb);
757 break;
758 case XPT_PATH_INQ: /* Path routing inquiry */
759 {
760 struct ccb_pathinq *cpi = &ccb->cpi;
761
762 cpi->version_num = 1;
763 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
764 cpi->target_sprt = 0;
765 cpi->hba_misc = 0;
766 cpi->hba_eng_cnt = 0;
767 cpi->max_target = ADW_MAX_TID;
768 cpi->max_lun = ADW_MAX_LUN;
769 cpi->initiator_id = adw->initiator_id;
770 cpi->bus_id = cam_sim_bus(sim);
771 cpi->base_transfer_speed = 3300;
772 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
773 strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
774 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
775 cpi->unit_number = cam_sim_unit(sim);
776 cpi->ccb_h.status = CAM_REQ_CMP;
777 xpt_done(ccb);
778 break;
779 }
780 default:
781 ccb->ccb_h.status = CAM_REQ_INVALID;
782 xpt_done(ccb);
783 break;
784 }
785 }
786
787 static void
788 adw_poll(struct cam_sim *sim)
789 {
790 adw_intr(cam_sim_softc(sim));
791 }
792
793 static void
794 adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
795 {
796 }
797
798 struct adw_softc *
799 adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
800 {
801 struct adw_softc *adw;
802 int i;
803
804 /*
805 * Allocate a storage area for us
806 */
807 adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
808 if (adw == NULL) {
809 printf("adw%d: cannot malloc!\n", device_get_unit(dev));
810 return NULL;
811 }
812 LIST_INIT(&adw->pending_ccbs);
813 SLIST_INIT(&adw->sg_maps);
814 adw->device = dev;
815 adw->unit = device_get_unit(dev);
816 adw->regs_res_type = regs_type;
817 adw->regs_res_id = regs_id;
818 adw->regs = regs;
819 adw->tag = rman_get_bustag(regs);
820 adw->bsh = rman_get_bushandle(regs);
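/*
 * Size the name buffer for "adw" plus the unit number's digits;
 * unit / 10 conservatively covers any digits beyond the first.
 */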
821 i = adw->unit / 10;
822 adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
823 if (adw->name == NULL) {
824 printf("adw%d: cannot malloc name!\n", adw->unit);
825 free(adw, M_DEVBUF);
826 return NULL;
827 }
828 sprintf(adw->name, "adw%d", adw->unit);
829 return(adw);
830 }
831
832 void
833 adw_free(struct adw_softc *adw)
834 {
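/*
 * Tear down in the reverse order of adw_init().  init_level records
 * how far initialization progressed, and each case falls through to
 * release everything allocated at lower levels.
 */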
835 switch (adw->init_level) {
836 case 9:
837 {
838 struct sg_map_node *sg_map;
839
840 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
841 SLIST_REMOVE_HEAD(&adw->sg_maps, links);
842 bus_dmamap_unload(adw->sg_dmat,
843 sg_map->sg_dmamap);
844 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
845 sg_map->sg_dmamap);
846 free(sg_map, M_DEVBUF);
847 }
848 bus_dma_tag_destroy(adw->sg_dmat);
849 }
850 case 8:
851 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
852 case 7:
853 bus_dmamem_free(adw->acb_dmat, adw->acbs,
854 adw->acb_dmamap);
855 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
856 case 6:
857 bus_dma_tag_destroy(adw->acb_dmat);
858 case 5:
859 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
860 case 4:
861 bus_dmamem_free(adw->carrier_dmat, adw->carriers,
862 adw->carrier_dmamap);
863 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
864 case 3:
865 bus_dma_tag_destroy(adw->carrier_dmat);
866 case 2:
867 bus_dma_tag_destroy(adw->buffer_dmat);
868 case 1:
869 bus_dma_tag_destroy(adw->parent_dmat);
870 case 0:
871 break;
872 }
873
874 if (adw->regs != NULL)
875 bus_release_resource(adw->device,
876 adw->regs_res_type,
877 adw->regs_res_id,
878 adw->regs);
879
880 if (adw->irq != NULL)
881 bus_release_resource(adw->device,
882 adw->irq_res_type,
883 0, adw->irq);
884
885 if (adw->sim != NULL) {
886 if (adw->path != NULL) {
887 xpt_async(AC_LOST_DEVICE, adw->path, NULL);
888 xpt_free_path(adw->path);
889 }
890 xpt_bus_deregister(cam_sim_path(adw->sim));
891 cam_sim_free(adw->sim, /*free_devq*/TRUE);
892 }
893 free(adw->name, M_DEVBUF);
894 free(adw, M_DEVBUF);
895 }
896
897 int
898 adw_init(struct adw_softc *adw)
899 {
900 struct adw_eeprom eep_config;
901 u_int tid;
902 u_int i;
903 u_int16_t checksum;
904 u_int16_t scsicfg1;
905
906 checksum = adw_eeprom_read(adw, &eep_config);
907 bcopy(eep_config.serial_number, adw->serial_number,
908 sizeof(adw->serial_number));
909 if (checksum != eep_config.checksum) {
910 u_int16_t serial_number[3];
911
912 adw->flags |= ADW_EEPROM_FAILED;
913 printf("%s: EEPROM checksum failed. Restoring Defaults\n",
914 adw_name(adw));
915
916 /*
917 * Restore the default EEPROM settings.
918 * Assume the 6 byte board serial number that was read
919 * from EEPROM is correct even if the EEPROM checksum
920 * failed.
921 */
922 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
923 bcopy(adw->serial_number, eep_config.serial_number,
924 sizeof(serial_number));
925 adw_eeprom_write(adw, &eep_config);
926 }
927
928 /* Pull eeprom information into our softc. */
929 adw->bios_ctrl = eep_config.bios_ctrl;
930 adw->user_wdtr = eep_config.wdtr_able;
931 for (tid = 0; tid < ADW_MAX_TID; tid++) {
932 u_int mc_sdtr;
933 u_int16_t tid_mask;
934
935 tid_mask = 0x1 << tid;
936 if ((adw->features & ADW_ULTRA) != 0) {
937 /*
938 * Ultra chips store sdtr and ultraenb
939 * bits in their seeprom, so we must
940 * construct valid mc_sdtr entries
941 * from them indirectly.
942 */
943 if (eep_config.sync1.sync_enable & tid_mask) {
944 if (eep_config.sync2.ultra_enable & tid_mask)
945 mc_sdtr = ADW_MC_SDTR_20;
946 else
947 mc_sdtr = ADW_MC_SDTR_10;
948 } else
949 mc_sdtr = ADW_MC_SDTR_ASYNC;
950 } else {
951 switch (ADW_TARGET_GROUP(tid)) {
952 case 3:
953 mc_sdtr = eep_config.sync4.sdtr4;
954 break;
955 case 2:
956 mc_sdtr = eep_config.sync3.sdtr3;
957 break;
958 case 1:
959 mc_sdtr = eep_config.sync2.sdtr2;
960 break;
961 default: /* Shut up compiler */
962 case 0:
963 mc_sdtr = eep_config.sync1.sdtr1;
964 break;
965 }
966 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
967 mc_sdtr &= 0xFF;
968 }
969 adw_set_user_sdtr(adw, tid, mc_sdtr);
970 }
971 adw->user_tagenb = eep_config.tagqng_able;
972 adw->user_discenb = eep_config.disc_enable;
973 adw->max_acbs = eep_config.max_host_qng;
974 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);
975
976 /*
977 * Sanity check the number of host openings.
978 */
979 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
980 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
981 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
982 /* If the value is zero, assume it is uninitialized. */
983 if (adw->max_acbs == 0)
984 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
985 else
986 adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
987 }
988
989 scsicfg1 = 0;
990 if ((adw->features & ADW_ULTRA2) != 0) {
991 switch (eep_config.termination_lvd) {
992 default:
993 printf("%s: Invalid EEPROM LVD Termination Settings.\n",
994 adw_name(adw));
995 printf("%s: Reverting to Automatic LVD Termination\n",
996 adw_name(adw));
997 /* FALLTHROUGH */
998 case ADW_EEPROM_TERM_AUTO:
999 break;
1000 case ADW_EEPROM_TERM_BOTH_ON:
1001 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
1002 /* FALLTHROUGH */
1003 case ADW_EEPROM_TERM_HIGH_ON:
1004 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
1005 /* FALLTHROUGH */
1006 case ADW_EEPROM_TERM_OFF:
1007 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
1008 break;
1009 }
1010 }
1011
1012 switch (eep_config.termination_se) {
1013 default:
1014 printf("%s: Invalid SE EEPROM Termination Settings.\n",
1015 adw_name(adw));
1016 printf("%s: Reverting to Automatic SE Termination\n",
1017 adw_name(adw));
1018 /* FALLTHROUGH */
1019 case ADW_EEPROM_TERM_AUTO:
1020 break;
1021 case ADW_EEPROM_TERM_BOTH_ON:
1022 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
1023 /* FALLTHROUGH */
1024 case ADW_EEPROM_TERM_HIGH_ON:
1025 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
1026 /* FALLTHROUGH */
1027 case ADW_EEPROM_TERM_OFF:
1028 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
1029 break;
1030 }
1031 printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);
1032
1033 /* DMA tag for mapping buffers into device visible space. */
1034 if (bus_dma_tag_create(
1035 /* parent */ adw->parent_dmat,
1036 /* alignment */ 1,
1037 /* boundary */ 0,
1038 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
1039 /* highaddr */ BUS_SPACE_MAXADDR,
1040 /* filter */ NULL,
1041 /* filterarg */ NULL,
1042 /* maxsize */ MAXBSIZE,
1043 /* nsegments */ ADW_SGSIZE,
1044 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1045 /* flags */ BUS_DMA_ALLOCNOW,
1046 /* lockfunc */ busdma_lock_mutex,
1047 /* lockarg */ &Giant,
1048 &adw->buffer_dmat) != 0) {
1049 return (ENOMEM);
1050 }
1051
1052 adw->init_level++;
1053
1054 /* DMA tag for our ccb carrier structures */
1055 if (bus_dma_tag_create(
1056 /* parent */ adw->parent_dmat,
1057 /* alignment */ 0x10,
1058 /* boundary */ 0,
1059 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
1060 /* highaddr */ BUS_SPACE_MAXADDR,
1061 /* filter */ NULL,
1062 /* filterarg */ NULL,
1063 /* maxsize */ (adw->max_acbs +
1064 ADW_NUM_CARRIER_QUEUES + 1) *
1065 sizeof(struct adw_carrier),
1066 /* nsegments */ 1,
1067 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1068 /* flags */ 0,
1069 /* lockfunc */ busdma_lock_mutex,
1070 /* lockarg */ &Giant,
1071 &adw->carrier_dmat) != 0) {
1072 return (ENOMEM);
1073 }
1074
1075 adw->init_level++;
1076
1077 /* Allocation for our ccb carrier structures */
1078 if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
1079 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
1080 return (ENOMEM);
1081 }
1082
1083 adw->init_level++;
1084
1085 /* And permanently map them */
1086 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
1087 adw->carriers,
1088 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1089 * sizeof(struct adw_carrier),
1090 adwmapmem, &adw->carrier_busbase, /*flags*/0);
1091
1092 /* Clear them out. */
1093 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1094 * sizeof(struct adw_carrier));
1095
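/*
 * Carriers are small descriptors exchanged with the RISC firmware.
 * They are chained through bus-address offsets (carr_offset/next_ba)
 * rather than virtual pointers so both the host and the firmware can
 * walk the lists; spare carriers are kept on the free_carriers list.
 */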
1096 /* Setup our free carrier list */
1097 adw->free_carriers = adw->carriers;
1098 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
1099 adw->carriers[i].carr_offset =
1100 carriervtobo(adw, &adw->carriers[i]);
1101 adw->carriers[i].carr_ba =
1102 carriervtob(adw, &adw->carriers[i]);
1103 adw->carriers[i].areq_ba = 0;
1104 adw->carriers[i].next_ba =
1105 carriervtobo(adw, &adw->carriers[i+1]);
1106 }
1107 /* Terminal carrier. Never leaves the freelist */
1108 adw->carriers[i].carr_offset =
1109 carriervtobo(adw, &adw->carriers[i]);
1110 adw->carriers[i].carr_ba =
1111 carriervtob(adw, &adw->carriers[i]);
1112 adw->carriers[i].areq_ba = 0;
1113 adw->carriers[i].next_ba = ~0;
1114
1115 adw->init_level++;
1116
1117 /* DMA tag for our acb structures */
1118 if (bus_dma_tag_create(
1119 /* parent */ adw->parent_dmat,
1120 /* alignment */ 1,
1121 /* boundary */ 0,
1122 /* lowaddr */ BUS_SPACE_MAXADDR,
1123 /* highaddr */ BUS_SPACE_MAXADDR,
1124 /* filter */ NULL,
1125 /* filterarg */ NULL,
1126 /* maxsize */ adw->max_acbs * sizeof(struct acb),
1127 /* nsegments */ 1,
1128 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1129 /* flags */ 0,
1130 /* lockfunc */ busdma_lock_mutex,
1131 /* lockarg */ &Giant,
1132 &adw->acb_dmat) != 0) {
1133 return (ENOMEM);
1134 }
1135
1136 adw->init_level++;
1137
1138 /* Allocation for our ccbs */
1139 if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
1140 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
1141 return (ENOMEM);
1142
1143 adw->init_level++;
1144
1145 /* And permanently map them */
1146 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
1147 adw->acbs,
1148 adw->max_acbs * sizeof(struct acb),
1149 adwmapmem, &adw->acb_busbase, /*flags*/0);
1150
1151 /* Clear them out. */
1152 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));
1153
1154 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1155 if (bus_dma_tag_create(
1156 /* parent */ adw->parent_dmat,
1157 /* alignment */ 1,
1158 /* boundary */ 0,
1159 /* lowaddr */ BUS_SPACE_MAXADDR,
1160 /* highaddr */ BUS_SPACE_MAXADDR,
1161 /* filter */ NULL,
1162 /* filterarg */ NULL,
1163 /* maxsize */ PAGE_SIZE,
1164 /* nsegments */ 1,
1165 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1166 /* flags */ 0,
1167 /* lockfunc */ busdma_lock_mutex,
1168 /* lockarg */ &Giant,
1169 &adw->sg_dmat) != 0) {
1170 return (ENOMEM);
1171 }
1172
1173 adw->init_level++;
1174
1175 /* Allocate our first batch of ccbs */
1176 if (adwallocacbs(adw) == 0)
1177 return (ENOMEM);
1178
1179 if (adw_init_chip(adw, scsicfg1) != 0)
1180 return (ENXIO);
1181
1182 printf("Queue Depth %d\n", adw->max_acbs);
1183
1184 return (0);
1185 }
1186
1187 /*
1188 * Attach all the sub-devices we can find
1189 */
1190 int
1191 adw_attach(struct adw_softc *adw)
1192 {
1193 struct ccb_setasync csa;
1194 struct cam_devq *devq;
1195 int s;
1196 int error;
1197
1198 error = 0;
1199 s = splcam();
1200 /* Hook up our interrupt handler */
1201 if ((error = bus_setup_intr(adw->device, adw->irq,
1202 INTR_TYPE_CAM | INTR_ENTROPY, adw_intr,
1203 adw, &adw->ih)) != 0) {
1204 device_printf(adw->device, "bus_setup_intr() failed: %d\n",
1205 error);
1206 goto fail;
1207 }
1208
1209 /* Start the RISC processor now that we are fully configured. */
1210 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);
1211
1212 /*
1213 * Create the device queue for our SIM.
1214 */
1215 devq = cam_simq_alloc(adw->max_acbs);
1216 if (devq == NULL) {
1217 error = ENOMEM;
goto fail;
}
1218
1219 /*
1220 * Construct our SIM entry.
1221 */
1222 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
1223 1, adw->max_acbs, devq);
1224 if (adw->sim == NULL) {
1225 error = ENOMEM;
1226 goto fail;
1227 }
1228
1229 /*
1230 * Register the bus.
1231 */
1232 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
1233 cam_sim_free(adw->sim, /*free devq*/TRUE);
1234 error = ENOMEM;
1235 goto fail;
1236 }
1237
1238 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
1239 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
1240 == CAM_REQ_CMP) {
1241 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
1242 csa.ccb_h.func_code = XPT_SASYNC_CB;
1243 csa.event_enable = AC_LOST_DEVICE;
1244 csa.callback = adw_async;
1245 csa.callback_arg = adw;
1246 xpt_action((union ccb *)&csa);
1247 }
1248
1249 fail:
1250 splx(s);
1251 return (error);
1252 }
1253
1254 void
1255 adw_intr(void *arg)
1256 {
1257 struct adw_softc *adw;
1258 u_int int_stat;
1259
1260 adw = (struct adw_softc *)arg;
1261 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
1262 return;
1263
1264 /* Reading the register clears the interrupt. */
1265 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);
1266
1267 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
1268 u_int intrb_code;
1269
1270 /* Async Microcode Event */
1271 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
1272 switch (intrb_code) {
1273 case ADW_ASYNC_CARRIER_READY_FAILURE:
1274 /*
1275 * The RISC missed our update of
1276 * the commandq.
1277 */
1278 if (LIST_FIRST(&adw->pending_ccbs) != NULL)
1279 adw_tickle_risc(adw, ADW_TICKLE_A);
1280 break;
1281 case ADW_ASYNC_SCSI_BUS_RESET_DET:
1282 /*
1283 * The firmware detected a SCSI Bus reset.
1284 */
1285 printf("Someone Reset the Bus\n");
1286 adw_handle_bus_reset(adw, /*initiated*/FALSE);
1287 break;
1288 case ADW_ASYNC_RDMA_FAILURE:
1289 /*
1290 * Handle RDMA failure by resetting the
1291 * SCSI Bus and chip.
1292 */
1293 #if 0 /* XXX */
1294 AdvResetChipAndSB(adv_dvc_varp);
1295 #endif
1296 break;
1297
1298 case ADW_ASYNC_HOST_SCSI_BUS_RESET:
1299 /*
1300 * Host generated SCSI bus reset occurred.
1301 */
1302 adw_handle_bus_reset(adw, /*initiated*/TRUE);
1303 break;
1304 default:
1305 printf("adw_intr: unknown async code 0x%x\n",
1306 intrb_code);
1307 break;
1308 }
1309 }
1310
1311 /*
1312 * Run down the response queue.
1313 */
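/*
 * The firmware evidently marks a completed carrier by setting
 * ADW_RQ_DONE in its next_ba field and fills areq_ba with the bus
 * address of the finished request, which is translated back to an
 * ACB below.
 */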
1314 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
1315 struct adw_carrier *free_carrier;
1316 struct acb *acb;
1317 union ccb *ccb;
1318
1319 #if 0
1320 printf("0x%x, 0x%x, 0x%x, 0x%x\n",
1321 adw->responseq->carr_offset,
1322 adw->responseq->carr_ba,
1323 adw->responseq->areq_ba,
1324 adw->responseq->next_ba);
1325 #endif
1326 /*
1327 * The firmware copies the adw_scsi_req_q.acb_baddr
1328 * field into the areq_ba field of the carrier.
1329 */
1330 acb = acbbotov(adw, adw->responseq->areq_ba);
1331
1332 /*
1333 * The least significant four bits of the next_ba
1334 * field are used as flags. Mask them out and then
1335 * advance through the list.
1336 */
1337 free_carrier = adw->responseq;
1338 adw->responseq =
1339 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
1340 free_carrier->next_ba = adw->free_carriers->carr_offset;
1341 adw->free_carriers = free_carrier;
1342
1343 /* Process CCB */
1344 ccb = acb->ccb;
1345 untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
1346 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1347 bus_dmasync_op_t op;
1348
1349 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1350 op = BUS_DMASYNC_POSTREAD;
1351 else
1352 op = BUS_DMASYNC_POSTWRITE;
1353 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
1354 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
1355 ccb->csio.resid = acb->queue.data_cnt;
1356 } else
1357 ccb->csio.resid = 0;
1358
1359 /* Common Cases inline... */
1360 if (acb->queue.host_status == QHSTA_NO_ERROR
1361 && (acb->queue.done_status == QD_NO_ERROR
1362 || acb->queue.done_status == QD_WITH_ERROR)) {
1363 ccb->csio.scsi_status = acb->queue.scsi_status;
1364 ccb->ccb_h.status = 0;
1365 switch (ccb->csio.scsi_status) {
1366 case SCSI_STATUS_OK:
1367 ccb->ccb_h.status |= CAM_REQ_CMP;
1368 break;
1369 case SCSI_STATUS_CHECK_COND:
1370 case SCSI_STATUS_CMD_TERMINATED:
1371 bcopy(&acb->sense_data, &ccb->csio.sense_data,
1372 ccb->csio.sense_len);
1373 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1374 ccb->csio.sense_resid = acb->queue.sense_len;
1375 /* FALLTHROUGH */
1376 default:
1377 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
1378 | CAM_DEV_QFRZN;
1379 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1380 break;
1381 }
1382 adwfreeacb(adw, acb);
1383 xpt_done(ccb);
1384 } else {
1385 adwprocesserror(adw, acb);
1386 }
1387 }
1388 }
1389
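/*
 * Translate firmware host_status/done_status codes into CAM status
 * values for requests that did not complete cleanly.  Recovery ACBs
 * whose commands were cleared by a BDR or bus reset are reported as
 * command timeouts instead.
 */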
1390 static void
1391 adwprocesserror(struct adw_softc *adw, struct acb *acb)
1392 {
1393 union ccb *ccb;
1394
1395 ccb = acb->ccb;
1396 if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
1397 ccb->ccb_h.status = CAM_REQ_ABORTED;
1398 } else {
1399
1400 switch (acb->queue.host_status) {
1401 case QHSTA_M_SEL_TIMEOUT:
1402 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1403 break;
1404 case QHSTA_M_SXFR_OFF_UFLW:
1405 case QHSTA_M_SXFR_OFF_OFLW:
1406 case QHSTA_M_DATA_OVER_RUN:
1407 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1408 break;
1409 case QHSTA_M_SXFR_DESELECTED:
1410 case QHSTA_M_UNEXPECTED_BUS_FREE:
1411 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1412 break;
1413 case QHSTA_M_SCSI_BUS_RESET:
1414 case QHSTA_M_SCSI_BUS_RESET_UNSOL:
1415 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1416 break;
1417 case QHSTA_M_BUS_DEVICE_RESET:
1418 ccb->ccb_h.status = CAM_BDR_SENT;
1419 break;
1420 case QHSTA_M_QUEUE_ABORTED:
1421 /* BDR or Bus Reset */
1422 printf("Saw Queue Aborted\n");
1423 ccb->ccb_h.status = adw->last_reset;
1424 break;
1425 case QHSTA_M_SXFR_SDMA_ERR:
1426 case QHSTA_M_SXFR_SXFR_PERR:
1427 case QHSTA_M_RDMA_PERR:
1428 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1429 break;
1430 case QHSTA_M_WTM_TIMEOUT:
1431 case QHSTA_M_SXFR_WD_TMO:
1432 {
1433 /* The SCSI bus hung in a phase */
1434 xpt_print_path(adw->path);
1435 printf("Watch Dog timer expired. Resetting bus\n");
1436 adw_reset_bus(adw);
1437 break;
1438 }
1439 case QHSTA_M_SXFR_XFR_PH_ERR:
1440 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1441 break;
1442 case QHSTA_M_SXFR_UNKNOWN_ERROR:
1443 break;
1444 case QHSTA_M_BAD_CMPL_STATUS_IN:
1445 /* No command complete after a status message */
1446 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1447 break;
1448 case QHSTA_M_AUTO_REQ_SENSE_FAIL:
1449 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1450 break;
1451 case QHSTA_M_INVALID_DEVICE:
1452 ccb->ccb_h.status = CAM_PATH_INVALID;
1453 break;
1454 case QHSTA_M_NO_AUTO_REQ_SENSE:
1455 /*
1456 * User didn't request sense, but we got a
1457 * check condition.
1458 */
1459 ccb->csio.scsi_status = acb->queue.scsi_status;
1460 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1461 break;
1462 default:
1463 panic("%s: Unhandled Host status error %x",
1464 adw_name(adw), acb->queue.host_status);
1465 /* NOTREACHED */
1466 }
1467 }
1468 if ((acb->state & ACB_RECOVERY_ACB) != 0) {
1469 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
1470 || ccb->ccb_h.status == CAM_BDR_SENT)
1471 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1472 }
1473 if (ccb->ccb_h.status != CAM_REQ_CMP) {
1474 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1475 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1476 }
1477 adwfreeacb(adw, acb);
1478 xpt_done(ccb);
1479 }
1480
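/*
 * Timeout recovery: mark the ACB as a recovery command and attempt a
 * bus device reset via the firmware's idle command interface; if that
 * fails, fall back to resetting the SCSI bus.
 */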
1481 static void
1482 adwtimeout(void *arg)
1483 {
1484 struct acb *acb;
1485 union ccb *ccb;
1486 struct adw_softc *adw;
1487 adw_idle_cmd_status_t status;
1488 int target_id;
1489 int s;
1490
1491 acb = (struct acb *)arg;
1492 ccb = acb->ccb;
1493 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
1494 xpt_print_path(ccb->ccb_h.path);
1495 printf("ACB %p - timed out\n", (void *)acb);
1496
1497 s = splcam();
1498
1499 if ((acb->state & ACB_ACTIVE) == 0) {
1500 xpt_print_path(ccb->ccb_h.path);
1501 printf("ACB %p - timed out CCB already completed\n",
1502 (void *)acb);
1503 splx(s);
1504 return;
1505 }
1506
1507 acb->state |= ACB_RECOVERY_ACB;
1508 target_id = ccb->ccb_h.target_id;
1509
1510 /* Attempt a BDR first */
1511 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
1512 ccb->ccb_h.target_id);
1513 splx(s);
1514 if (status == ADW_IDLE_CMD_SUCCESS) {
1515 printf("%s: BDR Delivered. No longer in timeout\n",
1516 adw_name(adw));
1517 adw_handle_device_reset(adw, target_id);
1518 } else {
1519 adw_reset_bus(adw);
1520 xpt_print_path(adw->path);
1521 printf("Bus Reset Delivered. No longer in timeout\n");
1522 }
1523 }
1524
1525 static void
1526 adw_handle_device_reset(struct adw_softc *adw, u_int target)
1527 {
1528 struct cam_path *path;
1529 cam_status error;
1530
1531 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
1532 target, CAM_LUN_WILDCARD);
1533
1534 if (error == CAM_REQ_CMP) {
1535 xpt_async(AC_SENT_BDR, path, NULL);
1536 xpt_free_path(path);
1537 }
1538 adw->last_reset = CAM_BDR_SENT;
1539 }
1540
1541 static void
1542 adw_handle_bus_reset(struct adw_softc *adw, int initiated)
1543 {
1544 if (initiated) {
1545 /*
1546 * The microcode currently sets the SCSI Bus Reset signal
1547 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
1548 * command above. But the SCSI Bus Reset Hold Time in the
1549 * microcode is not deterministic (it may in fact be for less
1550 * than the SCSI Spec. minimum of 25 us). Therefore on return
1551 * the Adv Library sets the SCSI Bus Reset signal for
1552 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
1553 * than 25 us.
1554 */
1555 u_int scsi_ctrl;
1556
1557 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
1558 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
1559 DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
1560 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);
1561
1562 /*
1563 * We will perform the async notification when the
1564 * SCSI Reset interrupt occurs.
1565 */
1566 } else
1567 xpt_async(AC_BUS_RESET, adw->path, NULL);
1568 adw->last_reset = CAM_SCSI_BUS_RESET;
1569 }