1 /*
2 * CAM SCSI interface for the Advanced Systems Inc.
3 * Second Generation SCSI controllers.
4 *
5 * Product specific probe and attach routines can be found in:
6 *
7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W
8 *
9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * $FreeBSD: releng/5.0/sys/dev/advansys/adwcam.c 73280 2001-03-01 17:09:09Z markm $
34 */
35 /*
36 * Ported from:
37 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
38 *
39 * Copyright (c) 1995-1998 Advanced System Products, Inc.
40 * All Rights Reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that redistributions of source
44 * code retain the above copyright notice and this comment without
45 * modification.
46 */
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/malloc.h>
52 #include <sys/bus.h>
53
54 #include <machine/bus_pio.h>
55 #include <machine/bus_memio.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58
59 #include <sys/rman.h>
60
61 #include <cam/cam.h>
62 #include <cam/cam_ccb.h>
63 #include <cam/cam_sim.h>
64 #include <cam/cam_xpt_sim.h>
65 #include <cam/cam_debug.h>
66
67 #include <cam/scsi/scsi_message.h>
68
69 #include <dev/advansys/adwvar.h>
70
71 /* Definitions for our use of the SIM private CCB area */
72 #define ccb_acb_ptr spriv_ptr0
73 #define ccb_adw_ptr spriv_ptr1
74
75 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
76
77 u_long adw_unit;
78
79 static __inline cam_status adwccbstatus(union ccb*);
80 static __inline struct acb* adwgetacb(struct adw_softc *adw);
81 static __inline void adwfreeacb(struct adw_softc *adw,
82 struct acb *acb);
83
84 static void adwmapmem(void *arg, bus_dma_segment_t *segs,
85 int nseg, int error);
86 static struct sg_map_node*
87 adwallocsgmap(struct adw_softc *adw);
88 static int adwallocacbs(struct adw_softc *adw);
89
90 static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
91 int nseg, int error);
92 static void adw_action(struct cam_sim *sim, union ccb *ccb);
93 static void adw_poll(struct cam_sim *sim);
94 static void adw_async(void *callback_arg, u_int32_t code,
95 struct cam_path *path, void *arg);
96 static void adwprocesserror(struct adw_softc *adw, struct acb *acb);
97 static void adwtimeout(void *arg);
98 static void adw_handle_device_reset(struct adw_softc *adw,
99 u_int target);
100 static void adw_handle_bus_reset(struct adw_softc *adw,
101 int initiated);
102
103 static __inline cam_status
104 adwccbstatus(union ccb* ccb)
105 {
106 return (ccb->ccb_h.status & CAM_STATUS_MASK);
107 }
108
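/*
 * Pull an ACB from the free list, growing the pool (up to max_acbs)
 * if the list is empty.  Returns NULL when no ACB can be obtained.
 */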
109 static __inline struct acb*
110 adwgetacb(struct adw_softc *adw)
111 {
112 struct acb* acb;
113 int s;
114
115 s = splcam();
116 if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
117 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
118 } else if (adw->num_acbs < adw->max_acbs) {
119 adwallocacbs(adw);
120 acb = SLIST_FIRST(&adw->free_acb_list);
121 if (acb == NULL)
122 printf("%s: Can't malloc ACB\n", adw_name(adw));
123 else {
124 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
125 }
126 }
127 splx(s);
128
129 return (acb);
130 }
131
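/*
 * Return an ACB to the free list.  If the SIMQ was frozen because of
 * a resource shortage, arrange for it to be released when this ACB's
 * CCB is completed.
 */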
132 static __inline void
133 adwfreeacb(struct adw_softc *adw, struct acb *acb)
134 {
135 int s;
136
137 s = splcam();
138 if ((acb->state & ACB_ACTIVE) != 0)
139 LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
140 if ((acb->state & ACB_RELEASE_SIMQ) != 0)
141 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
142 else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
143 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
144 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
145 adw->state &= ~ADW_RESOURCE_SHORTAGE;
146 }
147 acb->state = ACB_FREE;
148 SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
149 splx(s);
150 }
151
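/*
 * bus_dmamap_load() callback for our single-segment control structure
 * allocations; record the segment's bus address in the bus_addr_t
 * pointed to by arg.
 */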
152 static void
153 adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
154 {
155 bus_addr_t *busaddrp;
156
157 busaddrp = (bus_addr_t *)arg;
158 *busaddrp = segs->ds_addr;
159 }
160
161 static struct sg_map_node *
162 adwallocsgmap(struct adw_softc *adw)
163 {
164 struct sg_map_node *sg_map;
165
166 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
167
168 if (sg_map == NULL)
169 return (NULL);
170
171 /* Allocate S/G space for the next batch of ACBs */
172 if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
173 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
174 free(sg_map, M_DEVBUF);
175 return (NULL);
176 }
177
178 SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);
179
180 bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
181 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);
182
183 bzero(sg_map->sg_vaddr, PAGE_SIZE);
184 return (sg_map);
185 }
186
187 /*
188 * Allocate another chunk of ACBs. Return the count of entries added.
189 * Assumed to be called at splcam().
190 */
191 static int
192 adwallocacbs(struct adw_softc *adw)
193 {
194 struct acb *next_acb;
195 struct sg_map_node *sg_map;
196 bus_addr_t busaddr;
197 struct adw_sg_block *blocks;
198 int newcount;
199 int i;
200
201 next_acb = &adw->acbs[adw->num_acbs];
202 sg_map = adwallocsgmap(adw);
203
204 if (sg_map == NULL)
205 return (0);
206
207 blocks = sg_map->sg_vaddr;
208 busaddr = sg_map->sg_physaddr;
209
210 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
211 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
212 int error;
213
214 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
215 &next_acb->dmamap);
216 if (error != 0)
217 break;
218 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
219 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
220 next_acb->queue.sense_baddr =
221 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
222 next_acb->sg_blocks = blocks;
223 next_acb->sg_busaddr = busaddr;
224 next_acb->state = ACB_FREE;
225 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
226 blocks += ADW_SG_BLOCKCNT;
227 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
228 next_acb++;
229 adw->num_acbs++;
230 }
231 return (i);
232 }
233
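/*
 * bus_dmamap_load() callback for SCSI I/O requests.  Fill in the ACB's
 * data pointers and S/G block chain, sync the buffer map, and, unless
 * the CCB was aborted while the mapping was in progress, queue the ACB
 * to the RISC and start its timeout.
 */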
234 static void
235 adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
236 {
237 struct acb *acb;
238 union ccb *ccb;
239 struct adw_softc *adw;
240 int s;
241
242 acb = (struct acb *)arg;
243 ccb = acb->ccb;
244 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
245
246 if (error != 0) {
247 if (error != EFBIG)
248 printf("%s: Unexpected error 0x%x returned from "
249 "bus_dmamap_load\n", adw_name(adw), error);
250 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
251 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
252 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
253 }
254 adwfreeacb(adw, acb);
255 xpt_done(ccb);
256 return;
257 }
258
259 if (nseg != 0) {
260 bus_dmasync_op_t op;
261
262 acb->queue.data_addr = dm_segs[0].ds_addr;
263 acb->queue.data_cnt = ccb->csio.dxfer_len;
264 if (nseg > 1) {
265 struct adw_sg_block *sg_block;
266 struct adw_sg_elm *sg;
267 bus_addr_t sg_busaddr;
268 u_int sg_index;
269 bus_dma_segment_t *end_seg;
270
271 end_seg = dm_segs + nseg;
272
273 sg_busaddr = acb->sg_busaddr;
274 sg_index = 0;
275 /* Copy the segments into our SG list */
276 for (sg_block = acb->sg_blocks;; sg_block++) {
277 u_int i;
278
279 sg = sg_block->sg_list;
280 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
281 if (dm_segs >= end_seg)
282 break;
283
284 sg->sg_addr = dm_segs->ds_addr;
285 sg->sg_count = dm_segs->ds_len;
286 sg++;
287 dm_segs++;
288 }
289 sg_block->sg_cnt = i;
290 sg_index += i;
291 if (dm_segs == end_seg) {
292 sg_block->sg_busaddr_next = 0;
293 break;
294 } else {
295 sg_busaddr +=
296 sizeof(struct adw_sg_block);
297 sg_block->sg_busaddr_next = sg_busaddr;
298 }
299 }
300 acb->queue.sg_real_addr = acb->sg_busaddr;
301 } else {
302 acb->queue.sg_real_addr = 0;
303 }
304
305 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
306 op = BUS_DMASYNC_PREREAD;
307 else
308 op = BUS_DMASYNC_PREWRITE;
309
310 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
311
312 } else {
313 acb->queue.data_addr = 0;
314 acb->queue.data_cnt = 0;
315 acb->queue.sg_real_addr = 0;
316 }
317
318 s = splcam();
319
320 /*
321 * Last chance to check whether this CCB needs to
322 * be aborted.
323 */
324 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
325 if (nseg != 0)
326 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
327 adwfreeacb(adw, acb);
328 xpt_done(ccb);
329 splx(s);
330 return;
331 }
332
333 acb->state |= ACB_ACTIVE;
334 ccb->ccb_h.status |= CAM_SIM_QUEUED;
335 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
336 ccb->ccb_h.timeout_ch =
337 timeout(adwtimeout, (caddr_t)acb,
338 (ccb->ccb_h.timeout * hz) / 1000);
339
340 adw_send_acb(adw, acb, acbvtob(adw, acb));
341
342 splx(s);
343 }
344
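/*
 * CAM SIM action entry point: dispatch incoming CCBs by function code.
 */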
345 static void
346 adw_action(struct cam_sim *sim, union ccb *ccb)
347 {
348 struct adw_softc *adw;
349
350 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));
351
352 adw = (struct adw_softc *)cam_sim_softc(sim);
353
354 switch (ccb->ccb_h.func_code) {
355 /* Common cases first */
356 case XPT_SCSI_IO: /* Execute the requested I/O operation */
357 {
358 struct ccb_scsiio *csio;
359 struct ccb_hdr *ccbh;
360 struct acb *acb;
361
362 csio = &ccb->csio;
363 ccbh = &ccb->ccb_h;
364
365 /* Max supported CDB length is 12 bytes */
366 if (csio->cdb_len > 12) {
367 ccb->ccb_h.status = CAM_REQ_INVALID;
368 xpt_done(ccb);
369 return;
370 }
371
372 if ((acb = adwgetacb(adw)) == NULL) {
373 int s;
374
375 s = splcam();
376 adw->state |= ADW_RESOURCE_SHORTAGE;
377 splx(s);
378 xpt_freeze_simq(sim, /*count*/1);
379 ccb->ccb_h.status = CAM_REQUEUE_REQ;
380 xpt_done(ccb);
381 return;
382 }
383
384 /* Link acb and ccb so we can find one from the other */
385 acb->ccb = ccb;
386 ccb->ccb_h.ccb_acb_ptr = acb;
387 ccb->ccb_h.ccb_adw_ptr = adw;
388
389 acb->queue.cntl = 0;
390 acb->queue.target_cmd = 0;
391 acb->queue.target_id = ccb->ccb_h.target_id;
392 acb->queue.target_lun = ccb->ccb_h.target_lun;
393
394 acb->queue.mflag = 0;
395 acb->queue.sense_len =
396 MIN(csio->sense_len, sizeof(acb->sense_data));
397 acb->queue.cdb_len = csio->cdb_len;
398 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
399 switch (csio->tag_action) {
400 case MSG_SIMPLE_Q_TAG:
401 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
402 break;
403 case MSG_HEAD_OF_Q_TAG:
404 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
405 break;
406 case MSG_ORDERED_Q_TAG:
407 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
408 break;
409 default:
410 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
411 break;
412 }
413 } else
414 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
415
416 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
417 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;
418
419 acb->queue.done_status = 0;
420 acb->queue.scsi_status = 0;
421 acb->queue.host_status = 0;
422 acb->queue.sg_wk_ix = 0;
423 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
424 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
425 bcopy(csio->cdb_io.cdb_ptr,
426 acb->queue.cdb, csio->cdb_len);
427 } else {
428 /* I guess I could map it in... */
429 ccb->ccb_h.status = CAM_REQ_INVALID;
430 adwfreeacb(adw, acb);
431 xpt_done(ccb);
432 return;
433 }
434 } else {
435 bcopy(csio->cdb_io.cdb_bytes,
436 acb->queue.cdb, csio->cdb_len);
437 }
438
439 /*
440 * If we have any data to send with this command,
441 * map it into bus space.
442 */
443 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
444 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
445 /*
446 * We've been given a pointer
447 * to a single buffer.
448 */
449 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
450 int s;
451 int error;
452
453 s = splsoftvm();
454 error =
455 bus_dmamap_load(adw->buffer_dmat,
456 acb->dmamap,
457 csio->data_ptr,
458 csio->dxfer_len,
459 adwexecuteacb,
460 acb, /*flags*/0);
461 if (error == EINPROGRESS) {
462 /*
463 * So as to maintain ordering,
464 * freeze the controller queue
465 * until our mapping is
466 * returned.
467 */
468 xpt_freeze_simq(sim, 1);
469 acb->state |= ACB_RELEASE_SIMQ;
470 }
471 splx(s);
472 } else {
473 struct bus_dma_segment seg;
474
475 /* Pointer to physical buffer */
476 seg.ds_addr =
477 (bus_addr_t)csio->data_ptr;
478 seg.ds_len = csio->dxfer_len;
479 adwexecuteacb(acb, &seg, 1, 0);
480 }
481 } else {
482 struct bus_dma_segment *segs;
483
484 if ((ccbh->flags & CAM_DATA_PHYS) != 0)
485 panic("adw_action - Physical "
486 "segment pointers "
487 "unsupported");
488
489 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
490 panic("adw_action - Virtual "
491 "segment addresses "
492 "unsupported");
493
494 /* Just use the segments provided */
495 segs = (struct bus_dma_segment *)csio->data_ptr;
496 adwexecuteacb(acb, segs, csio->sglist_cnt,
497 (csio->sglist_cnt < ADW_SGSIZE)
498 ? 0 : EFBIG);
499 }
500 } else {
501 adwexecuteacb(acb, NULL, 0, 0);
502 }
503 break;
504 }
505 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
506 {
507 adw_idle_cmd_status_t status;
508
509 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
510 ccb->ccb_h.target_id);
511 if (status == ADW_IDLE_CMD_SUCCESS) {
512 ccb->ccb_h.status = CAM_REQ_CMP;
513 if (bootverbose) {
514 xpt_print_path(ccb->ccb_h.path);
515 printf("BDR Delivered\n");
516 }
517 } else
518 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
519 xpt_done(ccb);
520 break;
521 }
522 case XPT_ABORT: /* Abort the specified CCB */
523 /* XXX Implement */
524 ccb->ccb_h.status = CAM_REQ_INVALID;
525 xpt_done(ccb);
526 break;
527 case XPT_SET_TRAN_SETTINGS:
528 {
529 struct ccb_trans_settings *cts;
530 u_int target_mask;
531 int s;
532
533 cts = &ccb->cts;
534 target_mask = 0x01 << ccb->ccb_h.target_id;
535
536 s = splcam();
537 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
538 u_int sdtrdone;
539
540 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
541 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
542 u_int discenb;
543
544 discenb =
545 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);
546
547 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
548 discenb |= target_mask;
549 else
550 discenb &= ~target_mask;
551
552 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
553 discenb);
554 }
555
556 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
557
558 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
559 adw->tagenb |= target_mask;
560 else
561 adw->tagenb &= ~target_mask;
562 }
563
564 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
565 u_int wdtrenb_orig;
566 u_int wdtrenb;
567 u_int wdtrdone;
568
569 wdtrenb_orig =
570 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
571 wdtrenb = wdtrenb_orig;
572 wdtrdone = adw_lram_read_16(adw,
573 ADW_MC_WDTR_DONE);
574 switch (cts->bus_width) {
575 case MSG_EXT_WDTR_BUS_32_BIT:
576 case MSG_EXT_WDTR_BUS_16_BIT:
577 wdtrenb |= target_mask;
578 break;
579 case MSG_EXT_WDTR_BUS_8_BIT:
580 default:
581 wdtrenb &= ~target_mask;
582 break;
583 }
584 if (wdtrenb != wdtrenb_orig) {
585 adw_lram_write_16(adw,
586 ADW_MC_WDTR_ABLE,
587 wdtrenb);
588 wdtrdone &= ~target_mask;
589 adw_lram_write_16(adw,
590 ADW_MC_WDTR_DONE,
591 wdtrdone);
592 /* Wide negotiation forces async */
593 sdtrdone &= ~target_mask;
594 adw_lram_write_16(adw,
595 ADW_MC_SDTR_DONE,
596 sdtrdone);
597 }
598 }
599
600 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
601 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
602 u_int sdtr_orig;
603 u_int sdtr;
604 u_int sdtrable_orig;
605 u_int sdtrable;
606
607 sdtr = adw_get_chip_sdtr(adw,
608 ccb->ccb_h.target_id);
609 sdtr_orig = sdtr;
610 sdtrable = adw_lram_read_16(adw,
611 ADW_MC_SDTR_ABLE);
612 sdtrable_orig = sdtrable;
613
614 if ((cts->valid
615 & CCB_TRANS_SYNC_RATE_VALID) != 0) {
616
617 sdtr =
618 adw_find_sdtr(adw,
619 cts->sync_period);
620 }
621
622 if ((cts->valid
623 & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
624 if (cts->sync_offset == 0)
625 sdtr = ADW_MC_SDTR_ASYNC;
626 }
627
628 if (sdtr == ADW_MC_SDTR_ASYNC)
629 sdtrable &= ~target_mask;
630 else
631 sdtrable |= target_mask;
632 if (sdtr != sdtr_orig
633 || sdtrable != sdtrable_orig) {
634 adw_set_chip_sdtr(adw,
635 ccb->ccb_h.target_id,
636 sdtr);
637 sdtrdone &= ~target_mask;
638 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
639 sdtrable);
640 adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
641 sdtrdone);
642
643 }
644 }
645 }
646 splx(s);
647 ccb->ccb_h.status = CAM_REQ_CMP;
648 xpt_done(ccb);
649 break;
650 }
651 case XPT_GET_TRAN_SETTINGS:
652 /* Get default/user set transfer settings for the target */
653 {
654 struct ccb_trans_settings *cts;
655 u_int target_mask;
656
657 cts = &ccb->cts;
658 target_mask = 0x01 << ccb->ccb_h.target_id;
659 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
660 u_int mc_sdtr;
661
662 cts->flags = 0;
663 if ((adw->user_discenb & target_mask) != 0)
664 cts->flags |= CCB_TRANS_DISC_ENB;
665
666 if ((adw->user_tagenb & target_mask) != 0)
667 cts->flags |= CCB_TRANS_TAG_ENB;
668
669 if ((adw->user_wdtr & target_mask) != 0)
670 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
671 else
672 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
673
674 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
675 cts->sync_period = adw_find_period(adw, mc_sdtr);
676 if (cts->sync_period != 0)
677 cts->sync_offset = 15; /* XXX ??? */
678 else
679 cts->sync_offset = 0;
680
681 cts->valid = CCB_TRANS_SYNC_RATE_VALID
682 | CCB_TRANS_SYNC_OFFSET_VALID
683 | CCB_TRANS_BUS_WIDTH_VALID
684 | CCB_TRANS_DISC_VALID
685 | CCB_TRANS_TQ_VALID;
686 ccb->ccb_h.status = CAM_REQ_CMP;
687 } else {
688 u_int targ_tinfo;
689
690 cts->flags = 0;
691 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
692 & target_mask) != 0)
693 cts->flags |= CCB_TRANS_DISC_ENB;
694
695 if ((adw->tagenb & target_mask) != 0)
696 cts->flags |= CCB_TRANS_TAG_ENB;
697
698 targ_tinfo =
699 adw_lram_read_16(adw,
700 ADW_MC_DEVICE_HSHK_CFG_TABLE
701 + (2 * ccb->ccb_h.target_id));
702
703 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
704 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
705 else
706 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
707
708 cts->sync_period =
709 adw_hshk_cfg_period_factor(targ_tinfo);
710
711 cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
712 if (cts->sync_period == 0)
713 cts->sync_offset = 0;
714
715 if (cts->sync_offset == 0)
716 cts->sync_period = 0;
717 }
718 cts->valid = CCB_TRANS_SYNC_RATE_VALID
719 | CCB_TRANS_SYNC_OFFSET_VALID
720 | CCB_TRANS_BUS_WIDTH_VALID
721 | CCB_TRANS_DISC_VALID
722 | CCB_TRANS_TQ_VALID;
723 ccb->ccb_h.status = CAM_REQ_CMP;
724 xpt_done(ccb);
725 break;
726 }
727 case XPT_CALC_GEOMETRY:
728 {
729 struct ccb_calc_geometry *ccg;
730 u_int32_t size_mb;
731 u_int32_t secs_per_cylinder;
732 int extended;
733
734 /*
735 * XXX Use Adaptec translation until I find out how to
736 * get this information from the card.
737 */
738 ccg = &ccb->ccg;
739 size_mb = ccg->volume_size
740 / ((1024L * 1024L) / ccg->block_size);
741 extended = 1;
742
743 if (size_mb > 1024 && extended) {
744 ccg->heads = 255;
745 ccg->secs_per_track = 63;
746 } else {
747 ccg->heads = 64;
748 ccg->secs_per_track = 32;
749 }
750 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
751 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
752 ccb->ccb_h.status = CAM_REQ_CMP;
753 xpt_done(ccb);
754 break;
755 }
756 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
757 {
758 int failure;
759
760 failure = adw_reset_bus(adw);
761 if (failure != 0) {
762 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
763 } else {
764 if (bootverbose) {
765 xpt_print_path(adw->path);
766 printf("Bus Reset Delivered\n");
767 }
768 ccb->ccb_h.status = CAM_REQ_CMP;
769 }
770 xpt_done(ccb);
771 break;
772 }
773 case XPT_TERM_IO: /* Terminate the I/O process */
774 /* XXX Implement */
775 ccb->ccb_h.status = CAM_REQ_INVALID;
776 xpt_done(ccb);
777 break;
778 case XPT_PATH_INQ: /* Path routing inquiry */
779 {
780 struct ccb_pathinq *cpi = &ccb->cpi;
781
782 cpi->version_num = 1;
783 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
784 cpi->target_sprt = 0;
785 cpi->hba_misc = 0;
786 cpi->hba_eng_cnt = 0;
787 cpi->max_target = ADW_MAX_TID;
788 cpi->max_lun = ADW_MAX_LUN;
789 cpi->initiator_id = adw->initiator_id;
790 cpi->bus_id = cam_sim_bus(sim);
791 cpi->base_transfer_speed = 3300;
792 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
793 strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
794 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
795 cpi->unit_number = cam_sim_unit(sim);
796 cpi->ccb_h.status = CAM_REQ_CMP;
797 xpt_done(ccb);
798 break;
799 }
800 default:
801 ccb->ccb_h.status = CAM_REQ_INVALID;
802 xpt_done(ccb);
803 break;
804 }
805 }
806
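/*
 * Polled entry point used by CAM: service the controller's interrupt
 * handler directly.
 */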
807 static void
808 adw_poll(struct cam_sim *sim)
809 {
810 adw_intr(cam_sim_softc(sim));
811 }
812
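/*
 * Async event callback registered in adw_attach().  No action is
 * currently taken for the AC_LOST_DEVICE events we enable.
 */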
813 static void
814 adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
815 {
816 }
817
818 struct adw_softc *
819 adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
820 {
821 struct adw_softc *adw;
822 int i;
823
824 /*
825 * Allocate a storage area for us
826 */
827 adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
828 if (adw == NULL) {
829 printf("adw%d: cannot malloc!\n", device_get_unit(dev));
830 return NULL;
831 }
832 LIST_INIT(&adw->pending_ccbs);
833 SLIST_INIT(&adw->sg_maps);
834 adw->device = dev;
835 adw->unit = device_get_unit(dev);
836 adw->regs_res_type = regs_type;
837 adw->regs_res_id = regs_id;
838 adw->regs = regs;
839 adw->tag = rman_get_bustag(regs);
840 adw->bsh = rman_get_bushandle(regs);
841 i = adw->unit / 10;
842 adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
843 if (adw->name == NULL) {
844 printf("adw%d: cannot malloc name!\n", adw->unit);
845 free(adw, M_DEVBUF);
846 return NULL;
847 }
848 sprintf(adw->name, "adw%d", adw->unit);
849 return(adw);
850 }
851
852 void
853 adw_free(struct adw_softc *adw)
854 {
855 switch (adw->init_level) {
856 case 9:
857 {
858 struct sg_map_node *sg_map;
859
860 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
861 SLIST_REMOVE_HEAD(&adw->sg_maps, links);
862 bus_dmamap_unload(adw->sg_dmat,
863 sg_map->sg_dmamap);
864 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
865 sg_map->sg_dmamap);
866 free(sg_map, M_DEVBUF);
867 }
868 bus_dma_tag_destroy(adw->sg_dmat);
869 }
870 case 8:
871 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
872 case 7:
873 bus_dmamem_free(adw->acb_dmat, adw->acbs,
874 adw->acb_dmamap);
875 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
876 case 6:
877 bus_dma_tag_destroy(adw->acb_dmat);
878 case 5:
879 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
880 case 4:
881 bus_dmamem_free(adw->carrier_dmat, adw->carriers,
882 adw->carrier_dmamap);
883 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
884 case 3:
885 bus_dma_tag_destroy(adw->carrier_dmat);
886 case 2:
887 bus_dma_tag_destroy(adw->buffer_dmat);
888 case 1:
889 bus_dma_tag_destroy(adw->parent_dmat);
890 case 0:
891 break;
892 }
893 free(adw->name, M_DEVBUF);
894 free(adw, M_DEVBUF);
895 }
896
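/*
 * Read the EEPROM configuration, size the ACB pool, and set up the
 * DMA tags, carriers, and ACBs used to communicate with the RISC.
 */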
897 int
898 adw_init(struct adw_softc *adw)
899 {
900 struct adw_eeprom eep_config;
901 u_int tid;
902 u_int i;
903 u_int16_t checksum;
904 u_int16_t scsicfg1;
905
906 checksum = adw_eeprom_read(adw, &eep_config);
907 bcopy(eep_config.serial_number, adw->serial_number,
908 sizeof(adw->serial_number));
909 if (checksum != eep_config.checksum) {
910 u_int16_t serial_number[3];
911
912 adw->flags |= ADW_EEPROM_FAILED;
913 printf("%s: EEPROM checksum failed. Restoring Defaults\n",
914 adw_name(adw));
915
916 /*
917 * Restore the default EEPROM settings.
918 * Assume the 6 byte board serial number that was read
919 * from EEPROM is correct even if the EEPROM checksum
920 * failed.
921 */
922 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
923 bcopy(adw->serial_number, eep_config.serial_number,
924 sizeof(serial_number));
925 adw_eeprom_write(adw, &eep_config);
926 }
927
928 /* Pull eeprom information into our softc. */
929 adw->bios_ctrl = eep_config.bios_ctrl;
930 adw->user_wdtr = eep_config.wdtr_able;
931 for (tid = 0; tid < ADW_MAX_TID; tid++) {
932 u_int mc_sdtr;
933 u_int16_t tid_mask;
934
935 tid_mask = 0x1 << tid;
936 if ((adw->features & ADW_ULTRA) != 0) {
937 /*
938 * Ultra chips store sdtr and ultraenb
939 * bits in their seeprom, so we must
940 * construct valid mc_sdtr entries for
941 * them indirectly.
942 */
943 if (eep_config.sync1.sync_enable & tid_mask) {
944 if (eep_config.sync2.ultra_enable & tid_mask)
945 mc_sdtr = ADW_MC_SDTR_20;
946 else
947 mc_sdtr = ADW_MC_SDTR_10;
948 } else
949 mc_sdtr = ADW_MC_SDTR_ASYNC;
950 } else {
951 switch (ADW_TARGET_GROUP(tid)) {
952 case 3:
953 mc_sdtr = eep_config.sync4.sdtr4;
954 break;
955 case 2:
956 mc_sdtr = eep_config.sync3.sdtr3;
957 break;
958 case 1:
959 mc_sdtr = eep_config.sync2.sdtr2;
960 break;
961 default: /* Shut up compiler */
962 case 0:
963 mc_sdtr = eep_config.sync1.sdtr1;
964 break;
965 }
966 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
967 mc_sdtr &= 0xFF;
968 }
969 adw_set_user_sdtr(adw, tid, mc_sdtr);
970 }
971 adw->user_tagenb = eep_config.tagqng_able;
972 adw->user_discenb = eep_config.disc_enable;
973 adw->max_acbs = eep_config.max_host_qng;
974 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);
975
976 /*
977 * Sanity check the number of host openings.
978 */
979 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
980 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
981 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
982 /* If the value is zero, assume it is uninitialized. */
983 if (adw->max_acbs == 0)
984 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
985 else
986 adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
987 }
988
989 scsicfg1 = 0;
990 if ((adw->features & ADW_ULTRA2) != 0) {
991 switch (eep_config.termination_lvd) {
992 default:
993 printf("%s: Invalid EEPROM LVD Termination Settings.\n",
994 adw_name(adw));
995 printf("%s: Reverting to Automatic LVD Termination\n",
996 adw_name(adw));
997 /* FALLTHROUGH */
998 case ADW_EEPROM_TERM_AUTO:
999 break;
1000 case ADW_EEPROM_TERM_BOTH_ON:
1001 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
1002 /* FALLTHROUGH */
1003 case ADW_EEPROM_TERM_HIGH_ON:
1004 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
1005 /* FALLTHROUGH */
1006 case ADW_EEPROM_TERM_OFF:
1007 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
1008 break;
1009 }
1010 }
1011
1012 switch (eep_config.termination_se) {
1013 default:
1014 printf("%s: Invalid SE EEPROM Termination Settings.\n",
1015 adw_name(adw));
1016 printf("%s: Reverting to Automatic SE Termination\n",
1017 adw_name(adw));
1018 /* FALLTHROUGH */
1019 case ADW_EEPROM_TERM_AUTO:
1020 break;
1021 case ADW_EEPROM_TERM_BOTH_ON:
1022 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
1023 /* FALLTHROUGH */
1024 case ADW_EEPROM_TERM_HIGH_ON:
1025 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
1026 /* FALLTHROUGH */
1027 case ADW_EEPROM_TERM_OFF:
1028 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
1029 break;
1030 }
1031 printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);
1032
1033 /* DMA tag for mapping buffers into device visible space. */
1034 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
1035 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
1036 /*highaddr*/BUS_SPACE_MAXADDR,
1037 /*filter*/NULL, /*filterarg*/NULL,
1038 /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
1039 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1040 /*flags*/BUS_DMA_ALLOCNOW,
1041 &adw->buffer_dmat) != 0) {
1042 return (ENOMEM);
1043 }
1044
1045 adw->init_level++;
1046
1047 /* DMA tag for our ccb carrier structures */
1048 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
1049 /*boundary*/0,
1050 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
1051 /*highaddr*/BUS_SPACE_MAXADDR,
1052 /*filter*/NULL, /*filterarg*/NULL,
1053 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1054 * sizeof(struct adw_carrier),
1055 /*nsegments*/1,
1056 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1057 /*flags*/0, &adw->carrier_dmat) != 0) {
1058 return (ENOMEM);
1059 }
1060
1061 adw->init_level++;
1062
1063 /* Allocation for our ccb carrier structures */
1064 if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
1065 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
1066 return (ENOMEM);
1067 }
1068
1069 adw->init_level++;
1070
1071 /* And permanently map them */
1072 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
1073 adw->carriers,
1074 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1075 * sizeof(struct adw_carrier),
1076 adwmapmem, &adw->carrier_busbase, /*flags*/0);
1077
1078 /* Clear them out. */
1079 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1080 * sizeof(struct adw_carrier));
1081
1082 /* Setup our free carrier list */
1083 adw->free_carriers = adw->carriers;
1084 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
1085 adw->carriers[i].carr_offset =
1086 carriervtobo(adw, &adw->carriers[i]);
1087 adw->carriers[i].carr_ba =
1088 carriervtob(adw, &adw->carriers[i]);
1089 adw->carriers[i].areq_ba = 0;
1090 adw->carriers[i].next_ba =
1091 carriervtobo(adw, &adw->carriers[i+1]);
1092 }
1093 /* Terminal carrier. Never leaves the freelist */
1094 adw->carriers[i].carr_offset =
1095 carriervtobo(adw, &adw->carriers[i]);
1096 adw->carriers[i].carr_ba =
1097 carriervtob(adw, &adw->carriers[i]);
1098 adw->carriers[i].areq_ba = 0;
1099 adw->carriers[i].next_ba = ~0;
1100
1101 adw->init_level++;
1102
1103 /* DMA tag for our acb structures */
1104 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
1105 /*lowaddr*/BUS_SPACE_MAXADDR,
1106 /*highaddr*/BUS_SPACE_MAXADDR,
1107 /*filter*/NULL, /*filterarg*/NULL,
1108 adw->max_acbs * sizeof(struct acb),
1109 /*nsegments*/1,
1110 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1111 /*flags*/0, &adw->acb_dmat) != 0) {
1112 return (ENOMEM);
1113 }
1114
1115 adw->init_level++;
1116
1117 /* Allocation for our ACBs */
1118 if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
1119 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
1120 return (ENOMEM);
1121
1122 adw->init_level++;
1123
1124 /* And permanently map them */
1125 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
1126 adw->acbs,
1127 adw->max_acbs * sizeof(struct acb),
1128 adwmapmem, &adw->acb_busbase, /*flags*/0);
1129
1130 /* Clear them out. */
1131 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));
1132
1133 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1134 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
1135 /*lowaddr*/BUS_SPACE_MAXADDR,
1136 /*highaddr*/BUS_SPACE_MAXADDR,
1137 /*filter*/NULL, /*filterarg*/NULL,
1138 PAGE_SIZE, /*nsegments*/1,
1139 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1140 /*flags*/0, &adw->sg_dmat) != 0) {
1141 return (ENOMEM);
1142 }
1143
1144 adw->init_level++;
1145
1146 /* Allocate our first batch of ACBs */
1147 if (adwallocacbs(adw) == 0)
1148 return (ENOMEM);
1149
1150 if (adw_init_chip(adw, scsicfg1) != 0)
1151 return (ENXIO);
1152
1153 printf("Queue Depth %d\n", adw->max_acbs);
1154
1155 return (0);
1156 }
1157
1158 /*
1159 * Attach all the sub-devices we can find
1160 */
1161 int
1162 adw_attach(struct adw_softc *adw)
1163 {
1164 struct ccb_setasync csa;
1165 struct cam_devq *devq;
1166 int s;
1167 int error;
1168
1169 error = 0;
1170 s = splcam();
1171 /* Hook up our interrupt handler */
1172 if ((error = bus_setup_intr(adw->device, adw->irq,
1173 INTR_TYPE_CAM | INTR_ENTROPY, adw_intr,
1174 adw, &adw->ih)) != 0) {
1175 device_printf(adw->device, "bus_setup_intr() failed: %d\n",
1176 error);
1177 goto fail;
1178 }
1179
1180 /* Start the Risc processor now that we are fully configured. */
1181 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);
1182
1183 /*
1184 * Create the device queue for our SIM.
1185 */
1186 devq = cam_simq_alloc(adw->max_acbs);
1187 if (devq == NULL) {
1188 error = ENOMEM;
goto fail;
}
1189
1190 /*
1191 * Construct our SIM entry.
1192 */
1193 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
1194 1, adw->max_acbs, devq);
1195 if (adw->sim == NULL) {
1196 error = ENOMEM;
1197 goto fail;
1198 }
1199
1200 /*
1201 * Register the bus.
1202 */
1203 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
1204 cam_sim_free(adw->sim, /*free devq*/TRUE);
1205 error = ENOMEM;
1206 goto fail;
1207 }
1208
1209 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
1210 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
1211 == CAM_REQ_CMP) {
1212 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
1213 csa.ccb_h.func_code = XPT_SASYNC_CB;
1214 csa.event_enable = AC_LOST_DEVICE;
1215 csa.callback = adw_async;
1216 csa.callback_arg = adw;
1217 xpt_action((union ccb *)&csa);
1218 }
1219
1220 fail:
1221 splx(s);
1222 return (error);
1223 }
1224
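/*
 * Interrupt handler: acknowledge the chip, handle any asynchronous
 * microcode events, and complete the requests found on the response
 * queue.
 */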
1225 void
1226 adw_intr(void *arg)
1227 {
1228 struct adw_softc *adw;
1229 u_int int_stat;
1230
1231 adw = (struct adw_softc *)arg;
1232 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
1233 return;
1234
1235 /* Reading the register clears the interrupt. */
1236 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);
1237
1238 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
1239 u_int intrb_code;
1240
1241 /* Async Microcode Event */
1242 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
1243 switch (intrb_code) {
1244 case ADW_ASYNC_CARRIER_READY_FAILURE:
1245 /*
1246 * The RISC missed our update of
1247 * the commandq.
1248 */
1249 if (LIST_FIRST(&adw->pending_ccbs) != NULL)
1250 adw_tickle_risc(adw, ADW_TICKLE_A);
1251 break;
1252 case ADW_ASYNC_SCSI_BUS_RESET_DET:
1253 /*
1254 * The firmware detected a SCSI Bus reset.
1255 */
1256 printf("Someone Reset the Bus\n");
1257 adw_handle_bus_reset(adw, /*initiated*/FALSE);
1258 break;
1259 case ADW_ASYNC_RDMA_FAILURE:
1260 /*
1261 * Handle RDMA failure by resetting the
1262 * SCSI Bus and chip.
1263 */
1264 #if XXX
1265 AdvResetChipAndSB(adv_dvc_varp);
1266 #endif
1267 break;
1268
1269 case ADW_ASYNC_HOST_SCSI_BUS_RESET:
1270 /*
1271 * Host generated SCSI bus reset occurred.
1272 */
1273 adw_handle_bus_reset(adw, /*initiated*/TRUE);
1274 break;
1275 default:
1276 printf("adw_intr: unknown async code 0x%x\n",
1277 intrb_code);
1278 break;
1279 }
1280 }
1281
1282 /*
1283 * Run down the ResponseQ.
1284 */
1285 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
1286 struct adw_carrier *free_carrier;
1287 struct acb *acb;
1288 union ccb *ccb;
1289
1290 #if 0
1291 printf("0x%x, 0x%x, 0x%x, 0x%x\n",
1292 adw->responseq->carr_offset,
1293 adw->responseq->carr_ba,
1294 adw->responseq->areq_ba,
1295 adw->responseq->next_ba);
1296 #endif
1297 /*
1298 * The firmware copies the adw_scsi_req_q.acb_baddr
1299 * field into the areq_ba field of the carrier.
1300 */
1301 acb = acbbotov(adw, adw->responseq->areq_ba);
1302
1303 /*
1304 * The least significant four bits of the next_ba
1305 * field are used as flags. Mask them out and then
1306 * advance through the list.
1307 */
1308 free_carrier = adw->responseq;
1309 adw->responseq =
1310 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
1311 free_carrier->next_ba = adw->free_carriers->carr_offset;
1312 adw->free_carriers = free_carrier;
1313
1314 /* Process CCB */
1315 ccb = acb->ccb;
1316 untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
1317 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1318 bus_dmasync_op_t op;
1319
1320 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1321 op = BUS_DMASYNC_POSTREAD;
1322 else
1323 op = BUS_DMASYNC_POSTWRITE;
1324 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
1325 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
1326 ccb->csio.resid = acb->queue.data_cnt;
1327 } else
1328 ccb->csio.resid = 0;
1329
1330 /* Common Cases inline... */
1331 if (acb->queue.host_status == QHSTA_NO_ERROR
1332 && (acb->queue.done_status == QD_NO_ERROR
1333 || acb->queue.done_status == QD_WITH_ERROR)) {
1334 ccb->csio.scsi_status = acb->queue.scsi_status;
1335 ccb->ccb_h.status = 0;
1336 switch (ccb->csio.scsi_status) {
1337 case SCSI_STATUS_OK:
1338 ccb->ccb_h.status |= CAM_REQ_CMP;
1339 break;
1340 case SCSI_STATUS_CHECK_COND:
1341 case SCSI_STATUS_CMD_TERMINATED:
1342 bcopy(&acb->sense_data, &ccb->csio.sense_data,
1343 ccb->csio.sense_len);
1344 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1345 ccb->csio.sense_resid = acb->queue.sense_len;
1346 /* FALLTHROUGH */
1347 default:
1348 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
1349 | CAM_DEV_QFRZN;
1350 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1351 break;
1352 }
1353 adwfreeacb(adw, acb);
1354 xpt_done(ccb);
1355 } else {
1356 adwprocesserror(adw, acb);
1357 }
1358 }
1359 }
1360
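/*
 * Translate a failed request's host adapter status into the
 * appropriate CAM status and complete the CCB.
 */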
1361 static void
1362 adwprocesserror(struct adw_softc *adw, struct acb *acb)
1363 {
1364 union ccb *ccb;
1365
1366 ccb = acb->ccb;
1367 if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
1368 ccb->ccb_h.status = CAM_REQ_ABORTED;
1369 } else {
1370
1371 switch (acb->queue.host_status) {
1372 case QHSTA_M_SEL_TIMEOUT:
1373 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1374 break;
1375 case QHSTA_M_SXFR_OFF_UFLW:
1376 case QHSTA_M_SXFR_OFF_OFLW:
1377 case QHSTA_M_DATA_OVER_RUN:
1378 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1379 break;
1380 case QHSTA_M_SXFR_DESELECTED:
1381 case QHSTA_M_UNEXPECTED_BUS_FREE:
1382 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1383 break;
1384 case QHSTA_M_SCSI_BUS_RESET:
1385 case QHSTA_M_SCSI_BUS_RESET_UNSOL:
1386 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1387 break;
1388 case QHSTA_M_BUS_DEVICE_RESET:
1389 ccb->ccb_h.status = CAM_BDR_SENT;
1390 break;
1391 case QHSTA_M_QUEUE_ABORTED:
1392 /* BDR or Bus Reset */
1393 printf("Saw Queue Aborted\n");
1394 ccb->ccb_h.status = adw->last_reset;
1395 break;
1396 case QHSTA_M_SXFR_SDMA_ERR:
1397 case QHSTA_M_SXFR_SXFR_PERR:
1398 case QHSTA_M_RDMA_PERR:
1399 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1400 break;
1401 case QHSTA_M_WTM_TIMEOUT:
1402 case QHSTA_M_SXFR_WD_TMO:
1403 {
1404 /* The SCSI bus hung in a phase */
1405 xpt_print_path(adw->path);
1406 printf("Watch Dog timer expired. Resetting bus\n");
1407 adw_reset_bus(adw);
1408 break;
1409 }
1410 case QHSTA_M_SXFR_XFR_PH_ERR:
1411 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1412 break;
1413 case QHSTA_M_SXFR_UNKNOWN_ERROR:
1414 break;
1415 case QHSTA_M_BAD_CMPL_STATUS_IN:
1416 /* No command complete after a status message */
1417 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1418 break;
1419 case QHSTA_M_AUTO_REQ_SENSE_FAIL:
1420 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1421 break;
1422 case QHSTA_M_INVALID_DEVICE:
1423 ccb->ccb_h.status = CAM_PATH_INVALID;
1424 break;
1425 case QHSTA_M_NO_AUTO_REQ_SENSE:
1426 /*
1427 * User didn't request sense, but we got a
1428 * check condition.
1429 */
1430 ccb->csio.scsi_status = acb->queue.scsi_status;
1431 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1432 break;
1433 default:
1434 panic("%s: Unhandled Host status error %x",
1435 adw_name(adw), acb->queue.host_status);
1436 /* NOTREACHED */
1437 }
1438 }
1439 if ((acb->state & ACB_RECOVERY_ACB) != 0) {
1440 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
1441 || ccb->ccb_h.status == CAM_BDR_SENT)
1442 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1443 }
1444 if (ccb->ccb_h.status != CAM_REQ_CMP) {
1445 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1446 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1447 }
1448 adwfreeacb(adw, acb);
1449 xpt_done(ccb);
1450 }
1451
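/*
 * A command has exceeded its CAM timeout.  Attempt recovery with a
 * Bus Device Reset, falling back to a full SCSI bus reset if the BDR
 * cannot be delivered.
 */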
1452 static void
1453 adwtimeout(void *arg)
1454 {
1455 struct acb *acb;
1456 union ccb *ccb;
1457 struct adw_softc *adw;
1458 adw_idle_cmd_status_t status;
1459 int target_id;
1460 int s;
1461
1462 acb = (struct acb *)arg;
1463 ccb = acb->ccb;
1464 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
1465 xpt_print_path(ccb->ccb_h.path);
1466 printf("ACB %p - timed out\n", (void *)acb);
1467
1468 s = splcam();
1469
1470 if ((acb->state & ACB_ACTIVE) == 0) {
1471 xpt_print_path(ccb->ccb_h.path);
1472 printf("ACB %p - timed out CCB already completed\n",
1473 (void *)acb);
1474 splx(s);
1475 return;
1476 }
1477
1478 acb->state |= ACB_RECOVERY_ACB;
1479 target_id = ccb->ccb_h.target_id;
1480
1481 /* Attempt a BDR first */
1482 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
1483 ccb->ccb_h.target_id);
1484 splx(s);
1485 if (status == ADW_IDLE_CMD_SUCCESS) {
1486 printf("%s: BDR Delivered. No longer in timeout\n",
1487 adw_name(adw));
1488 adw_handle_device_reset(adw, target_id);
1489 } else {
1490 adw_reset_bus(adw);
1491 xpt_print_path(adw->path);
1492 printf("Bus Reset Delivered. No longer in timeout\n");
1493 }
1494 }
1495
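/*
 * Notify CAM that a Bus Device Reset was delivered to the given target.
 */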
1496 static void
1497 adw_handle_device_reset(struct adw_softc *adw, u_int target)
1498 {
1499 struct cam_path *path;
1500 cam_status error;
1501
1502 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
1503 target, CAM_LUN_WILDCARD);
1504
1505 if (error == CAM_REQ_CMP) {
1506 xpt_async(AC_SENT_BDR, path, NULL);
1507 xpt_free_path(path);
1508 }
1509 adw->last_reset = CAM_BDR_SENT;
1510 }
1511
1512 static void
1513 adw_handle_bus_reset(struct adw_softc *adw, int initiated)
1514 {
1515 if (initiated) {
1516 /*
1517 * The microcode currently sets the SCSI Bus Reset signal
1518 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
1519 * command above. But the SCSI Bus Reset Hold Time in the
1520 * microcode is not deterministic (it may in fact be for less
1521 * than the SCSI Spec. minimum of 25 us). Therefore on return
1522 * the Adv Library sets the SCSI Bus Reset signal for
1523 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
1524 * than 25 us.
1525 */
1526 u_int scsi_ctrl;
1527
1528 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
1529 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
1530 DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
1531 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);
1532
1533 /*
1534 * We will perform the async notification when the
1535 * SCSI Reset interrupt occurs.
1536 */
1537 } else
1538 xpt_async(AC_BUS_RESET, adw->path, NULL);
1539 adw->last_reset = CAM_SCSI_BUS_RESET;
1540 }